Remove files
This commit is contained in:
parent
376f0dff9c
commit
4b215b1d97
|
@ -1,304 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page Eigen2ToEigen3 Porting from Eigen2 to Eigen3
|
||||
|
||||
<div class="bigwarning">Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3.</div>
|
||||
|
||||
This page lists the most important API changes between Eigen2 and Eigen3,
|
||||
and gives tips to help porting your application from Eigen2 to Eigen3.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section CompatibilitySupport Eigen2 compatibility support
|
||||
|
||||
In order to ease the switch from Eigen2 to Eigen3, Eigen3 features \subpage Eigen2SupportModes "Eigen2 support modes".
|
||||
|
||||
The quick way to enable this is to define the \c EIGEN2_SUPPORT preprocessor token \b before including any Eigen header (typically it should be set in your project options).
|
||||
|
||||
A more powerful, \em staged migration path is also provided, which may be useful to migrate larger projects from Eigen2 to Eigen3. This is explained in the \ref Eigen2SupportModes "Eigen 2 support modes" page.
|
||||
|
||||
\section Using The USING_PART_OF_NAMESPACE_EIGEN macro
|
||||
|
||||
The USING_PART_OF_NAMESPACE_EIGEN macro has been removed. In Eigen 3, just do:
|
||||
\code
|
||||
using namespace Eigen;
|
||||
\endcode
|
||||
|
||||
\section ComplexDot Dot products over complex numbers
|
||||
|
||||
This is the single trickiest change between Eigen 2 and Eigen 3. It only affects code using \c std::complex numbers as scalar type.
|
||||
|
||||
Eigen 2's dot product was linear in the first variable. Eigen 3's dot product is linear in the second variable. In other words, the Eigen 2 code \code x.dot(y) \endcode is equivalent to the Eigen 3 code \code y.dot(x) \endcode In yet other words, dot products are complex-conjugated in Eigen 3 compared to Eigen 2. The switch to the new convention was commanded by common usage, especially with the notation \f$ x^Ty \f$ for dot products of column-vectors.
|
||||
|
||||
\section VectorBlocks Vector blocks
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Eigen 2</th><th>Eigen 3</th></tr>
|
||||
<tr><td>\code
|
||||
vector.start(length)
|
||||
vector.start<length>()
|
||||
vector.end(length)
|
||||
vector.end<length>()
|
||||
\endcode</td><td>\code
|
||||
vector.head(length)
|
||||
vector.head<length>()
|
||||
vector.tail(length)
|
||||
vector.tail<length>()
|
||||
\endcode</td></tr>
|
||||
</table>
|
||||
|
||||
|
||||
\section Corners Matrix Corners
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Eigen 2</th><th>Eigen 3</th></tr>
|
||||
<tr><td>\code
|
||||
matrix.corner(TopLeft,r,c)
|
||||
matrix.corner(TopRight,r,c)
|
||||
matrix.corner(BottomLeft,r,c)
|
||||
matrix.corner(BottomRight,r,c)
|
||||
matrix.corner<r,c>(TopLeft)
|
||||
matrix.corner<r,c>(TopRight)
|
||||
matrix.corner<r,c>(BottomLeft)
|
||||
matrix.corner<r,c>(BottomRight)
|
||||
\endcode</td><td>\code
|
||||
matrix.topLeftCorner(r,c)
|
||||
matrix.topRightCorner(r,c)
|
||||
matrix.bottomLeftCorner(r,c)
|
||||
matrix.bottomRightCorner(r,c)
|
||||
matrix.topLeftCorner<r,c>()
|
||||
matrix.topRightCorner<r,c>()
|
||||
matrix.bottomLeftCorner<r,c>()
|
||||
matrix.bottomRightCorner<r,c>()
|
||||
\endcode</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
Notice that Eigen3 also provides these new convenience methods: topRows(), bottomRows(), leftCols(), rightCols(). See in class DenseBase.
|
||||
|
||||
\section CoefficientWiseOperations Coefficient wise operations
|
||||
|
||||
In Eigen2, coefficient wise operations which have no proper mathematical definition (as a coefficient wise product)
|
||||
were achieved using the .cwise() prefix, e.g.:
|
||||
\code a.cwise() * b \endcode
|
||||
In Eigen3 this .cwise() prefix has been superseded by a new kind of matrix type called
|
||||
Array for which all operations are performed coefficient wise. You can easily view a matrix as an array and vice versa using
|
||||
the MatrixBase::array() and ArrayBase::matrix() functions respectively. Here is an example:
|
||||
\code
|
||||
Vector4f a, b, c;
|
||||
c = a.array() * b.array();
|
||||
\endcode
|
||||
Note that the .array() function is not at all a synonym of the deprecated .cwise() prefix.
|
||||
While the .cwise() prefix changed the behavior of the following operator, the array() function performs
|
||||
a permanent conversion to the array world. Therefore, for binary operations such as the coefficient wise product,
|
||||
both sides must be converted to an \em array as in the above example. On the other hand, when you
|
||||
concatenate multiple coefficient wise operations you only have to do the conversion once, e.g.:
|
||||
\code
|
||||
Vector4f a, b, c;
|
||||
c = a.array().abs().pow(3) * b.array().abs().sin();
|
||||
\endcode
|
||||
With Eigen2 you would have written:
|
||||
\code
|
||||
c = (a.cwise().abs().cwise().pow(3)).cwise() * (b.cwise().abs().cwise().sin());
|
||||
\endcode
|
||||
|
||||
\section PartAndExtract Triangular and self-adjoint matrices
|
||||
|
||||
In Eigen 2 you had to play with the part, extract, and marked functions to deal with triangular and selfadjoint matrices. In Eigen 3, all these functions have been removed in favor of the concept of \em views:
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Eigen 2</th><th>Eigen 3</th></tr>
|
||||
<tr><td>\code
|
||||
A.part<UpperTriangular>();
|
||||
A.part<StrictlyLowerTriangular>(); \endcode</td>
|
||||
<td>\code
|
||||
A.triangularView<Upper>()
|
||||
A.triangularView<StrictlyLower>()\endcode</td></tr>
|
||||
<tr><td>\code
|
||||
A.extract<UpperTriangular>();
|
||||
A.extract<StrictlyLowerTriangular>();\endcode</td>
|
||||
<td>\code
|
||||
A.triangularView<Upper>()
|
||||
A.triangularView<StrictlyLower>()\endcode</td></tr>
|
||||
<tr><td>\code
|
||||
A.marked<UpperTriangular>();
|
||||
A.marked<StrictlyLowerTriangular>();\endcode</td>
|
||||
<td>\code
|
||||
A.triangularView<Upper>()
|
||||
A.triangularView<StrictlyLower>()\endcode</td></tr>
|
||||
<tr><td colspan="2"></td></tr>
|
||||
<tr><td>\code
|
||||
A.part<SelfAdjoint|UpperTriangular>();
|
||||
A.extract<SelfAdjoint|LowerTriangular>();\endcode</td>
|
||||
<td>\code
|
||||
A.selfadjointView<Upper>()
|
||||
A.selfadjointView<Lower>()\endcode</td></tr>
|
||||
<tr><td colspan="2"></td></tr>
|
||||
<tr><td>\code
|
||||
UpperTriangular
|
||||
LowerTriangular
|
||||
UnitUpperTriangular
|
||||
UnitLowerTriangular
|
||||
StrictlyUpperTriangular
|
||||
StrictlyLowerTriangular
|
||||
\endcode</td><td>\code
|
||||
Upper
|
||||
Lower
|
||||
UnitUpper
|
||||
UnitLower
|
||||
StrictlyUpper
|
||||
StrictlyLower
|
||||
\endcode</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
\sa class TriangularView, class SelfAdjointView
|
||||
|
||||
\section TriangularSolveInPlace Triangular in-place solving
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Eigen 2</th><th>Eigen 3</th></tr>
|
||||
<tr><td>\code A.triangularSolveInPlace<XxxTriangular>(Y);\endcode</td><td>\code A.triangularView<Xxx>().solveInPlace(Y);\endcode</td></tr>
|
||||
</table>
|
||||
|
||||
|
||||
\section Decompositions Matrix decompositions
|
||||
|
||||
Some of Eigen 2's matrix decompositions have been renamed in Eigen 3, while some others have been removed and are replaced by other decompositions in Eigen 3.
|
||||
|
||||
<table class="manual">
|
||||
<tr>
|
||||
<th>Eigen 2</th>
|
||||
<th>Eigen 3</th>
|
||||
<th>Notes</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>LU</td>
|
||||
<td>FullPivLU</td>
|
||||
<td class="alt">See also the new PartialPivLU, it's much faster</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>QR</td>
|
||||
<td>HouseholderQR</td>
|
||||
<td class="alt">See also the new ColPivHouseholderQR, it's more reliable</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>SVD</td>
|
||||
<td>JacobiSVD</td>
|
||||
<td class="alt">We currently don't have a bidiagonalizing SVD; of course this is planned.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>EigenSolver and friends</td>
|
||||
<td>\code #include<Eigen/Eigenvalues> \endcode </td>
|
||||
<td class="alt">Moved to separate module</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
\section LinearSolvers Linear solvers
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Eigen 2</th><th>Eigen 3</th><th>Notes</th></tr>
|
||||
<tr><td>\code A.lu();\endcode</td>
|
||||
<td>\code A.fullPivLu();\endcode</td>
|
||||
<td class="alt">Now A.lu() returns a PartialPivLU</td></tr>
|
||||
<tr><td>\code A.lu().solve(B,&X);\endcode</td>
|
||||
<td>\code X = A.lu().solve(B);
|
||||
X = A.fullPivLu().solve(B);\endcode</td>
|
||||
<td class="alt">The result returned by value is fully optimized</td></tr>
|
||||
<tr><td>\code A.llt().solve(B,&X);\endcode</td>
|
||||
<td>\code X = A.llt().solve(B);
|
||||
X = A.selfadjointView<Lower>().llt().solve(B);
|
||||
X = A.selfadjointView<Upper>().llt().solve(B);\endcode</td>
|
||||
<td class="alt">The result returned by value is fully optimized and \n
|
||||
the selfadjointView API allows you to select the \n
|
||||
triangular part to work on (default is lower part)</td></tr>
|
||||
<tr><td>\code A.llt().solveInPlace(B);\endcode</td>
|
||||
<td>\code B = A.llt().solve(B);
|
||||
B = A.selfadjointView<Lower>().llt().solve(B);
|
||||
B = A.selfadjointView<Upper>().llt().solve(B);\endcode</td>
|
||||
<td class="alt">In place solving</td></tr>
|
||||
<tr><td>\code A.ldlt().solve(B,&X);\endcode</td>
|
||||
<td>\code X = A.ldlt().solve(B);
|
||||
X = A.selfadjointView<Lower>().ldlt().solve(B);
|
||||
X = A.selfadjointView<Upper>().ldlt().solve(B);\endcode</td>
|
||||
<td class="alt">The result returned by value is fully optimized and \n
|
||||
the selfadjointView API allows you to select the \n
|
||||
triangular part to work on</td></tr>
|
||||
</table>
|
||||
|
||||
\section GeometryModule Changes in the Geometry module
|
||||
|
||||
The Geometry module is the one that changed the most. If you rely heavily on it, it's probably a good idea to use the \ref Eigen2SupportModes "Eigen 2 support modes" to perform your migration.
|
||||
|
||||
\section Transform The Transform class
|
||||
|
||||
In Eigen 2, the Transform class didn't really know whether it was a projective or affine transformation. In Eigen 3, it takes a new \a Mode template parameter, which indicates whether it's \a Projective or \a Affine transform. There is no default value.
|
||||
|
||||
The Transform3f (etc) typedefs are no more. In Eigen 3, the Transform typedefs explicitly refer to the \a Projective and \a Affine modes:
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Eigen 2</th><th>Eigen 3</th><th>Notes</th></tr>
|
||||
<tr>
|
||||
<td> Transform3f </td>
|
||||
<td> Affine3f or Projective3f </td>
|
||||
<td> Of course 3f is just an example here </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
\section LazyVsNoalias Lazy evaluation and noalias
|
||||
|
||||
In Eigen all operations are performed in a lazy fashion except the matrix products which are always evaluated into a temporary by default.
|
||||
In Eigen2, lazy evaluation could be enforced by tagging a product using the .lazy() function. However, in complex expressions it was not
|
||||
easy to determine where to put the lazy() function. In Eigen3, the lazy() feature has been superseded by the MatrixBase::noalias() function
|
||||
which can be used on the left hand side of an assignment when no aliasing can occur. Here is an example:
|
||||
\code
|
||||
MatrixXf a, b, c;
|
||||
...
|
||||
c.noalias() += 2 * a.transpose() * b;
|
||||
\endcode
|
||||
However, the noalias mechanism does not cover all the features of the old .lazy(). Indeed, in some extremely rare cases,
|
||||
it might be useful to explicitly request a lazy product, i.e., a product which will be evaluated one coefficient at once, on request,
|
||||
just like any other expressions. To this end you can use the MatrixBase::lazyProduct() function, however we strongly discourage you to
|
||||
use it unless you are sure of what you are doing, i.e., you have rigorously measured a speed improvement.
|
||||
|
||||
\section AlignMacros Alignment-related macros
|
||||
|
||||
The EIGEN_ALIGN_128 macro has been renamed to EIGEN_ALIGN16. Don't be surprised, it's just that we switched to counting in bytes ;-)
|
||||
|
||||
The EIGEN_DONT_ALIGN option still exists in Eigen 3, but it has a new cousin: EIGEN_DONT_ALIGN_STATICALLY. It allows to get rid of all static alignment issues while keeping alignment of dynamic-size heap-allocated arrays, thus keeping vectorization for dynamic-size objects.
|
||||
|
||||
\section AlignedMap Aligned Map objects
|
||||
|
||||
A common issue with Eigen 2 was that when mapping an array with Map, there was no way to tell Eigen that your array was aligned. There was a ForceAligned option but it didn't mean that; it was just confusing and has been removed.
|
||||
|
||||
New in Eigen3 is the #Aligned option. See the documentation of class Map. Use it like this:
|
||||
\code
|
||||
Map<Vector4f, Aligned> myMappedVector(some_aligned_array);
|
||||
\endcode
|
||||
There also are related convenience static methods, which actually are the preferred way as they take care of such things as constness:
|
||||
\code
|
||||
result = Vector4f::MapAligned(some_aligned_array);
|
||||
\endcode
|
||||
|
||||
\section StdContainers STL Containers
|
||||
|
||||
In Eigen2, <tt>#include<Eigen/StdVector></tt> tweaked std::vector to automatically align elements. The problem was that that was quite invasive. In Eigen3, we only override standard behavior if you use Eigen::aligned_allocator<T> as your allocator type. So for example, if you use std::vector<Matrix4f>, you need to do the following change (note that aligned_allocator is under namespace Eigen):
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Eigen 2</th><th>Eigen 3</th></tr>
|
||||
<tr>
|
||||
<td> \code std::vector<Matrix4f> \endcode </td>
|
||||
<td> \code std::vector<Matrix4f, aligned_allocator<Matrix4f> > \endcode </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
\section eiPrefix Internal ei_ prefix
|
||||
|
||||
In Eigen2, global internal functions and structures were prefixed by \c ei_. In Eigen3, they all have been moved into the more explicit \c internal namespace. So, e.g., \c ei_sqrt(x) now becomes \c internal::sqrt(x). Of course it is not recommended to rely on Eigen's internal features.
|
||||
|
||||
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,95 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page Eigen2SupportModes Eigen 2 support modes
|
||||
|
||||
<div class="bigwarning">Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3.</div>
|
||||
|
||||
This page documents the Eigen2 support modes, a powerful tool to help migrating your project from Eigen 2 to Eigen 3.
|
||||
Don't miss our page on \ref Eigen2ToEigen3 "API changes" between Eigen 2 and Eigen 3.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section EIGEN2_SUPPORT_Macro The quick way: define EIGEN2_SUPPORT
|
||||
|
||||
By defining EIGEN2_SUPPORT before including any Eigen 3 header, you get back a large part of the Eigen 2 API, while keeping the Eigen 3 API and ABI unchanged.
|
||||
|
||||
This defaults to the \ref Stage30 "stage 30" described below.
|
||||
|
||||
The rest of this page describes an optional, more powerful \em staged migration path.
|
||||
|
||||
\section StagedMigrationPathOverview Overview of the staged migration path
|
||||
|
||||
The primary reason why EIGEN2_SUPPORT alone may not be enough to migrate a large project from Eigen 2 to Eigen 3 is that some of the Eigen 2 API is inherently incompatible with the Eigen 3 API. This happens when the same identifier is used in Eigen 2 and in Eigen 3 with different meanings. To help migrate projects that rely on such API, we provide a staged migration path allowing to perform the migration \em incrementally.
|
||||
|
||||
It goes as follows:
|
||||
\li Step 0: start with a project using Eigen 2.
|
||||
\li Step 1: build your project against Eigen 3 with \ref Stage10 "Eigen 2 support stage 10". This mode enables maximum compatibility with the Eigen 2 API, with just a few exceptions.
|
||||
\li Step 2: build your project against Eigen 3 with \ref Stage20 "Eigen 2 support stage 20". This mode forces you to add eigen2_ prefixes to the Eigen2 identifiers that conflict with Eigen 3 API.
|
||||
\li Step 3: build your project against Eigen 3 with \ref Stage30 "Eigen 2 support stage 30". This mode enables the full Eigen 3 API.
|
||||
\li Step 4: build your project against Eigen 3 with \ref Stage40 "Eigen 2 support stage 40". This mode enables the full Eigen 3 strictness on matters, such as const-correctness, where Eigen 2 was looser.
|
||||
\li Step 5: build your project against Eigen 3 without any Eigen 2 support mode.
|
||||
|
||||
\section Stage10 Stage 10: define EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API
|
||||
|
||||
Enable this mode by defining the EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API preprocessor macro before including any Eigen 3 header.
|
||||
|
||||
This mode maximizes support for the Eigen 2 API. As a result, it does not offer the full Eigen 3 API. Also, it doesn't offer quite 100% of the Eigen 2 API.
|
||||
|
||||
The part of the Eigen 3 API that is not present in this mode, is Eigen 3's Geometry module. Indeed, this mode completely replaces it by a copy of Eigen 2's Geometry module.
|
||||
|
||||
The parts of the API that are still not 100% Eigen 2 compatible in this mode are:
|
||||
\li Dot products over complex numbers. Eigen 2's dot product was linear in the first variable. Eigen 3's dot product is linear in the second variable. In other words, the Eigen 2 code \code x.dot(y) \endcode is equivalent to the Eigen 3 code \code y.dot(x) \endcode In yet other words, dot products are complex-conjugated in Eigen 3 compared to Eigen 2. The switch to the new convention was commanded by common usage, especially with the notation \f$ x^Ty \f$ for dot products of column-vectors.
|
||||
\li The Sparse module.
|
||||
\li Certain fine details of linear algebraic decompositions. For example, LDLT decomposition is now pivoting in Eigen 3 whereas it wasn't in Eigen 2, so code that was relying on its underlying matrix structure will break.
|
||||
\li Usage of Eigen types in STL containers, \ref Eigen2ToEigen3 "as explained on this page".
|
||||
|
||||
\section Stage20 Stage 20: define EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS
|
||||
|
||||
Enable this mode by defining the EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS preprocessor macro before including any Eigen 3 header.
|
||||
|
||||
This mode removes the Eigen 2 API that is directly conflicting with Eigen 3 API. Instead, these bits of Eigen 2 API remain available with eigen2_ prefixes. The main examples of such API are:
|
||||
\li the whole Geometry module. For example, replace \c Quaternion by \c eigen2_Quaternion, replace \c Transform3f by \c eigen2_Transform3f, etc.
|
||||
\li the lu() method to obtain a LU decomposition. Replace by eigen2_lu().
|
||||
|
||||
There is also one more eigen2_-prefixed identifier that you should know about, even though its use is not checked at compile time by this mode: the dot() method. As was discussed above, over complex numbers, its meaning is different between Eigen 2 and Eigen 3. You can use eigen2_dot() to get the Eigen 2 behavior.
|
||||
|
||||
\section Stage30 Stage 30: define EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API
|
||||
|
||||
Enable this mode by defining the EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API preprocessor macro before including any Eigen 3 header. Also, this mode is what you get by default when you just define EIGEN2_SUPPORT.
|
||||
|
||||
This mode gives you the full unaltered Eigen 3 API, while still keeping as much support as possible for the Eigen 2 API.
|
||||
|
||||
The eigen2_-prefixed identifiers are still available, but at this stage you should now replace them by Eigen 3 identifiers. Have a look at our page on \ref Eigen2ToEigen3 "API changes" between Eigen 2 and Eigen 3.
|
||||
|
||||
\section Stage40 Stage 40: define EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS
|
||||
|
||||
Enable this mode by defining the EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS preprocessor macro before including any Eigen 3 header.
|
||||
|
||||
This mode tightens the last bits of strictness, especially const-correctness, that had to be loosened to support what Eigen 2 allowed. For example, this code compiled in Eigen 2:
|
||||
\code
|
||||
const float array[4];
|
||||
x = Map<Vector4f>(array);
|
||||
\endcode
|
||||
That allowed to circumvent constness. This is no longer allowed in Eigen 3. If you have to map const data in Eigen 3, map it as a const-qualified type. However, rather than explicitly constructing Map objects, we strongly encourage you to use the static Map methods instead, as they take care of all of this for you:
|
||||
\code
|
||||
const float array[4];
|
||||
x = Vector4f::Map(array);
|
||||
\endcode
|
||||
This lets Eigen do the right thing for you and works equally well in Eigen 2 and in Eigen 3.
|
||||
|
||||
\section FinallyDropAllEigen2Support Finally drop all Eigen 2 support
|
||||
|
||||
Stage 40 is the first where it's "comfortable" to stay for a little longer period, since it preserves 100% Eigen 3 compatibility. However, we still encourage you to complete your migration as quickly as possible. While we do run the Eigen 2 test suite against Eigen 3's stage 10 support mode, we can't guarantee the same level of support and quality assurance for Eigen 2 support as we do for Eigen 3 itself, especially not in the long term. \ref Eigen2ToEigen3 "This page" describes a large part of the changes that you may need to perform.
|
||||
|
||||
\section ABICompatibility What about ABI compatibility?
|
||||
|
||||
It goes as follows:
|
||||
\li Stage 10 already is ABI compatible with Eigen 3 for the basic (Matrix, Array, SparseMatrix...) types. However, since this stage uses a copy of Eigen 2's Geometry module instead of Eigen 3's own Geometry module, the ABI in the Geometry module is not Eigen 3 compatible.
|
||||
\li Stage 20 removes the Eigen 3-incompatible Eigen 2 Geometry module (it remains available with eigen2_ prefix). So at this stage, all the identifiers that exist in Eigen 3 have the Eigen 3 ABI (and API).
|
||||
\li Stage 30 introduces the remaining Eigen 3 identifiers. So at this stage, you have the full Eigen 3 ABI.
|
||||
\li Stage 40 is no different than Stage 30 in these matters.
|
||||
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,207 +0,0 @@
|
|||
// A simple quickref for Eigen. Add anything that's missing.
|
||||
// Main author: Keir Mierle
|
||||
|
||||
#include <Eigen/Dense>
|
||||
|
||||
Matrix<double, 3, 3> A; // Fixed rows and cols. Same as Matrix3d.
|
||||
Matrix<double, 3, Dynamic> B; // Fixed rows, dynamic cols.
|
||||
Matrix<double, Dynamic, Dynamic> C; // Full dynamic. Same as MatrixXd.
|
||||
Matrix<double, 3, 3, RowMajor> E; // Row major; default is column-major.
|
||||
Matrix3f P, Q, R; // 3x3 float matrix.
|
||||
Vector3f x, y, z; // 3x1 float matrix.
|
||||
RowVector3f a, b, c; // 1x3 float matrix.
|
||||
VectorXd v; // Dynamic column vector of doubles
|
||||
double s;
|
||||
|
||||
// Basic usage
|
||||
// Eigen // Matlab // comments
|
||||
x.size() // length(x) // vector size
|
||||
C.rows() // size(C,1) // number of rows
|
||||
C.cols() // size(C,2) // number of columns
|
||||
x(i) // x(i+1) // Matlab is 1-based
|
||||
C(i,j) // C(i+1,j+1) //
|
||||
|
||||
A.resize(4, 4); // Runtime error if assertions are on.
|
||||
B.resize(4, 9); // Runtime error if assertions are on.
|
||||
A.resize(3, 3); // Ok; size didn't change.
|
||||
B.resize(3, 9); // Ok; only dynamic cols changed.
|
||||
|
||||
A << 1, 2, 3, // Initialize A. The elements can also be
|
||||
4, 5, 6, // matrices, which are stacked along cols
|
||||
7, 8, 9; // and then the rows are stacked.
|
||||
B << A, A, A; // B is three horizontally stacked A's.
|
||||
A.fill(10); // Fill A with all 10's.
|
||||
|
||||
// Eigen // Matlab
|
||||
MatrixXd::Identity(rows,cols) // eye(rows,cols)
|
||||
C.setIdentity(rows,cols) // C = eye(rows,cols)
|
||||
MatrixXd::Zero(rows,cols) // zeros(rows,cols)
|
||||
C.setZero(rows,cols)               // C = zeros(rows,cols)
|
||||
MatrixXd::Ones(rows,cols) // ones(rows,cols)
|
||||
C.setOnes(rows,cols) // C = ones(rows,cols)
|
||||
MatrixXd::Random(rows,cols) // rand(rows,cols)*2-1 // MatrixXd::Random returns uniform random numbers in (-1, 1).
|
||||
C.setRandom(rows,cols) // C = rand(rows,cols)*2-1
|
||||
VectorXd::LinSpaced(size,low,high) // linspace(low,high,size)'
|
||||
v.setLinSpaced(size,low,high) // v = linspace(low,high,size)'
|
||||
|
||||
|
||||
// Matrix slicing and blocks. All expressions listed here are read/write.
|
||||
// Templated size versions are faster. Note that Matlab is 1-based (a size N
|
||||
// vector is x(1)...x(N)).
|
||||
// Eigen // Matlab
|
||||
x.head(n) // x(1:n)
|
||||
x.head<n>() // x(1:n)
|
||||
x.tail(n) // x(end - n + 1: end)
|
||||
x.tail<n>() // x(end - n + 1: end)
|
||||
x.segment(i, n) // x(i+1 : i+n)
|
||||
x.segment<n>(i) // x(i+1 : i+n)
|
||||
P.block(i, j, rows, cols) // P(i+1 : i+rows, j+1 : j+cols)
|
||||
P.block<rows, cols>(i, j) // P(i+1 : i+rows, j+1 : j+cols)
|
||||
P.row(i) // P(i+1, :)
|
||||
P.col(j) // P(:, j+1)
|
||||
P.leftCols<cols>() // P(:, 1:cols)
|
||||
P.leftCols(cols) // P(:, 1:cols)
|
||||
P.middleCols<cols>(j) // P(:, j+1:j+cols)
|
||||
P.middleCols(j, cols) // P(:, j+1:j+cols)
|
||||
P.rightCols<cols>() // P(:, end-cols+1:end)
|
||||
P.rightCols(cols) // P(:, end-cols+1:end)
|
||||
P.topRows<rows>() // P(1:rows, :)
|
||||
P.topRows(rows) // P(1:rows, :)
|
||||
P.middleRows<rows>(i) // P(i+1:i+rows, :)
|
||||
P.middleRows(i, rows) // P(i+1:i+rows, :)
|
||||
P.bottomRows<rows>() // P(end-rows+1:end, :)
|
||||
P.bottomRows(rows) // P(end-rows+1:end, :)
|
||||
P.topLeftCorner(rows, cols) // P(1:rows, 1:cols)
|
||||
P.topRightCorner(rows, cols) // P(1:rows, end-cols+1:end)
|
||||
P.bottomLeftCorner(rows, cols) // P(end-rows+1:end, 1:cols)
|
||||
P.bottomRightCorner(rows, cols) // P(end-rows+1:end, end-cols+1:end)
|
||||
P.topLeftCorner<rows,cols>() // P(1:rows, 1:cols)
|
||||
P.topRightCorner<rows,cols>() // P(1:rows, end-cols+1:end)
|
||||
P.bottomLeftCorner<rows,cols>() // P(end-rows+1:end, 1:cols)
|
||||
P.bottomRightCorner<rows,cols>() // P(end-rows+1:end, end-cols+1:end)
|
||||
|
||||
// Of particular note is Eigen's swap function which is highly optimized.
|
||||
// Eigen // Matlab
|
||||
R.row(i) = P.col(j);               // R(i, :) = P(:, j)'
|
||||
R.col(j1).swap(mat1.col(j2)); // R(:, [j1 j2]) = R(:, [j2, j1])
|
||||
|
||||
// Views, transpose, etc; all read-write except for .adjoint().
|
||||
// Eigen // Matlab
|
||||
R.adjoint() // R'
|
||||
R.transpose() // R.' or conj(R')
|
||||
R.diagonal() // diag(R)
|
||||
x.asDiagonal() // diag(x)
|
||||
R.transpose().colwise().reverse(); // rot90(R)
|
||||
R.conjugate() // conj(R)
|
||||
|
||||
// All the same as Matlab, but matlab doesn't have *= style operators.
|
||||
// Matrix-vector. Matrix-matrix. Matrix-scalar.
|
||||
y = M*x; R = P*Q; R = P*s;
|
||||
a = b*M; R = P - Q; R = s*P;
|
||||
a *= M; R = P + Q; R = P/s;
|
||||
R *= Q; R = s*P;
|
||||
R += Q; R *= s;
|
||||
R -= Q; R /= s;
|
||||
|
||||
// Vectorized operations on each element independently
|
||||
// Eigen // Matlab
|
||||
R = P.cwiseProduct(Q); // R = P .* Q
|
||||
R = P.array() * s;        // R = P .* s
|
||||
R = P.cwiseQuotient(Q); // R = P ./ Q
|
||||
R = P.array() / Q.array();// R = P ./ Q
|
||||
R = P.array() + s;        // R = P + s
|
||||
R = P.array() - s;        // R = P - s
|
||||
R.array() += s; // R = R + s
|
||||
R.array() -= s; // R = R - s
|
||||
R.array() < Q.array(); // R < Q
|
||||
R.array() <= Q.array(); // R <= Q
|
||||
R.cwiseInverse(); // 1 ./ P
|
||||
R.array().inverse(); // 1 ./ P
|
||||
R.array().sin() // sin(P)
|
||||
R.array().cos() // cos(P)
|
||||
R.array().pow(s) // P .^ s
|
||||
R.array().square() // P .^ 2
|
||||
R.array().cube() // P .^ 3
|
||||
R.cwiseSqrt() // sqrt(P)
|
||||
R.array().sqrt() // sqrt(P)
|
||||
R.array().exp() // exp(P)
|
||||
R.array().log() // log(P)
|
||||
R.cwiseMax(P) // max(R, P)
|
||||
R.array().max(P.array()) // max(R, P)
|
||||
R.cwiseMin(P) // min(R, P)
|
||||
R.array().min(P.array()) // min(R, P)
|
||||
R.cwiseAbs() // abs(P)
|
||||
R.array().abs() // abs(P)
|
||||
R.cwiseAbs2() // abs(P.^2)
|
||||
R.array().abs2() // abs(P.^2)
|
||||
(R.array() < s).select(P,Q); // (R < s ? P : Q)
|
||||
|
||||
// Reductions.
|
||||
int r, c;
|
||||
// Eigen // Matlab
|
||||
R.minCoeff() // min(R(:))
|
||||
R.maxCoeff() // max(R(:))
|
||||
s = R.minCoeff(&r, &c) // [s, i] = min(R(:)); [r, c] = ind2sub(size(R), i);
|
||||
s = R.maxCoeff(&r, &c) // [s, i] = max(R(:)); [r, c] = ind2sub(size(R), i);
|
||||
R.sum() // sum(R(:))
|
||||
R.colwise().sum() // sum(R)
|
||||
R.rowwise().sum() // sum(R, 2) or sum(R')'
|
||||
R.prod() // prod(R(:))
|
||||
R.colwise().prod() // prod(R)
|
||||
R.rowwise().prod() // prod(R, 2) or prod(R')'
|
||||
R.trace() // trace(R)
|
||||
R.all() // all(R(:))
|
||||
R.colwise().all() // all(R)
|
||||
R.rowwise().all() // all(R, 2)
|
||||
R.any() // any(R(:))
|
||||
R.colwise().any() // any(R)
|
||||
R.rowwise().any() // any(R, 2)
|
||||
|
||||
// Dot products, norms, etc.
|
||||
// Eigen // Matlab
|
||||
x.norm() // norm(x). Note that norm(R) doesn't work in Eigen.
|
||||
x.squaredNorm() // dot(x, x) Note the equivalence is not true for complex
|
||||
x.dot(y) // dot(x, y)
|
||||
x.cross(y) // cross(x, y) Requires #include <Eigen/Geometry>
|
||||
|
||||
//// Type conversion
|
||||
// Eigen // Matlab
|
||||
A.cast<double>(); // double(A)
|
||||
A.cast<float>(); // single(A)
|
||||
A.cast<int>(); // int32(A)
|
||||
A.real(); // real(A)
|
||||
A.imag(); // imag(A)
|
||||
// if the original type equals destination type, no work is done
|
||||
|
||||
// Note that for most operations Eigen requires all operands to have the same type:
|
||||
MatrixXf F = MatrixXf::Zero(3,3);
|
||||
A += F; // illegal in Eigen. In Matlab A = A+F is allowed
|
||||
A += F.cast<double>(); // F converted to double and then added (generally, conversion happens on-the-fly)
|
||||
|
||||
// Eigen can map existing memory into Eigen matrices.
|
||||
float array[3];
|
||||
Vector3f::Map(array).fill(10); // create a temporary Map over array and sets entries to 10
|
||||
int data[4] = {1, 2, 3, 4};
|
||||
Matrix2i mat2x2(data); // copies data into mat2x2
|
||||
Matrix2i::Map(data) = 2*mat2x2; // overwrite elements of data with 2*mat2x2
|
||||
MatrixXi::Map(data, 2, 2) += mat2x2;  // adds mat2x2 to elements of data (alternative syntax if size is not known at compile time)
|
||||
|
||||
// Solve Ax = b. Result stored in x. Matlab: x = A \ b.
|
||||
x = A.ldlt().solve(b);   // A sym. p.s.d.    #include <Eigen/Cholesky>
|
||||
x = A.llt() .solve(b);   // A sym. p.d.      #include <Eigen/Cholesky>
|
||||
x = A.lu()  .solve(b);   // Stable and fast. #include <Eigen/LU>
|
||||
x = A.qr()  .solve(b);   // No pivoting.     #include <Eigen/QR>
|
||||
x = A.svd() .solve(b);   // Stable, slowest. #include <Eigen/SVD>
|
||||
// .ldlt() -> .matrixL() and .matrixD()
|
||||
// .llt() -> .matrixL()
|
||||
// .lu() -> .matrixL() and .matrixU()
|
||||
// .qr() -> .matrixQ() and .matrixR()
|
||||
// .svd() -> .matrixU(), .singularValues(), and .matrixV()
|
||||
|
||||
// Eigenvalue problems
|
||||
// Eigen // Matlab
|
||||
A.eigenvalues(); // eig(A);
|
||||
EigenSolver<Matrix3d> eig(A); // [vec val] = eig(A)
|
||||
eig.eigenvalues(); // diag(val)
|
||||
eig.eigenvectors(); // vec
|
||||
// For self-adjoint matrices use SelfAdjointEigenSolver<>
|
|
@ -1,52 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page Experimental Experimental parts of Eigen
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section summary Summary
|
||||
|
||||
With the 2.0 release, Eigen's API is, to a large extent, stable. However, we wish to retain the freedom to make API incompatible changes. To that effect, we call many parts of Eigen "experimental" which means that they are not subject to API stability guarantee.
|
||||
|
||||
Our goal is that for the 2.1 release (expected in July 2009) most of these parts become API-stable too.
|
||||
|
||||
We are aware that API stability is a major concern for our users. That's why it's a priority for us to reach it, but at the same time we're being serious about not calling Eigen API-stable too early.
|
||||
|
||||
Experimental features may at any time:
|
||||
\li be removed;
|
||||
\li be subject to an API incompatible change;
|
||||
\li introduce API or ABI incompatible changes in your own code if you let them affect your API or ABI.
|
||||
|
||||
\section modules Experimental modules
|
||||
|
||||
The following modules are considered entirely experimental, and we make no firm API stability guarantee about them for the time being:
|
||||
\li SVD
|
||||
\li QR
|
||||
\li Cholesky
|
||||
\li Sparse
|
||||
\li Geometry (this one should be mostly stable, but it's a little too early to make a formal guarantee)
|
||||
|
||||
\section core Experimental parts of the Core module
|
||||
|
||||
In the Core module, the only classes subject to ABI stability guarantee (meaning that you can use it for data members in your public ABI) is:
|
||||
\li Matrix
|
||||
\li Map
|
||||
|
||||
All other classes offer no ABI guarantee, e.g. the layout of their data can be changed.
|
||||
|
||||
The only classes subject to (even partial) API stability guarantee (meaning that you can safely construct and use objects) are:
|
||||
\li MatrixBase : partial API stability (see below)
|
||||
\li Matrix : full API stability (except for experimental stuff inherited from MatrixBase)
|
||||
\li Map : full API stability (except for experimental stuff inherited from MatrixBase)
|
||||
|
||||
All other classes offer no direct API guarantee, e.g. their methods can be changed; however notice that most classes inherit MatrixBase and that this is where most of their API comes from -- so in practice most of the API is stable.
|
||||
|
||||
A few MatrixBase methods are considered experimental, hence not part of any API stability guarantee:
|
||||
\li all methods documented as internal
|
||||
\li all methods hidden in the Doxygen documentation
|
||||
\li all methods marked as experimental
|
||||
\li all methods defined in experimental modules
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,98 +0,0 @@
|
|||
project(EigenDoc)
|
||||
|
||||
set_directory_properties(PROPERTIES EXCLUDE_FROM_ALL TRUE)
|
||||
|
||||
project(EigenDoc)
|
||||
|
||||
if(CMAKE_COMPILER_IS_GNUCXX)
|
||||
if(CMAKE_SYSTEM_NAME MATCHES Linux)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O1 -g1")
|
||||
endif(CMAKE_SYSTEM_NAME MATCHES Linux)
|
||||
endif(CMAKE_COMPILER_IS_GNUCXX)
|
||||
|
||||
# Set some Doxygen flags
|
||||
set(EIGEN_DOXY_PROJECT_NAME "Eigen")
|
||||
set(EIGEN_DOXY_OUTPUT_DIRECTORY_SUFFIX "")
|
||||
set(EIGEN_DOXY_INPUT "\"${Eigen_SOURCE_DIR}/Eigen\" \"${Eigen_SOURCE_DIR}/doc\"")
|
||||
set(EIGEN_DOXY_HTML_COLORSTYLE_HUE "220")
|
||||
set(EIGEN_DOXY_TAGFILES "")
|
||||
|
||||
configure_file(
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
|
||||
${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
|
||||
)
|
||||
|
||||
set(EIGEN_DOXY_PROJECT_NAME "Eigen-unsupported")
|
||||
set(EIGEN_DOXY_OUTPUT_DIRECTORY_SUFFIX "/unsupported")
|
||||
set(EIGEN_DOXY_INPUT "\"${Eigen_SOURCE_DIR}/unsupported/Eigen\" \"${Eigen_SOURCE_DIR}/unsupported/doc\"")
|
||||
set(EIGEN_DOXY_HTML_COLORSTYLE_HUE "0")
|
||||
# set(EIGEN_DOXY_TAGFILES "\"${Eigen_BINARY_DIR}/doc/eigen.doxytags =../\"")
|
||||
set(EIGEN_DOXY_TAGFILES "")
|
||||
|
||||
configure_file(
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
|
||||
${CMAKE_CURRENT_BINARY_DIR}/Doxyfile-unsupported
|
||||
)
|
||||
|
||||
configure_file(
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/eigendoxy_header.html.in
|
||||
${CMAKE_CURRENT_BINARY_DIR}/eigendoxy_header.html
|
||||
)
|
||||
|
||||
configure_file(
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/eigendoxy_footer.html.in
|
||||
${CMAKE_CURRENT_BINARY_DIR}/eigendoxy_footer.html
|
||||
)
|
||||
|
||||
configure_file(
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/eigendoxy_layout.xml.in
|
||||
${CMAKE_CURRENT_BINARY_DIR}/eigendoxy_layout.xml
|
||||
)
|
||||
|
||||
configure_file(
|
||||
${Eigen_SOURCE_DIR}/unsupported/doc/eigendoxy_layout.xml.in
|
||||
${Eigen_BINARY_DIR}/doc/unsupported/eigendoxy_layout.xml
|
||||
)
|
||||
|
||||
set(examples_targets "")
|
||||
set(snippets_targets "")
|
||||
|
||||
add_definitions("-DEIGEN_MAKING_DOCS")
|
||||
add_custom_target(all_examples)
|
||||
|
||||
add_subdirectory(examples)
|
||||
add_subdirectory(special_examples)
|
||||
add_subdirectory(snippets)
|
||||
|
||||
add_custom_target(
|
||||
doc-eigen-prerequisites
|
||||
ALL
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/html/
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/eigen_navtree_hacks.js ${CMAKE_CURRENT_BINARY_DIR}/html/
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/Eigen_Silly_Professor_64x64.png ${CMAKE_CURRENT_BINARY_DIR}/html/
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/AsciiQuickReference.txt ${CMAKE_CURRENT_BINARY_DIR}/html/
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
|
||||
)
|
||||
|
||||
add_custom_target(
|
||||
doc-unsupported-prerequisites
|
||||
ALL
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${Eigen_BINARY_DIR}/doc/html/unsupported
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/eigen_navtree_hacks.js ${CMAKE_CURRENT_BINARY_DIR}/html/unsupported/
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/Eigen_Silly_Professor_64x64.png ${CMAKE_CURRENT_BINARY_DIR}/html/unsupported/
|
||||
WORKING_DIRECTORY ${Eigen_BINARY_DIR}/doc
|
||||
)
|
||||
|
||||
add_dependencies(doc-eigen-prerequisites all_snippets all_examples)
|
||||
add_dependencies(doc-unsupported-prerequisites unsupported_snippets unsupported_examples)
|
||||
|
||||
add_custom_target(doc ALL
|
||||
COMMAND doxygen
|
||||
COMMAND doxygen Doxyfile-unsupported
|
||||
COMMAND ${CMAKE_COMMAND} -E rename html eigen-doc
|
||||
COMMAND ${CMAKE_COMMAND} -E remove eigen-doc/eigen-doc.tgz
|
||||
COMMAND ${CMAKE_COMMAND} -E tar cfz eigen-doc/eigen-doc.tgz eigen-doc
|
||||
COMMAND ${CMAKE_COMMAND} -E rename eigen-doc html
|
||||
WORKING_DIRECTORY ${Eigen_BINARY_DIR}/doc)
|
||||
|
||||
add_dependencies(doc doc-eigen-prerequisites doc-unsupported-prerequisites)
|
|
@ -1,129 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page TopicClassHierarchy The class hierarchy
|
||||
|
||||
This page explains the design of the core classes in Eigen's class hierarchy and how they fit together. Casual
|
||||
users probably need not concern themselves with these details, but it may be useful for both advanced users
|
||||
and Eigen developers.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
|
||||
\section TopicClassHierarchyPrinciples Principles
|
||||
|
||||
Eigen's class hierarchy is designed so that virtual functions are avoided where their overhead would
|
||||
significantly impair performance. Instead, Eigen achieves polymorphism with the Curiously Recurring Template
|
||||
Pattern (CRTP). In this pattern, the base class (for instance, \c MatrixBase) is in fact a template class, and
|
||||
the derived class (for instance, \c Matrix) inherits the base class with the derived class itself as a
|
||||
template argument (in this case, \c Matrix inherits from \c MatrixBase<Matrix>). This allows Eigen to
|
||||
resolve the polymorphic function calls at compile time.
|
||||
|
||||
In addition, the design avoids multiple inheritance. One reason for this is that in our experience, some
|
||||
compilers (like MSVC) fail to perform empty base class optimization, which is crucial for our fixed-size
|
||||
types.
|
||||
|
||||
|
||||
\section TopicClassHierarchyCoreClasses The core classes
|
||||
|
||||
These are the classes that you need to know about if you want to write functions that accept or return Eigen
|
||||
objects.
|
||||
|
||||
- Matrix means plain dense matrix. If \c m is a \c %Matrix, then, for instance, \c m+m is no longer a
|
||||
\c %Matrix, it is a "matrix expression".
|
||||
- MatrixBase means dense matrix expression. This means that a \c %MatrixBase is something that can be
|
||||
added, matrix-multiplied, LU-decomposed, QR-decomposed... All matrix expression classes, including
|
||||
\c %Matrix itself, inherit \c %MatrixBase.
|
||||
- Array means plain dense array. If \c x is an \c %Array, then, for instance, \c x+x is no longer an
|
||||
\c %Array, it is an "array expression".
|
||||
- ArrayBase means dense array expression. This means that an \c %ArrayBase is something that can be
|
||||
added, array-multiplied, and on which you can perform all sorts of array operations... All array
|
||||
expression classes, including \c %Array itself, inherit \c %ArrayBase.
|
||||
- DenseBase means dense (matrix or array) expression. Both \c %ArrayBase and \c %MatrixBase inherit
|
||||
\c %DenseBase. \c %DenseBase is where all the methods go that apply to dense expressions regardless of
|
||||
whether they are matrix or array expressions. For example, the \link DenseBase::block() block(...) \endlink
|
||||
methods are in \c %DenseBase.
|
||||
|
||||
\section TopicClassHierarchyBaseClasses Base classes
|
||||
|
||||
These classes serve as base classes for the five core classes mentioned above. They are more internal and so
|
||||
less interesting for users of the Eigen library.
|
||||
|
||||
- PlainObjectBase means dense (matrix or array) plain object, i.e. something that stores its own dense
|
||||
array of coefficients. This is where, for instance, the \link PlainObjectBase::resize() resize() \endlink
|
||||
methods go. \c %PlainObjectBase is inherited by \c %Matrix and by \c %Array. But above, we said that
|
||||
\c %Matrix inherits \c %MatrixBase and \c %Array inherits \c %ArrayBase. So does that mean multiple
|
||||
inheritance? No, because \c %PlainObjectBase \e itself inherits \c %MatrixBase or \c %ArrayBase depending
|
||||
on whether we are in the matrix or array case. When we said above that \c %Matrix inherited
|
||||
\c %MatrixBase, we omitted to say it does so indirectly via \c %PlainObjectBase. Same for \c %Array.
|
||||
- DenseCoeffsBase means something that has dense coefficient accessors. It is a base class for
|
||||
\c %DenseBase. The reason for \c %DenseCoeffsBase to exist is that the set of available coefficient
|
||||
accessors is very different depending on whether a dense expression has direct memory access or not (the
|
||||
\c DirectAccessBit flag). For example, if \c x is a plain matrix, then \c x has direct access, and
|
||||
\c x.transpose() and \c x.block(...) also have direct access, because their coefficients can be read right
|
||||
off memory, but for example, \c x+x does not have direct memory access, because obtaining any of its
|
||||
coefficients requires a computation (an addition), it can't be just read off memory.
|
||||
- EigenBase means anything that can be evaluated into a plain dense matrix or array (even if that would
|
||||
be a bad idea). \c %EigenBase is really the absolute base class for anything that remotely looks like a
|
||||
matrix or array. It is a base class for \c %DenseCoeffsBase, so it sits below all our dense class
|
||||
hierarchy, but it is not limited to dense expressions. For example, \c %EigenBase is also inherited by
|
||||
diagonal matrices, sparse matrices, etc...
|
||||
|
||||
|
||||
\section TopicClassHierarchyInheritanceDiagrams Inheritance diagrams
|
||||
|
||||
The inheritance diagram for Matrix looks as follows:
|
||||
|
||||
<pre>
|
||||
EigenBase<%Matrix>
|
||||
<-- DenseCoeffsBase<%Matrix> (direct access case)
|
||||
<-- DenseBase<%Matrix>
|
||||
<-- MatrixBase<%Matrix>
|
||||
<-- PlainObjectBase<%Matrix> (matrix case)
|
||||
<-- Matrix
|
||||
</pre>
|
||||
|
||||
The inheritance diagram for Array looks as follows:
|
||||
|
||||
<pre>
|
||||
EigenBase<%Array>
|
||||
<-- DenseCoeffsBase<%Array> (direct access case)
|
||||
<-- DenseBase<%Array>
|
||||
<-- ArrayBase<%Array>
|
||||
<-- PlainObjectBase<%Array> (array case)
|
||||
<-- Array
|
||||
</pre>
|
||||
|
||||
The inheritance diagram for some other matrix expression class, here denoted by \c SomeMatrixXpr, looks as
|
||||
follows:
|
||||
|
||||
<pre>
|
||||
EigenBase<SomeMatrixXpr>
|
||||
<-- DenseCoeffsBase<SomeMatrixXpr> (direct access or no direct access case)
|
||||
<-- DenseBase<SomeMatrixXpr>
|
||||
<-- MatrixBase<SomeMatrixXpr>
|
||||
<-- SomeMatrixXpr
|
||||
</pre>
|
||||
|
||||
The inheritance diagram for some other array expression class, here denoted by \c SomeArrayXpr, looks as
|
||||
follows:
|
||||
|
||||
<pre>
|
||||
EigenBase<SomeArrayXpr>
|
||||
<-- DenseCoeffsBase<SomeArrayXpr> (direct access or no direct access case)
|
||||
<-- DenseBase<SomeArrayXpr>
|
||||
<-- ArrayBase<SomeArrayXpr>
|
||||
<-- SomeArrayXpr
|
||||
</pre>
|
||||
|
||||
Finally, consider an example of something that is not a dense expression, for instance a diagonal matrix. The
|
||||
corresponding inheritance diagram is:
|
||||
|
||||
<pre>
|
||||
EigenBase<%DiagonalMatrix>
|
||||
<-- DiagonalBase<%DiagonalMatrix>
|
||||
<-- DiagonalMatrix
|
||||
</pre>
|
||||
|
||||
|
||||
*/
|
||||
}
|
|
@ -1,188 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page TopicCustomizingEigen Customizing/Extending Eigen
|
||||
|
||||
Eigen can be extended in several ways, for instance, by defining global methods, \ref ExtendingMatrixBase "by adding custom methods to MatrixBase", adding support to \ref CustomScalarType "custom types" etc.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section ExtendingMatrixBase Extending MatrixBase (and other classes)
|
||||
|
||||
In this section we will see how to add custom methods to MatrixBase. Since all expressions and matrix types inherit MatrixBase, adding a method to MatrixBase make it immediately available to all expressions ! A typical use case is, for instance, to make Eigen compatible with another API.
|
||||
|
||||
You certainly know that in C++ it is not possible to add methods to an existing class. So how that's possible ? Here the trick is to include in the declaration of MatrixBase a file defined by the preprocessor token \c EIGEN_MATRIXBASE_PLUGIN:
|
||||
\code
|
||||
class MatrixBase {
|
||||
// ...
|
||||
#ifdef EIGEN_MATRIXBASE_PLUGIN
|
||||
#include EIGEN_MATRIXBASE_PLUGIN
|
||||
#endif
|
||||
};
|
||||
\endcode
|
||||
Therefore to extend MatrixBase with your own methods you just have to create a file with your method declaration and define EIGEN_MATRIXBASE_PLUGIN before you include any Eigen's header file.
|
||||
|
||||
You can extend many of the other classes used in Eigen by defining similarly named preprocessor symbols. For instance, define \c EIGEN_ARRAYBASE_PLUGIN if you want to extend the ArrayBase class. A full list of classes that can be extended in this way and the corresponding preprocessor symbols can be found on our page \ref TopicPreprocessorDirectives.
|
||||
|
||||
Here is an example of an extension file for adding methods to MatrixBase: \n
|
||||
\b MatrixBaseAddons.h
|
||||
\code
|
||||
inline Scalar at(uint i, uint j) const { return this->operator()(i,j); }
|
||||
inline Scalar& at(uint i, uint j) { return this->operator()(i,j); }
|
||||
inline Scalar at(uint i) const { return this->operator[](i); }
|
||||
inline Scalar& at(uint i) { return this->operator[](i); }
|
||||
|
||||
inline RealScalar squaredLength() const { return squaredNorm(); }
|
||||
inline RealScalar length() const { return norm(); }
|
||||
inline RealScalar invLength(void) const { return fast_inv_sqrt(squaredNorm()); }
|
||||
|
||||
template<typename OtherDerived>
|
||||
inline Scalar squaredDistanceTo(const MatrixBase<OtherDerived>& other) const
|
||||
{ return (derived() - other.derived()).squaredNorm(); }
|
||||
|
||||
template<typename OtherDerived>
|
||||
inline RealScalar distanceTo(const MatrixBase<OtherDerived>& other) const
|
||||
{ return internal::sqrt(derived().squaredDistanceTo(other)); }
|
||||
|
||||
inline void scaleTo(RealScalar l) { RealScalar vl = norm(); if (vl>1e-9) derived() *= (l/vl); }
|
||||
|
||||
inline Transpose<Derived> transposed() {return this->transpose();}
|
||||
inline const Transpose<Derived> transposed() const {return this->transpose();}
|
||||
|
||||
inline uint minComponentId(void) const { int i; this->minCoeff(&i); return i; }
|
||||
inline uint maxComponentId(void) const { int i; this->maxCoeff(&i); return i; }
|
||||
|
||||
template<typename OtherDerived>
|
||||
void makeFloor(const MatrixBase<OtherDerived>& other) { derived() = derived().cwiseMin(other.derived()); }
|
||||
template<typename OtherDerived>
|
||||
void makeCeil(const MatrixBase<OtherDerived>& other) { derived() = derived().cwiseMax(other.derived()); }
|
||||
|
||||
const CwiseUnaryOp<internal::scalar_add_op<Scalar>, Derived>
|
||||
operator+(const Scalar& scalar) const
|
||||
{ return CwiseUnaryOp<internal::scalar_add_op<Scalar>, Derived>(derived(), internal::scalar_add_op<Scalar>(scalar)); }
|
||||
|
||||
friend const CwiseUnaryOp<internal::scalar_add_op<Scalar>, Derived>
|
||||
operator+(const Scalar& scalar, const MatrixBase<Derived>& mat)
|
||||
{ return CwiseUnaryOp<internal::scalar_add_op<Scalar>, Derived>(mat.derived(), internal::scalar_add_op<Scalar>(scalar)); }
|
||||
\endcode
|
||||
|
||||
Then one can the following declaration in the config.h or whatever prerequisites header file of his project:
|
||||
\code
|
||||
#define EIGEN_MATRIXBASE_PLUGIN "MatrixBaseAddons.h"
|
||||
\endcode
|
||||
|
||||
\section InheritingFromMatrix Inheriting from Matrix
|
||||
|
||||
Before inheriting from Matrix, be really, i mean REALLY sure that using
|
||||
EIGEN_MATRIX_PLUGIN is not what you really want (see previous section).
|
||||
If you just need to add few members to Matrix, this is the way to go.
|
||||
|
||||
An example of when you actually need to inherit Matrix, is when you have
|
||||
several layers of heritage such as MyVerySpecificVector1,MyVerySpecificVector1 -> MyVector1 -> Matrix and.
|
||||
MyVerySpecificVector3,MyVerySpecificVector4 -> MyVector2 -> Matrix.
|
||||
|
||||
In order for your object to work within the %Eigen framework, you need to
|
||||
define a few members in your inherited class.
|
||||
|
||||
Here is a minimalistic example:\n
|
||||
\code
|
||||
class MyVectorType : public Eigen::VectorXd
|
||||
{
|
||||
public:
|
||||
MyVectorType(void):Eigen::VectorXd() {}
|
||||
|
||||
typedef Eigen::VectorXd Base;
|
||||
|
||||
// This constructor allows you to construct MyVectorType from Eigen expressions
|
||||
template<typename OtherDerived>
|
||||
MyVectorType(const Eigen::MatrixBase<OtherDerived>& other)
|
||||
: Eigen::Vector3d(other)
|
||||
{ }
|
||||
|
||||
// This method allows you to assign Eigen expressions to MyVectorType
|
||||
template<typename OtherDerived>
|
||||
MyVectorType & operator= (const Eigen::MatrixBase <OtherDerived>& other)
|
||||
{
|
||||
this->Base::operator=(other);
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
\endcode
|
||||
|
||||
This is the kind of error you can get if you don't provide those methods
|
||||
\code
|
||||
error: no match for ‘operator=’ in ‘delta =
|
||||
(((Eigen::MatrixBase<Eigen::Matrix<std::complex<float>, 10000, 1, 2, 10000,
|
||||
1> >*)(& delta)) + 8u)->Eigen::MatrixBase<Derived>::cwise [with Derived =
|
||||
Eigen::Matrix<std::complex<float>, 10000, 1, 2, 10000,
|
||||
1>]().Eigen::Cwise<ExpressionType>::operator* [with OtherDerived =
|
||||
Eigen::Matrix<std::complex<float>, 10000, 1, 2, 10000, 1>, ExpressionType =
|
||||
Eigen::Matrix<std::complex<float>, 10000, 1, 2, 10000, 1>](((const
|
||||
Eigen::MatrixBase<Eigen::Matrix<std::complex<float>, 10000, 1, 2, 10000, 1>
|
||||
>&)(((const Eigen::MatrixBase<Eigen::Matrix<std::complex<float>, 10000, 1,
|
||||
>2, 10000, 1> >*)((const spectral1d*)where)) + 8u)))’
|
||||
\endcode
|
||||
|
||||
\anchor user_defined_scalars \section CustomScalarType Using custom scalar types
|
||||
|
||||
By default, Eigen currently supports standard floating-point types (\c float, \c double, \c std::complex<float>, \c std::complex<double>, \c long \c double), as well as all native integer types (e.g., \c int, \c unsigned \c int, \c short, etc.), and \c bool.
|
||||
On x86-64 systems, \c long \c double permits to locally enforces the use of x87 registers with extended accuracy (in comparison to SSE).
|
||||
|
||||
In order to add support for a custom type \c T you need:
|
||||
-# make sure the common operator (+,-,*,/,etc.) are supported by the type \c T
|
||||
-# add a specialization of struct Eigen::NumTraits<T> (see \ref NumTraits)
|
||||
-# define the math functions that makes sense for your type. This includes standard ones like sqrt, pow, sin, tan, conj, real, imag, etc, as well as abs2 which is Eigen specific.
|
||||
(see the file Eigen/src/Core/MathFunctions.h)
|
||||
|
||||
The math function should be defined in the same namespace than \c T, or in the \c std namespace though that second approach is not recommended.
|
||||
|
||||
Here is a concrete example adding support for the Adolc's \c adouble type. <a href="https://projects.coin-or.org/ADOL-C">Adolc</a> is an automatic differentiation library. The type \c adouble is basically a real value tracking the values of any number of partial derivatives.
|
||||
|
||||
\code
|
||||
#ifndef ADOLCSUPPORT_H
|
||||
#define ADOLCSUPPORT_H
|
||||
|
||||
#define ADOLC_TAPELESS
|
||||
#include <adolc/adouble.h>
|
||||
#include <Eigen/Core>
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
template<> struct NumTraits<adtl::adouble>
|
||||
: NumTraits<double> // permits to get the epsilon, dummy_precision, lowest, highest functions
|
||||
{
|
||||
typedef adtl::adouble Real;
|
||||
typedef adtl::adouble NonInteger;
|
||||
typedef adtl::adouble Nested;
|
||||
|
||||
enum {
|
||||
IsComplex = 0,
|
||||
IsInteger = 0,
|
||||
IsSigned = 1,
|
||||
RequireInitialization = 1,
|
||||
ReadCost = 1,
|
||||
AddCost = 3,
|
||||
MulCost = 3
|
||||
};
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
namespace adtl {
|
||||
|
||||
inline const adouble& conj(const adouble& x) { return x; }
|
||||
inline const adouble& real(const adouble& x) { return x; }
|
||||
inline adouble imag(const adouble&) { return 0.; }
|
||||
inline adouble abs(const adouble& x) { return fabs(x); }
|
||||
inline adouble abs2(const adouble& x) { return x*x; }
|
||||
|
||||
}
|
||||
|
||||
#endif // ADOLCSUPPORT_H
|
||||
\endcode
|
||||
|
||||
|
||||
\sa \ref TopicPreprocessorDirectives
|
||||
|
||||
*/
|
||||
|
||||
}
|
File diff suppressed because it is too large
Load Diff
Binary file not shown.
Before Width: | Height: | Size: 8.2 KiB |
|
@ -1,38 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TopicFixedSizeVectorizable Fixed-size vectorizable Eigen objects
|
||||
|
||||
The goal of this page is to explain what we mean by "fixed-size vectorizable".
|
||||
|
||||
\section summary Executive Summary
|
||||
|
||||
An Eigen object is called "fixed-size vectorizable" if it has fixed size and that size is a multiple of 16 bytes.
|
||||
|
||||
Examples include:
|
||||
\li Eigen::Vector2d
|
||||
\li Eigen::Vector4d
|
||||
\li Eigen::Vector4f
|
||||
\li Eigen::Matrix2d
|
||||
\li Eigen::Matrix2f
|
||||
\li Eigen::Matrix4d
|
||||
\li Eigen::Matrix4f
|
||||
\li Eigen::Affine3d
|
||||
\li Eigen::Affine3f
|
||||
\li Eigen::Quaterniond
|
||||
\li Eigen::Quaternionf
|
||||
|
||||
\section explanation Explanation
|
||||
|
||||
First, "fixed-size" should be clear: an Eigen object has fixed size if its number of rows and its number of columns are fixed at compile-time. So for example Matrix3f has fixed size, but MatrixXf doesn't (the opposite of fixed-size is dynamic-size).
|
||||
|
||||
The array of coefficients of a fixed-size Eigen object is a plain "static array", it is not dynamically allocated. For example, the data behind a Matrix4f is just a "float array[16]".
|
||||
|
||||
Fixed-size objects are typically very small, which means that we want to handle them with zero runtime overhead -- both in terms of memory usage and of speed.
|
||||
|
||||
Now, vectorization (both SSE and AltiVec) works with 128-bit packets. Moreover, for performance reasons, these packets need to be have 128-bit alignment.
|
||||
|
||||
So it turns out that the only way that fixed-size Eigen objects can be vectorized, is if their size is a multiple of 128 bits, or 16 bytes. Eigen will then request 16-byte alignment for these objects, and henceforth rely on these objects being aligned so no runtime check for alignment is performed.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,217 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page TopicFunctionTakingEigenTypes Writing Functions Taking %Eigen Types as Parameters
|
||||
|
||||
%Eigen's use of expression templates results in potentially every expression being of a different type. If you pass such an expression to a function taking a parameter of type Matrix, your expression will implicitly be evaluated into a temporary Matrix, which will then be passed to the function. This means that you lose the benefit of expression templates. Concretely, this has two drawbacks:
|
||||
\li The evaluation into a temporary may be useless and inefficient;
|
||||
\li This only allows the function to read from the expression, not to write to it.
|
||||
|
||||
Fortunately, all this myriad of expression types have in common that they all inherit a few common, templated base classes. By letting your function take templated parameters of these base types, you can let them play nicely with %Eigen's expression templates.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section TopicFirstExamples Some First Examples
|
||||
|
||||
This section will provide simple examples for different types of objects %Eigen is offering. Before starting with the actual examples, we need to recapitulate which base objects we can work with (see also \ref TopicClassHierarchy).
|
||||
|
||||
\li MatrixBase: The common base class for all dense matrix expressions (as opposed to array expressions, as opposed to sparse and special matrix classes). Use it in functions that are meant to work only on dense matrices.
|
||||
\li ArrayBase: The common base class for all dense array expressions (as opposed to matrix expressions, etc). Use it in functions that are meant to work only on arrays.
|
||||
\li DenseBase: The common base class for all dense matrix expression, that is, the base class for both \c MatrixBase and \c ArrayBase. It can be used in functions that are meant to work on both matrices and arrays.
|
||||
\li EigenBase: The base class unifying all types of objects that can be evaluated into dense matrices or arrays, for example special matrix classes such as diagonal matrices, permutation matrices, etc. It can be used in functions that are meant to work on any such general type.
|
||||
|
||||
<b> %EigenBase Example </b><br/><br/>
|
||||
Prints the dimensions of the most generic object present in %Eigen. It could be any matrix expressions, any dense or sparse matrix and any array.
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include function_taking_eigenbase.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude function_taking_eigenbase.out
|
||||
</td></tr></table>
|
||||
<b> %DenseBase Example </b><br/><br/>
|
||||
Prints a sub-block of the dense expression. Accepts any dense matrix or array expression, but no sparse objects and no special matrix classes such as DiagonalMatrix.
|
||||
\code
|
||||
template <typename Derived>
|
||||
void print_block(const DenseBase<Derived>& b, int x, int y, int r, int c)
|
||||
{
|
||||
std::cout << "block: " << b.block(x,y,r,c) << std::endl;
|
||||
}
|
||||
\endcode
|
||||
<b> %ArrayBase Example </b><br/><br/>
|
||||
Prints the maximum coefficient of the array or array-expression.
|
||||
\code
|
||||
template <typename Derived>
|
||||
void print_max_coeff(const ArrayBase<Derived> &a)
|
||||
{
|
||||
std::cout << "max: " << a.maxCoeff() << std::endl;
|
||||
}
|
||||
\endcode
|
||||
<b> %MatrixBase Example </b><br/><br/>
|
||||
Prints the inverse condition number of the given matrix or matrix-expression.
|
||||
\code
|
||||
template <typename Derived>
|
||||
void print_inv_cond(const MatrixBase<Derived>& a)
|
||||
{
|
||||
const typename JacobiSVD<typename Derived::PlainObject>::SingularValuesType&
|
||||
sing_vals = a.jacobiSvd().singularValues();
|
||||
std::cout << "inv cond: " << sing_vals(sing_vals.size()-1) / sing_vals(0) << std::endl;
|
||||
}
|
||||
\endcode
|
||||
<b> Multiple templated arguments example </b><br/><br/>
|
||||
Calculate the Euclidean distance between two points.
|
||||
\code
|
||||
template <typename DerivedA,typename DerivedB>
|
||||
typename DerivedA::Scalar squaredist(const MatrixBase<DerivedA>& p1,const MatrixBase<DerivedB>& p2)
|
||||
{
|
||||
return (p1-p2).squaredNorm();
|
||||
}
|
||||
\endcode
|
||||
Notice that we used two template parameters, one per argument. This permits the function to handle inputs of different types, e.g.,
|
||||
\code
|
||||
squaredist(v1,2*v2)
|
||||
\endcode
|
||||
where the first argument \c v1 is a vector and the second argument \c 2*v2 is an expression.
|
||||
<br/><br/>
|
||||
|
||||
These examples are just intended to give the reader a first impression of how functions can be written which take a plain and constant Matrix or Array argument. They are also intended to give the reader an idea about the most common base classes being the optimal candidates for functions. In the next section we will look in more detail at an example and the different ways it can be implemented, while discussing each implementation's problems and advantages. For the discussion below, Matrix and Array as well as MatrixBase and ArrayBase can be exchanged and all arguments still hold.
|
||||
|
||||
|
||||
\section TopicUsingRefClass How to write generic, but non-templated function?
|
||||
|
||||
In all the previous examples, the functions had to be template functions. This approach allows to write very generic code, but it is often desirable to write non templated function and still keep some level of genericity to avoid stupid copies of the arguments. The typical example is to write functions accepting both a MatrixXf or a block of a MatrixXf. This exactly the purpose of the Ref class. Here is a simple example:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include function_taking_ref.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude function_taking_ref.out
|
||||
</td></tr></table>
|
||||
In the first two calls to inv_cond, no copy occur because the memory layout of the arguments matches the memory layout accepted by Ref<MatrixXf>. However, in the last call, we have a generic expression that will be automatically evaluated into a temporary MatrixXf by the Ref<> object.
|
||||
|
||||
A Ref object can also be writable. Here is an example of a function computing the covariance matrix of two input matrices where each row is an observation:
|
||||
\code
|
||||
void cov(const Ref<const MatrixXf> x, const Ref<const MatrixXf> y, Ref<MatrixXf> C)
|
||||
{
|
||||
const float num_observations = static_cast<float>(x.rows());
|
||||
const RowVectorXf x_mean = x.colwise().sum() / num_observations;
|
||||
const RowVectorXf y_mean = y.colwise().sum() / num_observations;
|
||||
C = (x.rowwise() - x_mean).transpose() * (y.rowwise() - y_mean) / num_observations;
|
||||
}
|
||||
\endcode
|
||||
and here are two examples calling cov without any copy:
|
||||
\code
|
||||
MatrixXf m1, m2, m3;
|
||||
cov(m1, m2, m3);
|
||||
cov(m1.leftCols<3>(), m2.leftCols<3>(), m3.topLeftCorner<3,3>());
|
||||
\endcode
|
||||
The Ref<> class has two other optional template arguments allowing to control the kind of memory layout that can be accepted without any copy. See the class Ref documentation for the details.
|
||||
|
||||
\section TopicPlainFunctionsWorking In which cases do functions taking plain Matrix or Array arguments work?
|
||||
|
||||
Without using template functions, and without the Ref class, a naive implementation of the previous cov function might look like this
|
||||
\code
|
||||
MatrixXf cov(const MatrixXf& x, const MatrixXf& y)
|
||||
{
|
||||
const float num_observations = static_cast<float>(x.rows());
|
||||
const RowVectorXf x_mean = x.colwise().sum() / num_observations;
|
||||
const RowVectorXf y_mean = y.colwise().sum() / num_observations;
|
||||
return (x.rowwise() - x_mean).transpose() * (y.rowwise() - y_mean) / num_observations;
|
||||
}
|
||||
\endcode
|
||||
and contrary to what one might think at first, this implementation is fine unless you require a generic implementation that works with double matrices too and unless you do not care about temporary objects. Why is that the case? Where are temporaries involved? How can code as given below compile?
|
||||
\code
|
||||
MatrixXf x,y,z;
|
||||
MatrixXf C = cov(x,y+z);
|
||||
\endcode
|
||||
In this special case, the example is fine and will be working because both parameters are declared as \e const references. The compiler creates a temporary and evaluates the expression y+z into this temporary. Once the function is processed, the temporary is released and the result is assigned to C.
|
||||
|
||||
\b Note: Functions taking \e const references to Matrix (or Array) can process expressions at the cost of temporaries.
|
||||
|
||||
|
||||
\section TopicPlainFunctionsFailing In which cases do functions taking a plain Matrix or Array argument fail?
|
||||
|
||||
Here, we consider a slightly modified version of the function given above. This time, we do not want to return the result but pass an additional non-const parameter which allows us to store the result. A first naive implementation might look as follows.
|
||||
\code
|
||||
// Note: This code is flawed!
|
||||
void cov(const MatrixXf& x, const MatrixXf& y, MatrixXf& C)
|
||||
{
|
||||
const float num_observations = static_cast<float>(x.rows());
|
||||
const RowVectorXf x_mean = x.colwise().sum() / num_observations;
|
||||
const RowVectorXf y_mean = y.colwise().sum() / num_observations;
|
||||
C = (x.rowwise() - x_mean).transpose() * (y.rowwise() - y_mean) / num_observations;
|
||||
}
|
||||
\endcode
|
||||
When trying to execute the following code
|
||||
\code
|
||||
MatrixXf C = MatrixXf::Zero(3,6);
|
||||
cov(x,y, C.block(0,0,3,3));
|
||||
\endcode
|
||||
the compiler will fail, because it is not possible to convert the expression returned by \c MatrixXf::block() into a non-const \c MatrixXf&. This is the case because the compiler wants to protect you from writing your result to a temporary object. In this special case this protection is not intended -- we want to write to a temporary object. So how can we overcome this problem?
|
||||
|
||||
The solution which is preferred at the moment is based on a little \em hack. One needs to pass a const reference to the matrix and internally the constness needs to be cast away. The correct implementation for C++98 compliant compilers would be
|
||||
\code
|
||||
template <typename Derived, typename OtherDerived>
|
||||
void cov(const MatrixBase<Derived>& x, const MatrixBase<Derived>& y, MatrixBase<OtherDerived> const & C)
|
||||
{
|
||||
typedef typename Derived::Scalar Scalar;
|
||||
typedef typename internal::plain_row_type<Derived>::type RowVectorType;
|
||||
|
||||
const Scalar num_observations = static_cast<Scalar>(x.rows());
|
||||
|
||||
const RowVectorType x_mean = x.colwise().sum() / num_observations;
|
||||
const RowVectorType y_mean = y.colwise().sum() / num_observations;
|
||||
|
||||
const_cast< MatrixBase<OtherDerived>& >(C) =
|
||||
(x.rowwise() - x_mean).transpose() * (y.rowwise() - y_mean) / num_observations;
|
||||
}
|
||||
\endcode
|
||||
The implementation above does now not only work with temporary expressions but it also allows to use the function with matrices of arbitrary floating point scalar types.
|
||||
|
||||
\b Note: The const cast hack will only work with templated functions. It will not work with the MatrixXf implementation because it is not possible to cast a Block expression to a Matrix reference!
|
||||
|
||||
|
||||
|
||||
\section TopicResizingInGenericImplementations How to resize matrices in generic implementations?
|
||||
|
||||
One might think we are done now, right? This is not completely true because in order for our covariance function to be generically applicable, we want the following code to work
|
||||
\code
|
||||
MatrixXf x = MatrixXf::Random(100,3);
|
||||
MatrixXf y = MatrixXf::Random(100,3);
|
||||
MatrixXf C;
|
||||
cov(x, y, C);
|
||||
\endcode
|
||||
This is not the case anymore, when we are using an implementation taking MatrixBase as a parameter. In general, %Eigen supports automatic resizing but it is not possible to do so on expressions. Why should resizing of a matrix Block be allowed? It is a reference to a sub-matrix and we definitely don't want to resize that. So how can we incorporate resizing if we cannot resize on MatrixBase? The solution is to resize the derived object as in this implementation.
|
||||
\code
|
||||
template <typename Derived, typename OtherDerived>
|
||||
void cov(const MatrixBase<Derived>& x, const MatrixBase<Derived>& y, MatrixBase<OtherDerived> const & C_)
|
||||
{
|
||||
typedef typename Derived::Scalar Scalar;
|
||||
typedef typename internal::plain_row_type<Derived>::type RowVectorType;
|
||||
|
||||
const Scalar num_observations = static_cast<Scalar>(x.rows());
|
||||
|
||||
const RowVectorType x_mean = x.colwise().sum() / num_observations;
|
||||
const RowVectorType y_mean = y.colwise().sum() / num_observations;
|
||||
|
||||
MatrixBase<OtherDerived>& C = const_cast< MatrixBase<OtherDerived>& >(C_);
|
||||
|
||||
C.derived().resize(x.cols(),x.cols()); // resize the derived object
|
||||
C = (x.rowwise() - x_mean).transpose() * (y.rowwise() - y_mean) / num_observations;
|
||||
}
|
||||
\endcode
|
||||
This implementation is now working for parameters being expressions and for parameters being matrices and having the wrong size. Resizing the expressions does not do any harm in this case unless they actually require resizing. That means, passing an expression with the wrong dimensions will result in a run-time error (in debug mode only) while passing expressions of the correct size will just work fine.
|
||||
|
||||
\b Note: In the above discussion the terms Matrix and Array and MatrixBase and ArrayBase can be exchanged and all arguments still hold.
|
||||
|
||||
\section TopicSummary Summary
|
||||
|
||||
- To summarize, the implementation of functions taking non-writable (const referenced) objects is not a big issue and does not lead to problematic situations in terms of compiling and running your program. However, a naive implementation is likely to introduce unnecessary temporary objects in your code. In order to avoid evaluating parameters into temporaries, pass them as (const) references to MatrixBase or ArrayBase (so templatize your function).
|
||||
|
||||
- Functions taking writable (non-const) parameters must take const references and cast away constness within the function body.
|
||||
|
||||
- Functions that take as parameters MatrixBase (or ArrayBase) objects, and potentially need to resize them (in the case where they are resizable), must call resize() on the derived class, as returned by derived().
|
||||
*/
|
||||
}
|
|
@ -1,128 +0,0 @@
|
|||
|
||||
namespace Eigen {
|
||||
|
||||
/** \page TopicWritingEfficientProductExpression Writing efficient matrix product expressions
|
||||
|
||||
In general achieving good performance with Eigen does not require any special effort:
|
||||
simply write your expressions in the most high level way. This is especially true
|
||||
for small fixed size matrices. For large matrices, however, it might be useful to
|
||||
take some care when writing your expressions in order to minimize useless evaluations
|
||||
and optimize the performance.
|
||||
In this page we will give a brief overview of the Eigen's internal mechanism to simplify
|
||||
and evaluate complex product expressions, and discuss the current limitations.
|
||||
In particular we will focus on expressions matching level 2 and 3 BLAS routines, i.e,
|
||||
all kind of matrix products and triangular solvers.
|
||||
|
||||
Indeed, in Eigen we have implemented a set of highly optimized routines which are very similar
|
||||
to BLAS's ones. Unlike BLAS, those routines are made available to user via a high level and
|
||||
natural API. Each of these routines can compute in a single evaluation a wide variety of expressions.
|
||||
Given an expression, the challenge is then to map it to a minimal set of routines.
|
||||
As explained later, this mechanism has some limitations, and knowing them will allow
|
||||
you to write faster code by making your expressions more Eigen friendly.
|
||||
|
||||
\section GEMM General Matrix-Matrix product (GEMM)
|
||||
|
||||
Let's start with the most common primitive: the matrix product of general dense matrices.
|
||||
In the BLAS world this corresponds to the GEMM routine. Our equivalent primitive can
|
||||
perform the following operation:
|
||||
\f$ C.noalias() += \alpha op1(A) op2(B) \f$
|
||||
where A, B, and C are column and/or row major matrices (or sub-matrices),
|
||||
alpha is a scalar value, and op1, op2 can be transpose, adjoint, conjugate, or the identity.
|
||||
When Eigen detects a matrix product, it analyzes both sides of the product to extract a
|
||||
unique scalar factor alpha, and for each side, its effective storage order, shape, and conjugation states.
|
||||
More precisely each side is simplified by iteratively removing trivial expressions such as scalar multiple,
|
||||
negation and conjugation. Transpose and Block expressions are not evaluated and they only modify the storage order
|
||||
and shape. All other expressions are immediately evaluated.
|
||||
For instance, the following expression:
|
||||
\code m1.noalias() -= s4 * (s1 * m2.adjoint() * (-(s3*m3).conjugate()*s2)) \endcode
|
||||
is automatically simplified to:
|
||||
\code m1.noalias() += (s1*s2*conj(s3)*s4) * m2.adjoint() * m3.conjugate() \endcode
|
||||
which exactly matches our GEMM routine.
|
||||
|
||||
\subsection GEMM_Limitations Limitations
|
||||
Unfortunately, this simplification mechanism is not perfect yet and not all expressions which could be
|
||||
handled by a single GEMM-like call are correctly detected.
|
||||
<table class="manual" style="width:100%">
|
||||
<tr>
|
||||
<th>Not optimal expression</th>
|
||||
<th>Evaluated as</th>
|
||||
<th>Optimal version (single evaluation)</th>
|
||||
<th>Comments</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>\code
|
||||
m1 += m2 * m3; \endcode</td>
|
||||
<td>\code
|
||||
temp = m2 * m3;
|
||||
m1 += temp; \endcode</td>
|
||||
<td>\code
|
||||
m1.noalias() += m2 * m3; \endcode</td>
|
||||
<td>Use .noalias() to tell Eigen the result and right-hand-sides do not alias.
|
||||
Otherwise the product m2 * m3 is evaluated into a temporary.</td>
|
||||
</tr>
|
||||
<tr class="alt">
|
||||
<td></td>
|
||||
<td></td>
|
||||
<td>\code
|
||||
m1.noalias() += s1 * (m2 * m3); \endcode</td>
|
||||
<td>This is a special feature of Eigen. Here the product between a scalar
|
||||
and a matrix product does not evaluate the matrix product but instead it
|
||||
returns a matrix product expression tracking the scalar scaling factor. <br>
|
||||
Without this optimization, the matrix product would be evaluated into a
|
||||
temporary as in the next example.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>\code
|
||||
m1.noalias() += (m2 * m3).adjoint(); \endcode</td>
|
||||
<td>\code
|
||||
temp = m2 * m3;
|
||||
m1 += temp.adjoint(); \endcode</td>
|
||||
<td>\code
|
||||
m1.noalias() += m3.adjoint()
|
||||
* m2.adjoint(); \endcode</td>
|
||||
<td>This is because the product expression has the EvalBeforeNesting bit which
|
||||
enforces the evaluation of the product by the Transpose expression.</td>
|
||||
</tr>
|
||||
<tr class="alt">
|
||||
<td>\code
|
||||
m1 = m1 + m2 * m3; \endcode</td>
|
||||
<td>\code
|
||||
temp = m2 * m3;
|
||||
m1 = m1 + temp; \endcode</td>
|
||||
<td>\code m1.noalias() += m2 * m3; \endcode</td>
|
||||
<td>Here there is no way to detect at compile time that the two m1 are the same,
|
||||
and so the matrix product will be immediately evaluated.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>\code
|
||||
m1.noalias() = m4 + m2 * m3; \endcode</td>
|
||||
<td>\code
|
||||
temp = m2 * m3;
|
||||
m1 = m4 + temp; \endcode</td>
|
||||
<td>\code
|
||||
m1 = m4;
|
||||
m1.noalias() += m2 * m3; \endcode</td>
|
||||
<td>First of all, here the .noalias() in the first expression is useless because
|
||||
m2*m3 will be evaluated anyway. However, note how this expression can be rewritten
|
||||
so that no temporary is required. (tip: for very small fixed size matrix
|
||||
it is slightly better to rewrite it like this: m1.noalias() = m2 * m3; m1 += m4;)</td>
|
||||
</tr>
|
||||
<tr class="alt">
|
||||
<td>\code
|
||||
m1.noalias() += (s1*m2).block(..) * m3; \endcode</td>
|
||||
<td>\code
|
||||
temp = (s1*m2).block(..);
|
||||
m1 += temp * m3; \endcode</td>
|
||||
<td>\code
|
||||
m1.noalias() += s1 * m2.block(..) * m3; \endcode</td>
|
||||
<td>This is because our expression analyzer is currently not able to extract trivial
|
||||
expressions nested in a Block expression. Therefore the nested scalar
|
||||
multiple cannot be properly extracted.</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
Of course all these remarks hold for all other kind of products involving triangular or selfadjoint matrices.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,495 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page TopicInsideEigenExample What happens inside Eigen, on a simple example
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
<hr>
|
||||
|
||||
|
||||
Consider the following example program:
|
||||
|
||||
\code
|
||||
#include<Eigen/Core>
|
||||
|
||||
int main()
|
||||
{
|
||||
int size = 50;
|
||||
// VectorXf is a vector of floats, with dynamic size.
|
||||
Eigen::VectorXf u(size), v(size), w(size);
|
||||
u = v + w;
|
||||
}
|
||||
\endcode
|
||||
|
||||
The goal of this page is to understand how Eigen compiles it, assuming that SSE2 vectorization is enabled (GCC option -msse2).
|
||||
|
||||
\section WhyInteresting Why it's interesting
|
||||
|
||||
Maybe you think that the above example program is so simple that compiling it shouldn't involve anything interesting. So before starting, let us explain what is nontrivial in compiling it correctly -- that is, producing optimized code -- so that the complexity of Eigen, that we'll explain here, is really useful.
|
||||
|
||||
Look at the line of code
|
||||
\code
|
||||
u = v + w; // (*)
|
||||
\endcode
|
||||
|
||||
The first important thing about compiling it, is that the arrays should be traversed only once, like
|
||||
\code
|
||||
for(int i = 0; i < size; i++) u[i] = v[i] + w[i];
|
||||
\endcode
|
||||
The problem is that if we make a naive C++ library where the VectorXf class has an operator+ returning a VectorXf, then the line of code (*) will amount to:
|
||||
\code
|
||||
VectorXf tmp = v + w;
|
||||
VectorXf u = tmp;
|
||||
\endcode
|
||||
Obviously, the introduction of the temporary \a tmp here is useless. It has a very bad effect on performance, first because the creation of \a tmp requires a dynamic memory allocation in this context, and second as there are now two for loops:
|
||||
\code
|
||||
for(int i = 0; i < size; i++) tmp[i] = v[i] + w[i];
|
||||
for(int i = 0; i < size; i++) u[i] = tmp[i];
|
||||
\endcode
|
||||
Traversing the arrays twice instead of once is terrible for performance, as it means that we do many redundant memory accesses.
|
||||
|
||||
The second important thing about compiling the above program, is to make correct use of SSE2 instructions. Notice that Eigen also supports AltiVec and that all the discussion that we make here applies also to AltiVec.
|
||||
|
||||
SSE2, like AltiVec, is a set of instructions allowing to perform computations on packets of 128 bits at once. Since a float is 32 bits, this means that SSE2 instructions can handle 4 floats at once. This means that, if correctly used, they can make our computation go up to 4x faster.
|
||||
|
||||
However, in the above program, we have chosen size=50, so our vectors consist of 50 float's, and 50 is not a multiple of 4. This means that we cannot hope to do all of that computation using SSE2 instructions. The second best thing, to which we should aim, is to handle the 48 first coefficients with SSE2 instructions, since 48 is the biggest multiple of 4 below 50, and then handle separately, without SSE2, the 49th and 50th coefficients. Something like this:
|
||||
|
||||
\code
|
||||
for(int i = 0; i < 4*(size/4); i+=4) u.packet(i) = v.packet(i) + w.packet(i);
|
||||
for(int i = 4*(size/4); i < size; i++) u[i] = v[i] + w[i];
|
||||
\endcode
|
||||
|
||||
So let us look line by line at our example program, and let's follow Eigen as it compiles it.
|
||||
|
||||
\section ConstructingVectors Constructing vectors
|
||||
|
||||
Let's analyze the first line:
|
||||
|
||||
\code
|
||||
Eigen::VectorXf u(size), v(size), w(size);
|
||||
\endcode
|
||||
|
||||
First of all, VectorXf is the following typedef:
|
||||
\code
|
||||
typedef Matrix<float, Dynamic, 1> VectorXf;
|
||||
\endcode
|
||||
|
||||
The class template Matrix is declared in src/Core/util/ForwardDeclarations.h with 6 template parameters, but the last 3 are automatically determined by the first 3. So you don't need to worry about them for now. Here, Matrix\<float, Dynamic, 1\> means a matrix of floats, with a dynamic number of rows and 1 column.
|
||||
|
||||
The Matrix class inherits a base class, MatrixBase. Don't worry about it, for now it suffices to say that MatrixBase is what unifies matrices/vectors and all the expressions types -- more on that below.
|
||||
|
||||
When we do
|
||||
\code
|
||||
Eigen::VectorXf u(size);
|
||||
\endcode
|
||||
the constructor that is called is Matrix::Matrix(int), in src/Core/Matrix.h. Besides some assertions, all it does is to construct the \a m_storage member, which is of type DenseStorage\<float, Dynamic, Dynamic, 1\>.
|
||||
|
||||
You may wonder, isn't it overengineering to have the storage in a separate class? The reason is that the Matrix class template covers all kinds of matrices and vectors: both fixed-size and dynamic-size. The storage method is not the same in these two cases. For fixed-size, the matrix coefficients are stored as a plain member array. For dynamic-size, the coefficients will be stored as a pointer to a dynamically-allocated array. Because of this, we need to abstract storage away from the Matrix class. That's DenseStorage.
|
||||
|
||||
Let's look at this constructor, in src/Core/DenseStorage.h. You can see that there are many partial template specializations of DenseStorages here, treating separately the cases where dimensions are Dynamic or fixed at compile-time. The partial specialization that we are looking at is:
|
||||
\code
|
||||
template<typename T, int _Cols> class DenseStorage<T, Dynamic, Dynamic, _Cols>
|
||||
\endcode
|
||||
|
||||
Here, the constructor called is DenseStorage::DenseStorage(int size, int rows, int columns)
|
||||
with size=50, rows=50, columns=1.
|
||||
|
||||
Here is this constructor:
|
||||
\code
|
||||
inline DenseStorage(int size, int rows, int) : m_data(internal::aligned_new<T>(size)), m_rows(rows) {}
|
||||
\endcode
|
||||
|
||||
Here, the \a m_data member is the actual array of coefficients of the matrix. As you see, it is dynamically allocated. Rather than calling new[] or malloc(), as you can see, we have our own internal::aligned_new defined in src/Core/util/Memory.h. What it does is that if vectorization is enabled, then it uses a platform-specific call to allocate a 128-bit-aligned array, as that is very useful for vectorization with both SSE2 and AltiVec. If vectorization is disabled, it amounts to the standard new[].
|
||||
|
||||
As you can see, the constructor also sets the \a m_rows member to \a size. Notice that there is no \a m_columns member: indeed, in this partial specialization of DenseStorage, we know the number of columns at compile-time, since the _Cols template parameter is different from Dynamic. Namely, in our case, _Cols is 1, which is to say that our vector is just a matrix with 1 column. Hence, there is no need to store the number of columns as a runtime variable.
|
||||
|
||||
When you call VectorXf::data() to get the pointer to the array of coefficients, it returns DenseStorage::data() which returns the \a m_data member.
|
||||
|
||||
When you call VectorXf::size() to get the size of the vector, this is actually a method in the base class MatrixBase. It determines that the vector is a column-vector, since ColsAtCompileTime==1 (this comes from the template parameters in the typedef VectorXf). It deduces that the size is the number of rows, so it returns VectorXf::rows(), which returns DenseStorage::rows(), which returns the \a m_rows member, which was set to \a size by the constructor.
|
||||
|
||||
\section ConstructionOfSumXpr Construction of the sum expression
|
||||
|
||||
Now that our vectors are constructed, let's move on to the next line:
|
||||
|
||||
\code
|
||||
u = v + w;
|
||||
\endcode
|
||||
|
||||
The executive summary is that operator+ returns a "sum of vectors" expression, but doesn't actually perform the computation. It is the operator=, whose call occurs thereafter, that does the computation.
|
||||
|
||||
Let us now see what Eigen does when it sees this:
|
||||
|
||||
\code
|
||||
v + w
|
||||
\endcode
|
||||
|
||||
Here, v and w are of type VectorXf, which is a typedef for a specialization of Matrix (as we explained above), which is a subclass of MatrixBase. So what is being called is
|
||||
|
||||
\code
|
||||
MatrixBase::operator+(const MatrixBase&)
|
||||
\endcode
|
||||
|
||||
The return type of this operator is
|
||||
\code
|
||||
CwiseBinaryOp<internal::scalar_sum_op<float>, VectorXf, VectorXf>
|
||||
\endcode
|
||||
The CwiseBinaryOp class is our first encounter with an expression template. As we said, the operator+ doesn't by itself perform any computation, it just returns an abstract "sum of vectors" expression. Since there are also "difference of vectors" and "coefficient-wise product of vectors" expressions, we unify them all as "coefficient-wise binary operations", which we abbreviate as "CwiseBinaryOp". "Coefficient-wise" means that the operation is performed coefficient by coefficient. "binary" means that there are two operands -- we are adding two vectors with one another.
|
||||
|
||||
Now you might ask, what if we did something like
|
||||
|
||||
\code
|
||||
v + w + u;
|
||||
\endcode
|
||||
|
||||
The first v + w would return a CwiseBinaryOp as above, so in order for this to compile, we'd need to define an operator+ also in the class CwiseBinaryOp... at this point it starts looking like a nightmare: are we going to have to define all operators in each of the expression classes (as you guessed, CwiseBinaryOp is only one of many) ? This looks like a dead end!
|
||||
|
||||
The solution is that CwiseBinaryOp itself, as well as Matrix and all the other expression types, is a subclass of MatrixBase. So it is enough to define once and for all the operators in class MatrixBase.
|
||||
|
||||
Since MatrixBase is the common base class of different subclasses, the aspects that depend on the subclass must be abstracted from MatrixBase. This is called polymorphism.
|
||||
|
||||
The classical approach to polymorphism in C++ is by means of virtual functions. This is dynamic polymorphism. Here we don't want dynamic polymorphism because the whole design of Eigen is based around the assumption that all the complexity, all the abstraction, gets resolved at compile-time. This is crucial: if the abstraction can't get resolved at compile-time, Eigen's compile-time optimization mechanisms become useless, not to mention that if that abstraction has to be resolved at runtime it'll incur an overhead by itself.
|
||||
|
||||
Here, what we want is to have a single class MatrixBase as the base of many subclasses, in such a way that each MatrixBase object (be it a matrix, or vector, or any kind of expression) knows at compile-time (as opposed to run-time) of which particular subclass it is an object (i.e. whether it is a matrix, or an expression, and what kind of expression).
|
||||
|
||||
The solution is the <a href="http://en.wikipedia.org/wiki/Curiously_Recurring_Template_Pattern">Curiously Recurring Template Pattern</a>. Let's do the break now. Hopefully you can read this wikipedia page during the break if needed, but it won't be allowed during the exam.
|
||||
|
||||
In short, MatrixBase takes a template parameter \a Derived. Whenever we define a subclass Subclass, we actually make Subclass inherit MatrixBase\<Subclass\>. The point is that different subclasses inherit different MatrixBase types. Thanks to this, whenever we have an object of a subclass, and we call on it some MatrixBase method, we still remember even from inside the MatrixBase method which particular subclass we're talking about.
|
||||
|
||||
This means that we can put almost all the methods and operators in the base class MatrixBase, and have only the bare minimum in the subclasses. If you look at the subclasses in Eigen, like for instance the CwiseBinaryOp class, they have very few methods. There are coeff() and sometimes coeffRef() methods for access to the coefficients, there are rows() and cols() methods returning the number of rows and columns, but there isn't much more than that. All the meat is in MatrixBase, so it only needs to be coded once for all kinds of expressions, matrices, and vectors.
|
||||
|
||||
So let's end this digression and come back to the piece of code from our example program that we were currently analyzing,
|
||||
|
||||
\code
|
||||
v + w
|
||||
\endcode
|
||||
|
||||
Now that MatrixBase is a good friend, let's write fully the prototype of the operator+ that gets called here (this code is from src/Core/MatrixBase.h):
|
||||
|
||||
\code
|
||||
template<typename Derived>
|
||||
class MatrixBase
|
||||
{
|
||||
// ...
|
||||
|
||||
template<typename OtherDerived>
|
||||
const CwiseBinaryOp<internal::scalar_sum_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
|
||||
operator+(const MatrixBase<OtherDerived> &other) const;
|
||||
|
||||
// ...
|
||||
};
|
||||
\endcode
|
||||
|
||||
Here of course, \a Derived and \a OtherDerived are VectorXf.
|
||||
|
||||
As we said, CwiseBinaryOp is also used for other operations such as subtraction, so it takes another template parameter determining the operation that will be applied to coefficients. This template parameter is a functor, that is, a class in which we have an operator() so it behaves like a function. Here, the functor used is internal::scalar_sum_op. It is defined in src/Core/Functors.h.
|
||||
|
||||
Let us now explain the internal::traits here. The internal::scalar_sum_op class takes one template parameter: the type of the numbers to handle. Here of course we want to pass the scalar type (a.k.a. numeric type) of VectorXf, which is \c float. How do we determine which is the scalar type of \a Derived ? Throughout Eigen, all matrix and expression types define a typedef \a Scalar which gives its scalar type. For example, VectorXf::Scalar is a typedef for \c float. So here, if life was easy, we could find the numeric type of \a Derived as just
|
||||
\code
|
||||
typename Derived::Scalar
|
||||
\endcode
|
||||
Unfortunately, we can't do that here, as the compiler would complain that the type Derived hasn't yet been defined. So we use a workaround: in src/Core/util/ForwardDeclarations.h, we declared (not defined!) all our subclasses, like Matrix, and we also declared the following class template:
|
||||
\code
|
||||
template<typename T> struct internal::traits;
|
||||
\endcode
|
||||
In src/Core/Matrix.h, right \em before the definition of class Matrix, we define a partial specialization of internal::traits for T=Matrix\<any template parameters\>. In this specialization of internal::traits, we define the Scalar typedef. So when we actually define Matrix, it is legal to refer to "typename internal::traits\<Matrix\>::Scalar".
|
||||
|
||||
Anyway, we have declared our operator+. In our case, where \a Derived and \a OtherDerived are VectorXf, the above declaration amounts to:
|
||||
\code
|
||||
class MatrixBase<VectorXf>
|
||||
{
|
||||
// ...
|
||||
|
||||
const CwiseBinaryOp<internal::scalar_sum_op<float>, VectorXf, VectorXf>
|
||||
operator+(const MatrixBase<VectorXf> &other) const;
|
||||
|
||||
// ...
|
||||
};
|
||||
\endcode
|
||||
|
||||
Let's now jump to src/Core/CwiseBinaryOp.h to see how it is defined. As you can see there, all it does is to return a CwiseBinaryOp object, and this object is just storing references to the left-hand-side and right-hand-side expressions -- here, these are the vectors \a v and \a w. Well, the CwiseBinaryOp object is also storing an instance of the (empty) functor class, but you shouldn't worry about it as that is a minor implementation detail.
|
||||
|
||||
Thus, the operator+ hasn't performed any actual computation. To summarize, the operation \a v + \a w just returned an object of type CwiseBinaryOp which did nothing else than just storing references to \a v and \a w.
|
||||
|
||||
\section Assignment The assignment
|
||||
|
||||
At this point, the expression \a v + \a w has finished evaluating, so, in the process of compiling the line of code
|
||||
\code
|
||||
u = v + w;
|
||||
\endcode
|
||||
we now enter the operator=.
|
||||
|
||||
What operator= is being called here? The vector u is an object of class VectorXf, i.e. Matrix. In src/Core/Matrix.h, inside the definition of class Matrix, we see this:
|
||||
\code
|
||||
template<typename OtherDerived>
|
||||
inline Matrix& operator=(const MatrixBase<OtherDerived>& other)
|
||||
{
|
||||
eigen_assert(m_storage.data()!=0 && "you cannot use operator= with a non initialized matrix (instead use set()");
|
||||
return Base::operator=(other.derived());
|
||||
}
|
||||
\endcode
|
||||
Here, Base is a typedef for MatrixBase\<Matrix\>. So, what is being called is the operator= of MatrixBase. Let's see its prototype in src/Core/MatrixBase.h:
|
||||
\code
|
||||
template<typename OtherDerived>
|
||||
Derived& operator=(const MatrixBase<OtherDerived>& other);
|
||||
\endcode
|
||||
Here, \a Derived is VectorXf (since u is a VectorXf) and \a OtherDerived is CwiseBinaryOp. More specifically, as explained in the previous section, \a OtherDerived is:
|
||||
\code
|
||||
CwiseBinaryOp<internal::scalar_sum_op<float>, VectorXf, VectorXf>
|
||||
\endcode
|
||||
So the full prototype of the operator= being called is:
|
||||
\code
|
||||
VectorXf& MatrixBase<VectorXf>::operator=(const MatrixBase<CwiseBinaryOp<internal::scalar_sum_op<float>, VectorXf, VectorXf> > & other);
|
||||
\endcode
|
||||
This operator= literally reads "copying a sum of two VectorXf's into another VectorXf".
|
||||
|
||||
Let's now look at the implementation of this operator=. It resides in the file src/Core/Assign.h.
|
||||
|
||||
What we can see there is:
|
||||
\code
|
||||
template<typename Derived>
|
||||
template<typename OtherDerived>
|
||||
inline Derived& MatrixBase<Derived>
|
||||
::operator=(const MatrixBase<OtherDerived>& other)
|
||||
{
|
||||
return internal::assign_selector<Derived,OtherDerived>::run(derived(), other.derived());
|
||||
}
|
||||
\endcode
|
||||
|
||||
OK so our next task is to understand internal::assign_selector :)
|
||||
|
||||
Here is its declaration (all that is still in the same file src/Core/Assign.h)
|
||||
\code
|
||||
template<typename Derived, typename OtherDerived,
|
||||
bool EvalBeforeAssigning = int(OtherDerived::Flags) & EvalBeforeAssigningBit,
|
||||
bool NeedToTranspose = Derived::IsVectorAtCompileTime
|
||||
&& OtherDerived::IsVectorAtCompileTime
|
||||
&& int(Derived::RowsAtCompileTime) == int(OtherDerived::ColsAtCompileTime)
|
||||
&& int(Derived::ColsAtCompileTime) == int(OtherDerived::RowsAtCompileTime)
|
||||
&& int(Derived::SizeAtCompileTime) != 1>
|
||||
struct internal::assign_selector;
|
||||
\endcode
|
||||
|
||||
So internal::assign_selector takes 4 template parameters, but the 2 last ones are automatically determined by the 2 first ones.
|
||||
|
||||
EvalBeforeAssigning is here to enforce the EvalBeforeAssigningBit. As explained <a href="TopicLazyEvaluation.html">here</a>, certain expressions have this flag which makes them automatically evaluate into temporaries before assigning them to another expression. This is the case of the Product expression, in order to avoid strange aliasing effects when doing "m = m * m;" However, of course here our CwiseBinaryOp expression doesn't have the EvalBeforeAssigningBit: we said since the beginning that we didn't want a temporary to be introduced here. So if you go to src/Core/CwiseBinaryOp.h, you'll see that the Flags in internal::traits\<CwiseBinaryOp\> don't include the EvalBeforeAssigningBit. The Flags member of CwiseBinaryOp is then imported from the internal::traits by the EIGEN_GENERIC_PUBLIC_INTERFACE macro. Anyway, here the template parameter EvalBeforeAssigning has the value \c false.
|
||||
|
||||
NeedToTranspose is here for the case where the user wants to copy a row-vector into a column-vector. We allow this as a special exception to the general rule that in assignments we require the dimensions to match. Anyway, here both the left-hand and right-hand sides are column vectors, in the sense that ColsAtCompileTime is equal to 1. So NeedToTranspose is \c false too.
|
||||
|
||||
So, here we are in the partial specialization:
|
||||
\code
|
||||
internal::assign_selector<Derived, OtherDerived, false, false>
|
||||
\endcode
|
||||
|
||||
Here's how it is defined:
|
||||
\code
|
||||
template<typename Derived, typename OtherDerived>
|
||||
struct internal::assign_selector<Derived,OtherDerived,false,false> {
|
||||
static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); }
|
||||
};
|
||||
\endcode
|
||||
|
||||
OK so now our next job is to understand how lazyAssign works :)
|
||||
|
||||
\code
|
||||
template<typename Derived>
|
||||
template<typename OtherDerived>
|
||||
inline Derived& MatrixBase<Derived>
|
||||
::lazyAssign(const MatrixBase<OtherDerived>& other)
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived)
|
||||
eigen_assert(rows() == other.rows() && cols() == other.cols());
|
||||
internal::assign_impl<Derived, OtherDerived>::run(derived(),other.derived());
|
||||
return derived();
|
||||
}
|
||||
\endcode
|
||||
|
||||
What do we see here? Some assertions, and then the only interesting line is:
|
||||
\code
|
||||
internal::assign_impl<Derived, OtherDerived>::run(derived(),other.derived());
|
||||
\endcode
|
||||
|
||||
OK so now we want to know what is inside internal::assign_impl.
|
||||
|
||||
Here is its declaration:
|
||||
\code
|
||||
template<typename Derived1, typename Derived2,
|
||||
int Vectorization = internal::assign_traits<Derived1, Derived2>::Vectorization,
|
||||
int Unrolling = internal::assign_traits<Derived1, Derived2>::Unrolling>
|
||||
struct internal::assign_impl;
|
||||
\endcode
|
||||
Again, internal::assign_impl takes 4 template parameters, but the last 2 are automatically determined by the first 2.
|
||||
|
||||
These two parameters \a Vectorization and \a Unrolling are determined by a helper class internal::assign_traits. Its job is to determine which vectorization strategy to use (that is \a Vectorization) and which unrolling strategy to use (that is \a Unrolling).
|
||||
|
||||
We'll not enter into the details of how these strategies are chosen (this is in the implementation of internal::assign_traits at the top of the same file). Let's just say that here \a Vectorization has the value \a LinearVectorization, and \a Unrolling has the value \a NoUnrolling (the latter is obvious since our vectors have dynamic size so there's no way to unroll the loop at compile-time).
|
||||
|
||||
So the partial specialization of internal::assign_impl that we're looking at is:
|
||||
\code
|
||||
internal::assign_impl<Derived1, Derived2, LinearVectorization, NoUnrolling>
|
||||
\endcode
|
||||
|
||||
Here is how it's defined:
|
||||
\code
|
||||
template<typename Derived1, typename Derived2>
|
||||
struct internal::assign_impl<Derived1, Derived2, LinearVectorization, NoUnrolling>
|
||||
{
|
||||
static void run(Derived1 &dst, const Derived2 &src)
|
||||
{
|
||||
const int size = dst.size();
|
||||
const int packetSize = internal::packet_traits<typename Derived1::Scalar>::size;
|
||||
const int alignedStart = internal::assign_traits<Derived1,Derived2>::DstIsAligned ? 0
|
||||
: internal::first_aligned(&dst.coeffRef(0), size);
|
||||
const int alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;
|
||||
|
||||
for(int index = 0; index < alignedStart; index++)
|
||||
dst.copyCoeff(index, src);
|
||||
|
||||
for(int index = alignedStart; index < alignedEnd; index += packetSize)
|
||||
{
|
||||
dst.template copyPacket<Derived2, Aligned, internal::assign_traits<Derived1,Derived2>::SrcAlignment>(index, src);
|
||||
}
|
||||
|
||||
for(int index = alignedEnd; index < size; index++)
|
||||
dst.copyCoeff(index, src);
|
||||
}
|
||||
};
|
||||
\endcode
|
||||
|
||||
Here's how it works. \a LinearVectorization means that the left-hand and right-hand side expression can be accessed linearly i.e. you can refer to their coefficients by one integer \a index, as opposed to having to refer to its coefficients by two integers \a row, \a column.
|
||||
|
||||
As we said at the beginning, vectorization works with blocks of 4 floats. Here, \a PacketSize is 4.
|
||||
|
||||
There are two potential problems that we need to deal with:
|
||||
\li first, vectorization works much better if the packets are 128-bit-aligned. This is especially important for write access. So when writing to the coefficients of \a dst, we want to group these coefficients by packets of 4 such that each of these packets is 128-bit-aligned. In general, this requires to skip a few coefficients at the beginning of \a dst. This is the purpose of \a alignedStart. We then copy these first few coefficients one by one, not by packets. However, in our case, the \a dst expression is a VectorXf and remember that in the construction of the vectors we allocated aligned arrays. Thanks to \a DstIsAligned, Eigen remembers that without having to do any runtime check, so \a alignedStart is zero and this part is avoided altogether.
|
||||
\li second, the number of coefficients to copy is not in general a multiple of \a packetSize. Here, there are 50 coefficients to copy and \a packetSize is 4. So we'll have to copy the last 2 coefficients one by one, not by packets. Here, \a alignedEnd is 48.
|
||||
|
||||
Now come the actual loops.
|
||||
|
||||
First, the vectorized part: the 48 first coefficients out of 50 will be copied by packets of 4:
|
||||
\code
|
||||
for(int index = alignedStart; index < alignedEnd; index += packetSize)
|
||||
{
|
||||
dst.template copyPacket<Derived2, Aligned, internal::assign_traits<Derived1,Derived2>::SrcAlignment>(index, src);
|
||||
}
|
||||
\endcode
|
||||
|
||||
What is copyPacket? It is defined in src/Core/Coeffs.h:
|
||||
\code
|
||||
template<typename Derived>
|
||||
template<typename OtherDerived, int StoreMode, int LoadMode>
|
||||
inline void MatrixBase<Derived>::copyPacket(int index, const MatrixBase<OtherDerived>& other)
|
||||
{
|
||||
eigen_internal_assert(index >= 0 && index < size());
|
||||
derived().template writePacket<StoreMode>(index,
|
||||
other.derived().template packet<LoadMode>(index));
|
||||
}
|
||||
\endcode
|
||||
|
||||
OK, what are writePacket() and packet() here?
|
||||
|
||||
First, writePacket() here is a method on the left-hand side VectorXf. So we go to src/Core/Matrix.h to look at its definition:
|
||||
\code
|
||||
template<int StoreMode>
|
||||
inline void writePacket(int index, const PacketScalar& x)
|
||||
{
|
||||
internal::pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, x);
|
||||
}
|
||||
\endcode
|
||||
Here, \a StoreMode is \a #Aligned, indicating that we are doing a 128-bit-aligned write access, \a PacketScalar is a type representing a "SSE packet of 4 floats" and internal::pstoret is a function writing such a packet in memory. Their definitions are architecture-specific, we find them in src/Core/arch/SSE/PacketMath.h:
|
||||
|
||||
The line in src/Core/arch/SSE/PacketMath.h that determines the PacketScalar type (via a typedef in Matrix.h) is:
|
||||
\code
|
||||
template<> struct internal::packet_traits<float> { typedef __m128 type; enum {size=4}; };
|
||||
\endcode
|
||||
Here, __m128 is a SSE-specific type. Notice that the enum \a size here is what was used to define \a packetSize above.
|
||||
|
||||
And here is the implementation of internal::pstoret:
|
||||
\code
|
||||
template<> inline void internal::pstore(float* to, const __m128& from) { _mm_store_ps(to, from); }
|
||||
\endcode
|
||||
Here, _mm_store_ps is an SSE-specific intrinsic function, representing a single SSE instruction. The difference between internal::pstore and internal::pstoret is that internal::pstoret is a dispatcher handling both the aligned and unaligned cases, you find its definition in src/Core/GenericPacketMath.h:
|
||||
\code
|
||||
template<typename Scalar, typename Packet, int LoadMode>
|
||||
inline void internal::pstoret(Scalar* to, const Packet& from)
|
||||
{
|
||||
if(LoadMode == Aligned)
|
||||
internal::pstore(to, from);
|
||||
else
|
||||
internal::pstoreu(to, from);
|
||||
}
|
||||
\endcode
|
||||
|
||||
OK, that explains how writePacket() works. Now let's look into the packet() call. Remember that we are analyzing this line of code inside copyPacket():
|
||||
\code
|
||||
derived().template writePacket<StoreMode>(index,
|
||||
other.derived().template packet<LoadMode>(index));
|
||||
\endcode
|
||||
|
||||
Here, \a other is our sum expression \a v + \a w. The .derived() is just casting from MatrixBase to the subclass which here is CwiseBinaryOp. So let's go to src/Core/CwiseBinaryOp.h:
|
||||
\code
|
||||
class CwiseBinaryOp
|
||||
{
|
||||
// ...
|
||||
template<int LoadMode>
|
||||
inline PacketScalar packet(int index) const
|
||||
{
|
||||
return m_functor.packetOp(m_lhs.template packet<LoadMode>(index), m_rhs.template packet<LoadMode>(index));
|
||||
}
|
||||
};
|
||||
\endcode
|
||||
Here, \a m_lhs is the vector \a v, and \a m_rhs is the vector \a w. So the packet() function here is Matrix::packet(). The template parameter \a LoadMode is \a #Aligned. So we're looking at
|
||||
\code
|
||||
class Matrix
|
||||
{
|
||||
// ...
|
||||
template<int LoadMode>
|
||||
inline PacketScalar packet(int index) const
|
||||
{
|
||||
return internal::ploadt<Scalar, LoadMode>(m_storage.data() + index);
|
||||
}
|
||||
};
|
||||
\endcode
|
||||
We let you look up the definition of internal::ploadt in GenericPacketMath.h and the internal::pload in src/Core/arch/SSE/PacketMath.h. It is very similar to the above for internal::pstore.
|
||||
|
||||
Let's go back to CwiseBinaryOp::packet(). Once the packets from the vectors \a v and \a w have been returned, what does this function do? It calls m_functor.packetOp() on them. What is m_functor? Here we must remember what particular template specialization of CwiseBinaryOp we're dealing with:
|
||||
\code
|
||||
CwiseBinaryOp<internal::scalar_sum_op<float>, VectorXf, VectorXf>
|
||||
\endcode
|
||||
So m_functor is an object of the empty class internal::scalar_sum_op<float>. As we mentioned above, don't worry about why we constructed an object of this empty class at all -- it's an implementation detail, the point is that some other functors need to store member data.
|
||||
|
||||
Anyway, internal::scalar_sum_op is defined in src/Core/Functors.h:
|
||||
\code
|
||||
template<typename Scalar> struct internal::scalar_sum_op EIGEN_EMPTY_STRUCT {
|
||||
inline const Scalar operator() (const Scalar& a, const Scalar& b) const { return a + b; }
|
||||
template<typename PacketScalar>
|
||||
inline const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
|
||||
{ return internal::padd(a,b); }
|
||||
};
|
||||
\endcode
|
||||
As you can see, all that packetOp() does is call internal::padd on the two packets. Here is the definition of internal::padd from src/Core/arch/SSE/PacketMath.h:
|
||||
\code
|
||||
template<> inline __m128 internal::padd(const __m128& a, const __m128& b) { return _mm_add_ps(a,b); }
|
||||
\endcode
|
||||
Here, _mm_add_ps is a SSE-specific intrinsic function, representing a single SSE instruction.
|
||||
|
||||
To summarize, the loop
|
||||
\code
|
||||
for(int index = alignedStart; index < alignedEnd; index += packetSize)
|
||||
{
|
||||
dst.template copyPacket<Derived2, Aligned, internal::assign_traits<Derived1,Derived2>::SrcAlignment>(index, src);
|
||||
}
|
||||
\endcode
|
||||
has been compiled to the following code: for \a index going from 0 to 11 ( = 48/4 - 1), read the i-th packet (of 4 floats) from the vector v and the i-th packet from the vector w using two _mm_load_ps SSE instructions, then add them together using a _mm_add_ps instruction, then store the result using a _mm_store_ps instruction.
|
||||
|
||||
There remains the second loop handling the last few (here, the last 2) coefficients:
|
||||
\code
|
||||
for(int index = alignedEnd; index < size; index++)
|
||||
dst.copyCoeff(index, src);
|
||||
\endcode
|
||||
However, it works just like the one we just explained, it is just simpler because there is no SSE vectorization involved here. copyPacket() becomes copyCoeff(), packet() becomes coeff(), writePacket() becomes coeffRef(). If you followed us this far, you can probably understand this part by yourself.
|
||||
|
||||
We see that all the C++ abstraction of Eigen goes away during compilation and that we indeed are precisely controlling which assembly instructions we emit. Such is the beauty of C++! Since we have such precise control over the emitted assembly instructions, but such complex logic to choose the right instructions, we can say that Eigen really behaves like an optimizing compiler. If you prefer, you could say that Eigen behaves like a script for the compiler. In a sense, C++ template metaprogramming is scripting the compiler -- and it's been shown that this scripting language is Turing-complete. See <a href="http://en.wikipedia.org/wiki/Template_metaprogramming"> Wikipedia</a>.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,159 +0,0 @@
|
|||
|
||||
// This file structures pages and modules into a convenient hierarchical structure.
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \page UserManual_Generalities General topics
|
||||
- \subpage Eigen2ToEigen3
|
||||
- \subpage TopicFunctionTakingEigenTypes
|
||||
- \subpage TopicPreprocessorDirectives
|
||||
- \subpage TopicAssertions
|
||||
- \subpage TopicCustomizingEigen
|
||||
- \subpage TopicMultiThreading
|
||||
- \subpage TopicUsingIntelMKL
|
||||
- \subpage TopicTemplateKeyword
|
||||
- \subpage UserManual_UnderstandingEigen
|
||||
*/
|
||||
|
||||
/** \page UserManual_UnderstandingEigen Understanding Eigen
|
||||
- \subpage TopicInsideEigenExample
|
||||
- \subpage TopicClassHierarchy
|
||||
- \subpage TopicLazyEvaluation
|
||||
*/
|
||||
|
||||
/** \page UnclassifiedPages Unclassified pages
|
||||
- \subpage TopicResizing
|
||||
- \subpage TopicVectorization
|
||||
- \subpage TopicEigenExpressionTemplates
|
||||
- \subpage TopicScalarTypes
|
||||
- \subpage GettingStarted
|
||||
- \subpage TutorialSparse_example_details
|
||||
- \subpage TopicWritingEfficientProductExpression
|
||||
- \subpage Experimental
|
||||
*/
|
||||
|
||||
|
||||
/** \defgroup Support_modules Support modules
|
||||
* Category of modules which add support for external libraries.
|
||||
*/
|
||||
|
||||
|
||||
/** \defgroup DenseMatrixManipulation_chapter Dense matrix and array manipulation */
|
||||
/** \defgroup DenseMatrixManipulation_Alignement Alignment issues */
|
||||
/** \defgroup DenseMatrixManipulation_Reference Reference */
|
||||
|
||||
/** \addtogroup TutorialMatrixClass
|
||||
\ingroup DenseMatrixManipulation_chapter */
|
||||
/** \addtogroup TutorialMatrixArithmetic
|
||||
\ingroup DenseMatrixManipulation_chapter */
|
||||
/** \addtogroup TutorialArrayClass
|
||||
\ingroup DenseMatrixManipulation_chapter */
|
||||
/** \addtogroup TutorialBlockOperations
|
||||
\ingroup DenseMatrixManipulation_chapter */
|
||||
/** \addtogroup TutorialAdvancedInitialization
|
||||
\ingroup DenseMatrixManipulation_chapter */
|
||||
/** \addtogroup TutorialReductionsVisitorsBroadcasting
|
||||
\ingroup DenseMatrixManipulation_chapter */
|
||||
/** \addtogroup TutorialMapClass
|
||||
\ingroup DenseMatrixManipulation_chapter */
|
||||
/** \addtogroup TopicAliasing
|
||||
\ingroup DenseMatrixManipulation_chapter */
|
||||
/** \addtogroup TopicStorageOrders
|
||||
\ingroup DenseMatrixManipulation_chapter */
|
||||
|
||||
/** \addtogroup DenseMatrixManipulation_Alignement
|
||||
\ingroup DenseMatrixManipulation_chapter */
|
||||
/** \addtogroup TopicUnalignedArrayAssert
|
||||
\ingroup DenseMatrixManipulation_Alignement */
|
||||
/** \addtogroup TopicFixedSizeVectorizable
|
||||
\ingroup DenseMatrixManipulation_Alignement */
|
||||
/** \addtogroup TopicStructHavingEigenMembers
|
||||
\ingroup DenseMatrixManipulation_Alignement */
|
||||
/** \addtogroup TopicStlContainers
|
||||
\ingroup DenseMatrixManipulation_Alignement */
|
||||
/** \addtogroup TopicPassingByValue
|
||||
\ingroup DenseMatrixManipulation_Alignement */
|
||||
/** \addtogroup TopicWrongStackAlignment
|
||||
\ingroup DenseMatrixManipulation_Alignement */
|
||||
|
||||
/** \addtogroup DenseMatrixManipulation_Reference
|
||||
\ingroup DenseMatrixManipulation_chapter */
|
||||
/** \addtogroup Core_Module
|
||||
\ingroup DenseMatrixManipulation_Reference */
|
||||
/** \addtogroup Jacobi_Module
|
||||
\ingroup DenseMatrixManipulation_Reference */
|
||||
/** \addtogroup Householder_Module
|
||||
\ingroup DenseMatrixManipulation_Reference */
|
||||
|
||||
/** \addtogroup QuickRefPage
|
||||
\ingroup DenseMatrixManipulation_chapter */
|
||||
|
||||
|
||||
/** \defgroup DenseLinearSolvers_chapter Dense linear problems and decompositions */
|
||||
/** \defgroup DenseLinearSolvers_Reference Reference */
|
||||
|
||||
/** \addtogroup TutorialLinearAlgebra
|
||||
\ingroup DenseLinearSolvers_chapter */
|
||||
/** \addtogroup TopicLinearAlgebraDecompositions
|
||||
\ingroup DenseLinearSolvers_chapter */
|
||||
|
||||
/** \addtogroup DenseLinearSolvers_Reference
|
||||
\ingroup DenseLinearSolvers_chapter */
|
||||
/** \addtogroup Cholesky_Module
|
||||
\ingroup DenseLinearSolvers_Reference */
|
||||
/** \addtogroup LU_Module
|
||||
\ingroup DenseLinearSolvers_Reference */
|
||||
/** \addtogroup QR_Module
|
||||
\ingroup DenseLinearSolvers_Reference */
|
||||
/** \addtogroup SVD_Module
|
||||
\ingroup DenseLinearSolvers_Reference*/
|
||||
/** \addtogroup Eigenvalues_Module
|
||||
\ingroup DenseLinearSolvers_Reference */
|
||||
|
||||
|
||||
|
||||
|
||||
/** \defgroup Sparse_chapter Sparse linear algebra */
|
||||
/** \defgroup Sparse_Reference Reference */
|
||||
|
||||
/** \addtogroup TutorialSparse
|
||||
\ingroup Sparse_chapter */
|
||||
/** \addtogroup TopicSparseSystems
|
||||
\ingroup Sparse_chapter */
|
||||
|
||||
/** \addtogroup Sparse_Reference
|
||||
\ingroup Sparse_chapter */
|
||||
/** \addtogroup SparseCore_Module
|
||||
\ingroup Sparse_Reference */
|
||||
/** \addtogroup OrderingMethods_Module
|
||||
\ingroup Sparse_Reference */
|
||||
/** \addtogroup SparseCholesky_Module
|
||||
\ingroup Sparse_Reference */
|
||||
/** \addtogroup SparseLU_Module
|
||||
\ingroup Sparse_Reference */
|
||||
/** \addtogroup SparseQR_Module
|
||||
\ingroup Sparse_Reference */
|
||||
/** \addtogroup IterativeLinearSolvers_Module
|
||||
\ingroup Sparse_Reference */
|
||||
/** \addtogroup Sparse_Module
|
||||
\ingroup Sparse_Reference */
|
||||
/** \addtogroup Support_modules
|
||||
\ingroup Sparse_Reference */
|
||||
|
||||
/** \addtogroup SparseQuickRefPage
|
||||
\ingroup Sparse_chapter */
|
||||
|
||||
|
||||
/** \defgroup Geometry_chapter Geometry */
|
||||
/** \defgroup Geometry_Reference Reference */
|
||||
|
||||
/** \addtogroup TutorialGeometry
|
||||
\ingroup Geometry_chapter */
|
||||
|
||||
/** \addtogroup Geometry_Reference
|
||||
\ingroup Geometry_chapter */
|
||||
/** \addtogroup Geometry_Module
|
||||
\ingroup Geometry_Reference */
|
||||
/** \addtogroup Splines_Module
|
||||
\ingroup Geometry_Reference */
|
||||
}
|
|
@ -1,28 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \mainpage notitle
|
||||
|
||||
This is the API documentation for Eigen3. You can <a href="eigen-doc.tgz">download</a> it as a tgz archive for offline reading.
|
||||
|
||||
You're already an Eigen2 user? Here is a \link Eigen2ToEigen3 Eigen2 to Eigen3 guide \endlink to help porting your application.
|
||||
|
||||
For a first contact with Eigen, the best place is to have a look at the \link GettingStarted getting started \endlink page that shows you how to write and compile your first program with Eigen.
|
||||
|
||||
Then, the \b quick \b reference \b pages give you a quite complete description of the API in a very condensed format that is specially useful to recall the syntax of a particular feature, or to have a quick look at the API. They currently cover the two following feature sets, and more will come in the future:
|
||||
- \link QuickRefPage [QuickRef] Dense matrix and array manipulations \endlink
|
||||
- \link SparseQuickRefPage [QuickRef] Sparse linear algebra \endlink
|
||||
|
||||
You're a MatLab user? There is also a <a href="AsciiQuickReference.txt">short ASCII reference</a> with Matlab translations.
|
||||
|
||||
The \b main \b documentation is organized into \em chapters covering different domains of features.
|
||||
They are themselves composed of \em user \em manual pages describing the different features in a comprehensive way, and \em reference pages that gives you access to the API documentation through the related Eigen's \em modules and \em classes.
|
||||
|
||||
Under the \subpage UserManual_Generalities section, you will find documentation on more general topics such as preprocessor directives, controlling assertions, multi-threading, MKL support, some Eigen's internal insights, and much more...
|
||||
|
||||
Finally, do not miss the search engine, useful to quickly get to the documentation of a given class or function.
|
||||
|
||||
Want more? Checkout the <a href="unsupported/index.html">\em unsupported \em modules </a> documentation.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TopicPassingByValue Passing Eigen objects by value to functions
|
||||
|
||||
Passing objects by value is almost always a very bad idea in C++, as this means useless copies, and one should pass them by reference instead.
|
||||
|
||||
With Eigen, this is even more important: passing \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen objects" by value is not only inefficient, it can be illegal or make your program crash! And the reason is that these Eigen objects have alignment modifiers that aren't respected when they are passed by value.
|
||||
|
||||
So for example, a function like this, where v is passed by value:
|
||||
|
||||
\code
|
||||
void my_function(Eigen::Vector2d v);
|
||||
\endcode
|
||||
|
||||
needs to be rewritten as follows, passing v by reference:
|
||||
|
||||
\code
|
||||
void my_function(const Eigen::Vector2d& v);
|
||||
\endcode
|
||||
|
||||
Likewise, if you have a class having an Eigen object as a member:
|
||||
|
||||
\code
|
||||
struct Foo
|
||||
{
|
||||
Eigen::Vector2d v;
|
||||
};
|
||||
void my_function(Foo v);
|
||||
\endcode
|
||||
|
||||
This function also needs to be rewritten like this:
|
||||
\code
|
||||
void my_function(const Foo& v);
|
||||
\endcode
|
||||
|
||||
Note that on the other hand, there is no problem with functions that return objects by value.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,119 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page TopicPreprocessorDirectives Preprocessor directives
|
||||
|
||||
You can control some aspects of %Eigen by defining the preprocessor tokens using \c \#define. These macros
|
||||
should be defined before any %Eigen headers are included. Often they are best set in the project options.
|
||||
|
||||
This page lists the preprocessor tokens recognised by %Eigen.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
|
||||
\section TopicPreprocessorDirectivesMajor Macros with major effects
|
||||
|
||||
These macros have a major effect and typically break the API (Application Programming Interface) and/or the
|
||||
ABI (Application Binary Interface). This can be rather dangerous: if parts of your program are compiled with
|
||||
one option, and other parts (or libraries that you use) are compiled with another option, your program may
|
||||
fail to link or exhibit subtle bugs. Nevertheless, these options can be useful for people who know what they
|
||||
are doing.
|
||||
|
||||
- \b EIGEN2_SUPPORT - if defined, enables the Eigen2 compatibility mode. This is meant to ease the transition
|
||||
of Eigen2 to Eigen3 (see \ref Eigen2ToEigen3). Not defined by default.
|
||||
- \b EIGEN2_SUPPORT_STAGEnn_xxx (for various values of nn and xxx) - staged migration path from Eigen2 to
|
||||
Eigen3; see \ref Eigen2SupportModes.
|
||||
- \b EIGEN_DEFAULT_DENSE_INDEX_TYPE - the type for column and row indices in matrices, vectors and array
|
||||
(DenseBase::Index). Set to \c std::ptrdiff_t by default.
|
||||
- \b EIGEN_DEFAULT_IO_FORMAT - the IOFormat to use when printing a matrix if no %IOFormat is specified.
|
||||
Defaults to the %IOFormat constructed by the default constructor IOFormat::IOFormat().
|
||||
- \b EIGEN_INITIALIZE_MATRICES_BY_ZERO - if defined, all entries of newly constructed matrices and arrays are
|
||||
initialized to zero, as are new entries in matrices and arrays after resizing. Not defined by default.
|
||||
- \b EIGEN_INITIALIZE_MATRICES_BY_NAN - if defined, all entries of newly constructed matrices and arrays are
|
||||
initialized to NaN, as are new entries in matrices and arrays after resizing. This option is especially
|
||||
useful for debugging purpose, though a memory tool like <a href="http://valgrind.org/">valgrind</a> is
|
||||
preferable. Not defined by default.
|
||||
- \b EIGEN_NO_AUTOMATIC_RESIZING - if defined, the matrices (or arrays) on both sides of an assignment
|
||||
<tt>a = b</tt> have to be of the same size; otherwise, %Eigen automatically resizes \c a so that it is of
|
||||
the correct size. Not defined by default.
|
||||
|
||||
|
||||
\section TopicPreprocessorDirectivesAssertions Assertions
|
||||
|
||||
The %Eigen library contains many assertions to guard against programming errors, both at compile time and at
|
||||
run time. However, these assertions do cost time and can thus be turned off.
|
||||
|
||||
- \b EIGEN_NO_DEBUG - disables %Eigen's assertions if defined. Not defined by default, unless the
|
||||
\c NDEBUG macro is defined (this is a standard C++ macro which disables all asserts).
|
||||
- \b EIGEN_NO_STATIC_ASSERT - if defined, compile-time static assertions are replaced by runtime assertions;
|
||||
this saves compilation time. Not defined by default.
|
||||
- \b eigen_assert - macro with one argument that is used inside %Eigen for assertions. By default, it is
|
||||
basically defined to be \c assert, which aborts the program if the assertion is violated. Redefine this
|
||||
macro if you want to do something else, like throwing an exception.
|
||||
- \b EIGEN_MPL2_ONLY - disable non MPL2 compatible features, or in other words disable the features which
|
||||
are still under the LGPL.
|
||||
|
||||
|
||||
\section TopicPreprocessorDirectivesPerformance Alignment, vectorization and performance tweaking
|
||||
|
||||
- \b EIGEN_MALLOC_ALREADY_ALIGNED - Can be set to 0 or 1 to tell whether default system malloc already
|
||||
returns aligned buffers. If not defined, then this information is automatically deduced from the compiler
|
||||
and system preprocessor tokens.
|
||||
- \b EIGEN_DONT_ALIGN - disables alignment completely. %Eigen will not try to align its objects and does not
|
||||
expect that any objects passed to it are aligned. This will turn off vectorization. Not defined by default.
|
||||
- \b EIGEN_DONT_ALIGN_STATICALLY - disables alignment of arrays on the stack. Not defined by default, unless
|
||||
\c EIGEN_DONT_ALIGN is defined.
|
||||
- \b EIGEN_DONT_PARALLELIZE - if defined, this disables multi-threading. This is only relevant if you enabled OpenMP.
|
||||
See \ref TopicMultiThreading for details.
|
||||
- \b EIGEN_DONT_VECTORIZE - disables explicit vectorization when defined. Not defined by default, unless
|
||||
alignment is disabled by %Eigen's platform test or the user defining \c EIGEN_DONT_ALIGN.
|
||||
- \b EIGEN_FAST_MATH - enables some optimizations which might affect the accuracy of the result. This currently
|
||||
enables the SSE vectorization of sin() and cos(), and speeds up sqrt() for single precision. Defined to 1 by default.
|
||||
Define it to 0 to disable.
|
||||
- \b EIGEN_UNROLLING_LIMIT - defines the size of a loop to enable meta unrolling. Set it to zero to disable
|
||||
unrolling. The size of a loop here is expressed in %Eigen's own notion of "number of FLOPS", it does not
|
||||
correspond to the number of iterations or the number of instructions. The default value is 100.
|
||||
- \b EIGEN_STACK_ALLOCATION_LIMIT - defines the maximum bytes for a buffer to be allocated on the stack. For internal
|
||||
temporary buffers, dynamic memory allocation is employed as a fall back. For fixed-size matrices or arrays, exceeding
|
||||
this threshold raises a compile time assertion. Use 0 to set no limit. Default is 128 KB.
|
||||
|
||||
|
||||
\section TopicPreprocessorDirectivesPlugins Plugins
|
||||
|
||||
It is possible to add new methods to many fundamental classes in %Eigen by writing a plugin. As explained in
|
||||
the section \ref ExtendingMatrixBase, the plugin is specified by defining a \c EIGEN_xxx_PLUGIN macro. The
|
||||
following macros are supported; none of them are defined by default.
|
||||
|
||||
- \b EIGEN_ARRAY_PLUGIN - filename of plugin for extending the Array class.
|
||||
- \b EIGEN_ARRAYBASE_PLUGIN - filename of plugin for extending the ArrayBase class.
|
||||
- \b EIGEN_CWISE_PLUGIN - filename of plugin for extending the Cwise class.
|
||||
- \b EIGEN_DENSEBASE_PLUGIN - filename of plugin for extending the DenseBase class.
|
||||
- \b EIGEN_DYNAMICSPARSEMATRIX_PLUGIN - filename of plugin for extending the DynamicSparseMatrix class.
|
||||
- \b EIGEN_MATRIX_PLUGIN - filename of plugin for extending the Matrix class.
|
||||
- \b EIGEN_MATRIXBASE_PLUGIN - filename of plugin for extending the MatrixBase class.
|
||||
- \b EIGEN_PLAINOBJECTBASE_PLUGIN - filename of plugin for extending the PlainObjectBase class.
|
||||
- \b EIGEN_QUATERNIONBASE_PLUGIN - filename of plugin for extending the QuaternionBase class.
|
||||
- \b EIGEN_SPARSEMATRIX_PLUGIN - filename of plugin for extending the SparseMatrix class.
|
||||
- \b EIGEN_SPARSEMATRIXBASE_PLUGIN - filename of plugin for extending the SparseMatrixBase class.
|
||||
- \b EIGEN_SPARSEVECTOR_PLUGIN - filename of plugin for extending the SparseVector class.
|
||||
- \b EIGEN_TRANSFORM_PLUGIN - filename of plugin for extending the Transform class.
|
||||
- \b EIGEN_FUNCTORS_PLUGIN - filename of plugin for adding new functors and specializations of functor_traits.
|
||||
|
||||
|
||||
\section TopicPreprocessorDirectivesDevelopers Macros for Eigen developers
|
||||
|
||||
These macros are mainly meant for people developing %Eigen and for testing purposes. Even though they might be useful for power users and the curious for debugging and testing purposes, they \b should \b not \b be \b used by real-world code.
|
||||
|
||||
- \b EIGEN_DEFAULT_TO_ROW_MAJOR - when defined, the default storage order for matrices becomes row-major
|
||||
instead of column-major. Not defined by default.
|
||||
- \b EIGEN_INTERNAL_DEBUGGING - if defined, enables assertions in %Eigen's internal routines. This is useful
|
||||
for debugging %Eigen itself. Not defined by default.
|
||||
- \b EIGEN_NO_MALLOC - if defined, any request from inside the %Eigen to allocate memory from the heap
|
||||
results in an assertion failure. This is useful to check that some routine does not allocate memory
|
||||
dynamically. Not defined by default.
|
||||
- \b EIGEN_RUNTIME_NO_MALLOC - if defined, a new switch is introduced which can be turned on and off by
|
||||
calling <tt>set_is_malloc_allowed(bool)</tt>. If malloc is not allowed and %Eigen tries to allocate memory
|
||||
dynamically anyway, an assertion failure results. Not defined by default.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,727 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage QuickRefPage Quick reference guide
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
<hr>
|
||||
|
||||
<a href="#" class="top">top</a>
|
||||
\section QuickRef_Headers Modules and Header files
|
||||
|
||||
The Eigen library is divided into a Core module and several additional modules. Each module has a corresponding header file which has to be included in order to use the module. The \c %Dense and \c Eigen header files are provided to conveniently gain access to several modules at once.
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Module</th><th>Header file</th><th>Contents</th></tr>
|
||||
<tr><td>\link Core_Module Core \endlink</td><td>\code#include <Eigen/Core>\endcode</td><td>Matrix and Array classes, basic linear algebra (including triangular and selfadjoint products), array manipulation</td></tr>
|
||||
<tr class="alt"><td>\link Geometry_Module Geometry \endlink</td><td>\code#include <Eigen/Geometry>\endcode</td><td>Transform, Translation, Scaling, Rotation2D and 3D rotations (Quaternion, AngleAxis)</td></tr>
|
||||
<tr><td>\link LU_Module LU \endlink</td><td>\code#include <Eigen/LU>\endcode</td><td>Inverse, determinant, LU decompositions with solver (FullPivLU, PartialPivLU)</td></tr>
|
||||
<tr><td>\link Cholesky_Module Cholesky \endlink</td><td>\code#include <Eigen/Cholesky>\endcode</td><td>LLT and LDLT Cholesky factorization with solver</td></tr>
|
||||
<tr class="alt"><td>\link Householder_Module Householder \endlink</td><td>\code#include <Eigen/Householder>\endcode</td><td>Householder transformations; this module is used by several linear algebra modules</td></tr>
|
||||
<tr><td>\link SVD_Module SVD \endlink</td><td>\code#include <Eigen/SVD>\endcode</td><td>SVD decomposition with least-squares solver (JacobiSVD)</td></tr>
|
||||
<tr class="alt"><td>\link QR_Module QR \endlink</td><td>\code#include <Eigen/QR>\endcode</td><td>QR decomposition with solver (HouseholderQR, ColPivHouseholderQR, FullPivHouseholderQR)</td></tr>
|
||||
<tr><td>\link Eigenvalues_Module Eigenvalues \endlink</td><td>\code#include <Eigen/Eigenvalues>\endcode</td><td>Eigenvalue, eigenvector decompositions (EigenSolver, SelfAdjointEigenSolver, ComplexEigenSolver)</td></tr>
|
||||
<tr class="alt"><td>\link Sparse_modules Sparse \endlink</td><td>\code#include <Eigen/Sparse>\endcode</td><td>%Sparse matrix storage and related basic linear algebra (SparseMatrix, DynamicSparseMatrix, SparseVector)</td></tr>
|
||||
<tr><td></td><td>\code#include <Eigen/Dense>\endcode</td><td>Includes Core, Geometry, LU, Cholesky, SVD, QR, and Eigenvalues header files</td></tr>
|
||||
<tr class="alt"><td></td><td>\code#include <Eigen/Eigen>\endcode</td><td>Includes %Dense and %Sparse header files (the whole Eigen library)</td></tr>
|
||||
</table>
|
||||
|
||||
<a href="#" class="top">top</a>
|
||||
\section QuickRef_Types Array, matrix and vector types
|
||||
|
||||
|
||||
\b Recall: Eigen provides two kinds of dense objects: mathematical matrices and vectors which are both represented by the template class Matrix, and general 1D and 2D arrays represented by the template class Array:
|
||||
\code
|
||||
typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, Options> MyMatrixType;
|
||||
typedef Array<Scalar, RowsAtCompileTime, ColsAtCompileTime, Options> MyArrayType;
|
||||
\endcode
|
||||
|
||||
\li \c Scalar is the scalar type of the coefficients (e.g., \c float, \c double, \c bool, \c int, etc.).
|
||||
\li \c RowsAtCompileTime and \c ColsAtCompileTime are the number of rows and columns of the matrix as known at compile-time or \c Dynamic.
|
||||
\li \c Options can be \c ColMajor or \c RowMajor, default is \c ColMajor. (see class Matrix for more options)
|
||||
|
||||
All combinations are allowed: you can have a matrix with a fixed number of rows and a dynamic number of columns, etc. The following are all valid:
|
||||
\code
|
||||
Matrix<double, 6, Dynamic> // Dynamic number of columns (heap allocation)
|
||||
Matrix<double, Dynamic, 2> // Dynamic number of rows (heap allocation)
|
||||
Matrix<double, Dynamic, Dynamic, RowMajor> // Fully dynamic, row major (heap allocation)
|
||||
Matrix<double, 13, 3> // Fully fixed (usually allocated on stack)
|
||||
\endcode
|
||||
|
||||
In most cases, you can simply use one of the convenience typedefs for \ref matrixtypedefs "matrices" and \ref arraytypedefs "arrays". Some examples:
|
||||
<table class="example">
|
||||
<tr><th>Matrices</th><th>Arrays</th></tr>
|
||||
<tr><td>\code
|
||||
Matrix<float,Dynamic,Dynamic> <=> MatrixXf
|
||||
Matrix<double,Dynamic,1> <=> VectorXd
|
||||
Matrix<int,1,Dynamic> <=> RowVectorXi
|
||||
Matrix<float,3,3> <=> Matrix3f
|
||||
Matrix<float,4,1> <=> Vector4f
|
||||
\endcode</td><td>\code
|
||||
Array<float,Dynamic,Dynamic> <=> ArrayXXf
|
||||
Array<double,Dynamic,1> <=> ArrayXd
|
||||
Array<int,1,Dynamic> <=> RowArrayXi
|
||||
Array<float,3,3> <=> Array33f
|
||||
Array<float,4,1> <=> Array4f
|
||||
\endcode</td></tr>
|
||||
</table>
|
||||
|
||||
Conversion between the matrix and array worlds:
|
||||
\code
|
||||
Array44f a1, a2;
|
||||
Matrix4f m1, m2;
|
||||
m1 = a1 * a2; // coeffwise product, implicit conversion from array to matrix.
|
||||
a1 = m1 * m2; // matrix product, implicit conversion from matrix to array.
|
||||
a2 = a1 + m1.array(); // mixing array and matrix is forbidden
|
||||
m2 = a1.matrix() + m1; // and explicit conversion is required.
|
||||
ArrayWrapper<Matrix4f> m1a(m1); // m1a is an alias for m1.array(), they share the same coefficients
|
||||
MatrixWrapper<Array44f> a1m(a1);
|
||||
\endcode
|
||||
|
||||
In the rest of this document we will use the following symbols to emphasize the features which are specifics to a given kind of object:
|
||||
\li <a name="matrixonly"></a>\matrixworld linear algebra matrix and vector only
|
||||
\li <a name="arrayonly"></a>\arrayworld array objects only
|
||||
|
||||
\subsection QuickRef_Basics Basic matrix manipulation
|
||||
|
||||
<table class="manual">
|
||||
<tr><th></th><th>1D objects</th><th>2D objects</th><th>Notes</th></tr>
|
||||
<tr><td>Constructors</td>
|
||||
<td>\code
|
||||
Vector4d v4;
|
||||
Vector2f v1(x, y);
|
||||
Array3i v2(x, y, z);
|
||||
Vector4d v3(x, y, z, w);
|
||||
|
||||
VectorXf v5; // empty object
|
||||
ArrayXf v6(size);
|
||||
\endcode</td><td>\code
|
||||
Matrix4f m1;
|
||||
|
||||
|
||||
|
||||
|
||||
MatrixXf m5; // empty object
|
||||
MatrixXf m6(nb_rows, nb_columns);
|
||||
\endcode</td><td class="note">
|
||||
By default, the coefficients \n are left uninitialized</td></tr>
|
||||
<tr class="alt"><td>Comma initializer</td>
|
||||
<td>\code
|
||||
Vector3f v1; v1 << x, y, z;
|
||||
ArrayXf v2(4); v2 << 1, 2, 3, 4;
|
||||
|
||||
\endcode</td><td>\code
|
||||
Matrix3f m1; m1 << 1, 2, 3,
|
||||
4, 5, 6,
|
||||
7, 8, 9;
|
||||
\endcode</td><td></td></tr>
|
||||
|
||||
<tr><td>Comma initializer (bis)</td>
|
||||
<td colspan="2">
|
||||
\include Tutorial_commainit_02.cpp
|
||||
</td>
|
||||
<td>
|
||||
output:
|
||||
\verbinclude Tutorial_commainit_02.out
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
<tr class="alt"><td>Runtime info</td>
|
||||
<td>\code
|
||||
vector.size();
|
||||
|
||||
vector.innerStride();
|
||||
vector.data();
|
||||
\endcode</td><td>\code
|
||||
matrix.rows(); matrix.cols();
|
||||
matrix.innerSize(); matrix.outerSize();
|
||||
matrix.innerStride(); matrix.outerStride();
|
||||
matrix.data();
|
||||
\endcode</td><td class="note">Inner/Outer* are storage order dependent</td></tr>
|
||||
<tr><td>Compile-time info</td>
|
||||
<td colspan="2">\code
|
||||
ObjectType::Scalar ObjectType::RowsAtCompileTime
|
||||
ObjectType::RealScalar ObjectType::ColsAtCompileTime
|
||||
ObjectType::Index ObjectType::SizeAtCompileTime
|
||||
\endcode</td><td></td></tr>
|
||||
<tr class="alt"><td>Resizing</td>
|
||||
<td>\code
|
||||
vector.resize(size);
|
||||
|
||||
|
||||
vector.resizeLike(other_vector);
|
||||
vector.conservativeResize(size);
|
||||
\endcode</td><td>\code
|
||||
matrix.resize(nb_rows, nb_cols);
|
||||
matrix.resize(Eigen::NoChange, nb_cols);
|
||||
matrix.resize(nb_rows, Eigen::NoChange);
|
||||
matrix.resizeLike(other_matrix);
|
||||
matrix.conservativeResize(nb_rows, nb_cols);
|
||||
\endcode</td><td class="note">no-op if the new sizes match,<br/>otherwise data are lost<br/><br/>resizing with data preservation</td></tr>
|
||||
|
||||
<tr><td>Coeff access with \n range checking</td>
|
||||
<td>\code
|
||||
vector(i) vector.x()
|
||||
vector[i] vector.y()
|
||||
vector.z()
|
||||
vector.w()
|
||||
\endcode</td><td>\code
|
||||
matrix(i,j)
|
||||
\endcode</td><td class="note">Range checking is disabled if \n NDEBUG or EIGEN_NO_DEBUG is defined</td></tr>
|
||||
|
||||
<tr class="alt"><td>Coeff access without \n range checking</td>
|
||||
<td>\code
|
||||
vector.coeff(i)
|
||||
vector.coeffRef(i)
|
||||
\endcode</td><td>\code
|
||||
matrix.coeff(i,j)
|
||||
matrix.coeffRef(i,j)
|
||||
\endcode</td><td></td></tr>
|
||||
|
||||
<tr><td>Assignment/copy</td>
|
||||
<td colspan="2">\code
|
||||
object = expression;
|
||||
object_of_float = expression_of_double.cast<float>();
|
||||
\endcode</td><td class="note">the destination is automatically resized (if possible)</td></tr>
|
||||
|
||||
</table>
|
||||
|
||||
\subsection QuickRef_PredefMat Predefined Matrices
|
||||
|
||||
<table class="manual">
|
||||
<tr>
|
||||
<th>Fixed-size matrix or vector</th>
|
||||
<th>Dynamic-size matrix</th>
|
||||
<th>Dynamic-size vector</th>
|
||||
</tr>
|
||||
<tr style="border-bottom-style: none;">
|
||||
<td>
|
||||
\code
|
||||
typedef {Matrix3f|Array33f} FixedXD;
|
||||
FixedXD x;
|
||||
|
||||
x = FixedXD::Zero();
|
||||
x = FixedXD::Ones();
|
||||
x = FixedXD::Constant(value);
|
||||
x = FixedXD::Random();
|
||||
x = FixedXD::LinSpaced(size, low, high);
|
||||
|
||||
x.setZero();
|
||||
x.setOnes();
|
||||
x.setConstant(value);
|
||||
x.setRandom();
|
||||
x.setLinSpaced(size, low, high);
|
||||
\endcode
|
||||
</td>
|
||||
<td>
|
||||
\code
|
||||
typedef {MatrixXf|ArrayXXf} Dynamic2D;
|
||||
Dynamic2D x;
|
||||
|
||||
x = Dynamic2D::Zero(rows, cols);
|
||||
x = Dynamic2D::Ones(rows, cols);
|
||||
x = Dynamic2D::Constant(rows, cols, value);
|
||||
x = Dynamic2D::Random(rows, cols);
|
||||
N/A
|
||||
|
||||
x.setZero(rows, cols);
|
||||
x.setOnes(rows, cols);
|
||||
x.setConstant(rows, cols, value);
|
||||
x.setRandom(rows, cols);
|
||||
N/A
|
||||
\endcode
|
||||
</td>
|
||||
<td>
|
||||
\code
|
||||
typedef {VectorXf|ArrayXf} Dynamic1D;
|
||||
Dynamic1D x;
|
||||
|
||||
x = Dynamic1D::Zero(size);
|
||||
x = Dynamic1D::Ones(size);
|
||||
x = Dynamic1D::Constant(size, value);
|
||||
x = Dynamic1D::Random(size);
|
||||
x = Dynamic1D::LinSpaced(size, low, high);
|
||||
|
||||
x.setZero(size);
|
||||
x.setOnes(size);
|
||||
x.setConstant(size, value);
|
||||
x.setRandom(size);
|
||||
x.setLinSpaced(size, low, high);
|
||||
\endcode
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
<tr><td colspan="3">Identity and \link MatrixBase::Unit basis vectors \endlink \matrixworld</td></tr>
|
||||
<tr style="border-bottom-style: none;">
|
||||
<td>
|
||||
\code
|
||||
x = FixedXD::Identity();
|
||||
x.setIdentity();
|
||||
|
||||
Vector3f::UnitX() // 1 0 0
|
||||
Vector3f::UnitY() // 0 1 0
|
||||
Vector3f::UnitZ() // 0 0 1
|
||||
\endcode
|
||||
</td>
|
||||
<td>
|
||||
\code
|
||||
x = Dynamic2D::Identity(rows, cols);
|
||||
x.setIdentity(rows, cols);
|
||||
|
||||
|
||||
|
||||
N/A
|
||||
\endcode
|
||||
</td>
|
||||
<td>\code
|
||||
N/A
|
||||
|
||||
|
||||
VectorXf::Unit(size,i)
|
||||
VectorXf::Unit(4,1) == Vector4f(0,1,0,0)
|
||||
== Vector4f::UnitY()
|
||||
\endcode
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
|
||||
\subsection QuickRef_Map Mapping external arrays
|
||||
|
||||
<table class="manual">
|
||||
<tr>
|
||||
<td>Contiguous \n memory</td>
|
||||
<td>\code
|
||||
float data[] = {1,2,3,4};
|
||||
Map<Vector3f> v1(data); // uses v1 as a Vector3f object
|
||||
Map<ArrayXf> v2(data,3); // uses v2 as a ArrayXf object
|
||||
Map<Array22f> m1(data); // uses m1 as a Array22f object
|
||||
Map<MatrixXf> m2(data,2,2); // uses m2 as a MatrixXf object
|
||||
\endcode</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Typical usage \n of strides</td>
|
||||
<td>\code
|
||||
float data[] = {1,2,3,4,5,6,7,8,9};
|
||||
Map<VectorXf,0,InnerStride<2> > v1(data,3); // = [1,3,5]
|
||||
Map<VectorXf,0,InnerStride<> > v2(data,3,InnerStride<>(3)); // = [1,4,7]
|
||||
Map<MatrixXf,0,OuterStride<3> > m2(data,2,3); // both lines |1,4,7|
|
||||
Map<MatrixXf,0,OuterStride<> > m1(data,2,3,OuterStride<>(3)); // are equal to: |2,5,8|
|
||||
\endcode</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
<a href="#" class="top">top</a>
|
||||
\section QuickRef_ArithmeticOperators Arithmetic Operators
|
||||
|
||||
<table class="manual">
|
||||
<tr><td>
|
||||
add \n subtract</td><td>\code
|
||||
mat3 = mat1 + mat2; mat3 += mat1;
|
||||
mat3 = mat1 - mat2; mat3 -= mat1;\endcode
|
||||
</td></tr>
|
||||
<tr class="alt"><td>
|
||||
scalar product</td><td>\code
|
||||
mat3 = mat1 * s1; mat3 *= s1; mat3 = s1 * mat1;
|
||||
mat3 = mat1 / s1; mat3 /= s1;\endcode
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
matrix/vector \n products \matrixworld</td><td>\code
|
||||
col2 = mat1 * col1;
|
||||
row2 = row1 * mat1; row1 *= mat1;
|
||||
mat3 = mat1 * mat2; mat3 *= mat1; \endcode
|
||||
</td></tr>
|
||||
<tr class="alt"><td>
|
||||
transposition \n adjoint \matrixworld</td><td>\code
|
||||
mat1 = mat2.transpose(); mat1.transposeInPlace();
|
||||
mat1 = mat2.adjoint(); mat1.adjointInPlace();
|
||||
\endcode
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
\link MatrixBase::dot() dot \endlink product \n inner product \matrixworld</td><td>\code
|
||||
scalar = vec1.dot(vec2);
|
||||
scalar = col1.adjoint() * col2;
|
||||
scalar = (col1.adjoint() * col2).value();\endcode
|
||||
</td></tr>
|
||||
<tr class="alt"><td>
|
||||
outer product \matrixworld</td><td>\code
|
||||
mat = col1 * col2.transpose();\endcode
|
||||
</td></tr>
|
||||
|
||||
<tr><td>
|
||||
\link MatrixBase::norm() norm \endlink \n \link MatrixBase::normalized() normalization \endlink \matrixworld</td><td>\code
|
||||
scalar = vec1.norm(); scalar = vec1.squaredNorm()
|
||||
vec2 = vec1.normalized(); vec1.normalize(); // inplace \endcode
|
||||
</td></tr>
|
||||
|
||||
<tr class="alt"><td>
|
||||
\link MatrixBase::cross() cross product \endlink \matrixworld</td><td>\code
|
||||
#include <Eigen/Geometry>
|
||||
vec3 = vec1.cross(vec2);\endcode</td></tr>
|
||||
</table>
|
||||
|
||||
<a href="#" class="top">top</a>
|
||||
\section QuickRef_Coeffwise Coefficient-wise \& Array operators
|
||||
Coefficient-wise operators for matrices and vectors:
|
||||
<table class="manual">
|
||||
<tr><th>Matrix API \matrixworld</th><th>Via Array conversions</th></tr>
|
||||
<tr><td>\code
|
||||
mat1.cwiseMin(mat2)
|
||||
mat1.cwiseMax(mat2)
|
||||
mat1.cwiseAbs2()
|
||||
mat1.cwiseAbs()
|
||||
mat1.cwiseSqrt()
|
||||
mat1.cwiseProduct(mat2)
|
||||
mat1.cwiseQuotient(mat2)\endcode
|
||||
</td><td>\code
|
||||
mat1.array().min(mat2.array())
|
||||
mat1.array().max(mat2.array())
|
||||
mat1.array().abs2()
|
||||
mat1.array().abs()
|
||||
mat1.array().sqrt()
|
||||
mat1.array() * mat2.array()
|
||||
mat1.array() / mat2.array()
|
||||
\endcode</td></tr>
|
||||
</table>
|
||||
|
||||
It is also very simple to apply any user defined function \c foo using DenseBase::unaryExpr together with std::ptr_fun:
|
||||
\code mat1.unaryExpr(std::ptr_fun(foo))\endcode
|
||||
|
||||
Array operators:\arrayworld
|
||||
|
||||
<table class="manual">
|
||||
<tr><td>Arithmetic operators</td><td>\code
|
||||
array1 * array2 array1 / array2 array1 *= array2 array1 /= array2
|
||||
array1 + scalar array1 - scalar array1 += scalar array1 -= scalar
|
||||
\endcode</td></tr>
|
||||
<tr><td>Comparisons</td><td>\code
|
||||
array1 < array2 array1 > array2 array1 < scalar array1 > scalar
|
||||
array1 <= array2 array1 >= array2 array1 <= scalar array1 >= scalar
|
||||
array1 == array2 array1 != array2 array1 == scalar array1 != scalar
|
||||
\endcode</td></tr>
|
||||
<tr><td>Trigo, power, and \n misc functions \n and the STL variants</td><td>\code
|
||||
array1.min(array2)
|
||||
array1.max(array2)
|
||||
array1.abs2()
|
||||
array1.abs() abs(array1)
|
||||
array1.sqrt() sqrt(array1)
|
||||
array1.log() log(array1)
|
||||
array1.exp() exp(array1)
|
||||
array1.pow(exponent) pow(array1,exponent)
|
||||
array1.square()
|
||||
array1.cube()
|
||||
array1.inverse()
|
||||
array1.sin() sin(array1)
|
||||
array1.cos() cos(array1)
|
||||
array1.tan() tan(array1)
|
||||
array1.asin() asin(array1)
|
||||
array1.acos() acos(array1)
|
||||
\endcode
|
||||
</td></tr>
|
||||
</table>
|
||||
|
||||
<a href="#" class="top">top</a>
|
||||
\section QuickRef_Reductions Reductions
|
||||
|
||||
Eigen provides several reduction methods such as:
|
||||
\link DenseBase::minCoeff() minCoeff() \endlink, \link DenseBase::maxCoeff() maxCoeff() \endlink,
|
||||
\link DenseBase::sum() sum() \endlink, \link DenseBase::prod() prod() \endlink,
|
||||
\link MatrixBase::trace() trace() \endlink \matrixworld,
|
||||
\link MatrixBase::norm() norm() \endlink \matrixworld, \link MatrixBase::squaredNorm() squaredNorm() \endlink \matrixworld,
|
||||
\link DenseBase::all() all() \endlink, and \link DenseBase::any() any() \endlink.
|
||||
All reduction operations can be done matrix-wise,
|
||||
\link DenseBase::colwise() column-wise \endlink or
|
||||
\link DenseBase::rowwise() row-wise \endlink. Usage example:
|
||||
<table class="manual">
|
||||
<tr><td rowspan="3" style="border-right-style:dashed;vertical-align:middle">\code
|
||||
5 3 1
|
||||
mat = 2 7 8
|
||||
9 4 6 \endcode
|
||||
</td> <td>\code mat.minCoeff(); \endcode</td><td>\code 1 \endcode</td></tr>
|
||||
<tr class="alt"><td>\code mat.colwise().minCoeff(); \endcode</td><td>\code 2 3 1 \endcode</td></tr>
|
||||
<tr style="vertical-align:middle"><td>\code mat.rowwise().minCoeff(); \endcode</td><td>\code
|
||||
1
|
||||
2
|
||||
4
|
||||
\endcode</td></tr>
|
||||
</table>
|
||||
|
||||
Special versions of \link DenseBase::minCoeff(IndexType*,IndexType*) const minCoeff \endlink and \link DenseBase::maxCoeff(IndexType*,IndexType*) const maxCoeff \endlink:
|
||||
\code
|
||||
int i, j;
|
||||
s = vector.minCoeff(&i); // s == vector[i]
|
||||
s = matrix.maxCoeff(&i, &j); // s == matrix(i,j)
|
||||
\endcode
|
||||
Typical use cases of all() and any():
|
||||
\code
|
||||
if((array1 > 0).all()) ... // if all coefficients of array1 are greater than 0 ...
|
||||
if((array1 < array2).any()) ... // if there exists a pair i,j such that array1(i,j) < array2(i,j) ...
|
||||
\endcode
|
||||
|
||||
|
||||
<a href="#" class="top">top</a>\section QuickRef_Blocks Sub-matrices
|
||||
|
||||
Read-write access to a \link DenseBase::col(Index) column \endlink
|
||||
or a \link DenseBase::row(Index) row \endlink of a matrix (or array):
|
||||
\code
|
||||
mat1.row(i) = mat2.col(j);
|
||||
mat1.col(j1).swap(mat1.col(j2));
|
||||
\endcode
|
||||
|
||||
Read-write access to sub-vectors:
|
||||
<table class="manual">
|
||||
<tr>
|
||||
<th>Default versions</th>
|
||||
<th>Optimized versions when the size \n is known at compile time</th></tr>
|
||||
<th></th>
|
||||
|
||||
<tr><td>\code vec1.head(n)\endcode</td><td>\code vec1.head<n>()\endcode</td><td>the first \c n coeffs </td></tr>
|
||||
<tr><td>\code vec1.tail(n)\endcode</td><td>\code vec1.tail<n>()\endcode</td><td>the last \c n coeffs </td></tr>
|
||||
<tr><td>\code vec1.segment(pos,n)\endcode</td><td>\code vec1.segment<n>(pos)\endcode</td>
|
||||
<td>the \c n coeffs in the \n range [\c pos : \c pos + \c n - 1]</td></tr>
|
||||
<tr class="alt"><td colspan="3">
|
||||
|
||||
Read-write access to sub-matrices:</td></tr>
|
||||
<tr>
|
||||
<td>\code mat1.block(i,j,rows,cols)\endcode
|
||||
\link DenseBase::block(Index,Index,Index,Index) (more) \endlink</td>
|
||||
<td>\code mat1.block<rows,cols>(i,j)\endcode
|
||||
\link DenseBase::block(Index,Index) (more) \endlink</td>
|
||||
<td>the \c rows x \c cols sub-matrix \n starting from position (\c i,\c j)</td></tr>
|
||||
<tr><td>\code
|
||||
mat1.topLeftCorner(rows,cols)
|
||||
mat1.topRightCorner(rows,cols)
|
||||
mat1.bottomLeftCorner(rows,cols)
|
||||
mat1.bottomRightCorner(rows,cols)\endcode
|
||||
<td>\code
|
||||
mat1.topLeftCorner<rows,cols>()
|
||||
mat1.topRightCorner<rows,cols>()
|
||||
mat1.bottomLeftCorner<rows,cols>()
|
||||
mat1.bottomRightCorner<rows,cols>()\endcode
|
||||
<td>the \c rows x \c cols sub-matrix \n taken in one of the four corners</td></tr>
|
||||
<tr><td>\code
|
||||
mat1.topRows(rows)
|
||||
mat1.bottomRows(rows)
|
||||
mat1.leftCols(cols)
|
||||
mat1.rightCols(cols)\endcode
|
||||
<td>\code
|
||||
mat1.topRows<rows>()
|
||||
mat1.bottomRows<rows>()
|
||||
mat1.leftCols<cols>()
|
||||
mat1.rightCols<cols>()\endcode
|
||||
<td>specialized versions of block() \n when the block fits two corners</td></tr>
|
||||
</table>
|
||||
|
||||
|
||||
|
||||
<a href="#" class="top">top</a>\section QuickRef_Misc Miscellaneous operations
|
||||
|
||||
\subsection QuickRef_Reverse Reverse
|
||||
Vectors, rows, and/or columns of a matrix can be reversed (see DenseBase::reverse(), DenseBase::reverseInPlace(), VectorwiseOp::reverse()).
|
||||
\code
|
||||
vec.reverse() mat.colwise().reverse() mat.rowwise().reverse()
|
||||
vec.reverseInPlace()
|
||||
\endcode
|
||||
|
||||
\subsection QuickRef_Replicate Replicate
|
||||
Vectors, matrices, rows, and/or columns can be replicated in any direction (see DenseBase::replicate(), VectorwiseOp::replicate())
|
||||
\code
|
||||
vec.replicate(times)                                          vec.replicate<Times>()
|
||||
mat.replicate(vertical_times, horizontal_times) mat.replicate<VerticalTimes, HorizontalTimes>()
|
||||
mat.colwise().replicate(vertical_times, horizontal_times) mat.colwise().replicate<VerticalTimes, HorizontalTimes>()
|
||||
mat.rowwise().replicate(vertical_times, horizontal_times) mat.rowwise().replicate<VerticalTimes, HorizontalTimes>()
|
||||
\endcode
|
||||
|
||||
|
||||
<a href="#" class="top">top</a>\section QuickRef_DiagTriSymm Diagonal, Triangular, and Self-adjoint matrices
|
||||
(matrix world \matrixworld)
|
||||
|
||||
\subsection QuickRef_Diagonal Diagonal matrices
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Operation</th><th>Code</th></tr>
|
||||
<tr><td>
|
||||
view a vector \link MatrixBase::asDiagonal() as a diagonal matrix \endlink \n </td><td>\code
|
||||
mat1 = vec1.asDiagonal();\endcode
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
Declare a diagonal matrix</td><td>\code
|
||||
DiagonalMatrix<Scalar,SizeAtCompileTime> diag1(size);
|
||||
diag1.diagonal() = vector;\endcode
|
||||
</td></tr>
|
||||
<tr><td>Access the \link MatrixBase::diagonal() diagonal \endlink and \link MatrixBase::diagonal(Index) super/sub diagonals \endlink of a matrix as a vector (read/write)</td>
|
||||
<td>\code
|
||||
vec1 = mat1.diagonal(); mat1.diagonal() = vec1; // main diagonal
|
||||
vec1 = mat1.diagonal(+n); mat1.diagonal(+n) = vec1; // n-th super diagonal
|
||||
vec1 = mat1.diagonal(-n); mat1.diagonal(-n) = vec1; // n-th sub diagonal
|
||||
vec1 = mat1.diagonal<1>(); mat1.diagonal<1>() = vec1; // first super diagonal
|
||||
vec1 = mat1.diagonal<-2>(); mat1.diagonal<-2>() = vec1; // second sub diagonal
|
||||
\endcode</td>
|
||||
</tr>
|
||||
|
||||
<tr><td>Optimized products and inverse</td>
|
||||
<td>\code
|
||||
mat3 = scalar * diag1 * mat1;
|
||||
mat3 += scalar * mat1 * vec1.asDiagonal();
|
||||
mat3 = vec1.asDiagonal().inverse() * mat1
|
||||
mat3 = mat1 * diag1.inverse()
|
||||
\endcode</td>
|
||||
</tr>
|
||||
|
||||
</table>
|
||||
|
||||
\subsection QuickRef_TriangularView Triangular views
|
||||
|
||||
TriangularView gives a view on a triangular part of a dense matrix and allows to perform optimized operations on it. The opposite triangular part is never referenced and can be used to store other information.
|
||||
|
||||
\note The .triangularView() template member function requires the \c template keyword if it is used on an
|
||||
object of a type that depends on a template parameter; see \ref TopicTemplateKeyword for details.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Operation</th><th>Code</th></tr>
|
||||
<tr><td>
|
||||
Reference to a triangular with optional \n
|
||||
unit or null diagonal (read/write):
|
||||
</td><td>\code
|
||||
m.triangularView<Xxx>()
|
||||
\endcode \n
|
||||
\c Xxx = ::Upper, ::Lower, ::StrictlyUpper, ::StrictlyLower, ::UnitUpper, ::UnitLower
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
Writing to a specific triangular part:\n (only the referenced triangular part is evaluated)
|
||||
</td><td>\code
|
||||
m1.triangularView<Eigen::Lower>() = m2 + m3 \endcode
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
Conversion to a dense matrix setting the opposite triangular part to zero:
|
||||
</td><td>\code
|
||||
m2 = m1.triangularView<Eigen::UnitUpper>()\endcode
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
Products:
|
||||
</td><td>\code
|
||||
m3 += s1 * m1.adjoint().triangularView<Eigen::UnitUpper>() * m2
|
||||
m3 -= s1 * m2.conjugate() * m1.adjoint().triangularView<Eigen::Lower>() \endcode
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
Solving linear equations:\n
|
||||
\f$ M_2 := L_1^{-1} M_2 \f$ \n
|
||||
\f$ M_3 := {L_1^*}^{-1} M_3 \f$ \n
|
||||
\f$ M_4 := M_4 U_1^{-1} \f$
|
||||
</td><td>\n \code
|
||||
L1.triangularView<Eigen::UnitLower>().solveInPlace(M2)
|
||||
L1.triangularView<Eigen::Lower>().adjoint().solveInPlace(M3)
|
||||
U1.triangularView<Eigen::Upper>().solveInPlace<OnTheRight>(M4)\endcode
|
||||
</td></tr>
|
||||
</table>
|
||||
|
||||
\subsection QuickRef_SelfadjointMatrix Symmetric/selfadjoint views
|
||||
|
||||
Just as for triangular matrix, you can reference any triangular part of a square matrix to see it as a selfadjoint
|
||||
matrix and perform special and optimized operations. Again the opposite triangular part is never referenced and can be
|
||||
used to store other information.
|
||||
|
||||
\note The .selfadjointView() template member function requires the \c template keyword if it is used on an
|
||||
object of a type that depends on a template parameter; see \ref TopicTemplateKeyword for details.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Operation</th><th>Code</th></tr>
|
||||
<tr><td>
|
||||
Conversion to a dense matrix:
|
||||
</td><td>\code
|
||||
m2 = m.selfadjointView<Eigen::Lower>();\endcode
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
Product with another general matrix or vector:
|
||||
</td><td>\code
|
||||
m3 = s1 * m1.conjugate().selfadjointView<Eigen::Upper>() * m3;
|
||||
m3 -= s1 * m3.adjoint() * m1.selfadjointView<Eigen::Lower>();\endcode
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
Rank 1 and rank K update: \n
|
||||
\f$ upper(M_1) \mathrel{{+}{=}} s_1 M_2 M_2^* \f$ \n
|
||||
\f$ lower(M_1) \mathbin{{-}{=}} M_2^* M_2 \f$
|
||||
</td><td>\n \code
|
||||
M1.selfadjointView<Eigen::Upper>().rankUpdate(M2,s1);
|
||||
M1.selfadjointView<Eigen::Lower>().rankUpdate(M2.adjoint(),-1); \endcode
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
Rank 2 update: (\f$ M \mathrel{{+}{=}} s u v^* + s v u^* \f$)
|
||||
</td><td>\code
|
||||
M.selfadjointView<Eigen::Upper>().rankUpdate(u,v,s);
|
||||
\endcode
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
Solving linear equations:\n(\f$ M_2 := M_1^{-1} M_2 \f$)
|
||||
</td><td>\code
|
||||
// via a standard Cholesky factorization
|
||||
m2 = m1.selfadjointView<Eigen::Upper>().llt().solve(m2);
|
||||
// via a Cholesky factorization with pivoting
|
||||
m2 = m1.selfadjointView<Eigen::Lower>().ldlt().solve(m2);
|
||||
\endcode
|
||||
</td></tr>
|
||||
</table>
|
||||
|
||||
*/
|
||||
|
||||
/*
|
||||
<table class="tutorial_code">
|
||||
<tr><td>
|
||||
\link MatrixBase::asDiagonal() make a diagonal matrix \endlink \n from a vector </td><td>\code
|
||||
mat1 = vec1.asDiagonal();\endcode
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
Declare a diagonal matrix</td><td>\code
|
||||
DiagonalMatrix<Scalar,SizeAtCompileTime> diag1(size);
|
||||
diag1.diagonal() = vector;\endcode
|
||||
</td></tr>
|
||||
<tr><td>Access \link MatrixBase::diagonal() the diagonal and super/sub diagonals of a matrix \endlink as a vector (read/write)</td>
|
||||
<td>\code
|
||||
vec1 = mat1.diagonal(); mat1.diagonal() = vec1; // main diagonal
|
||||
vec1 = mat1.diagonal(+n); mat1.diagonal(+n) = vec1; // n-th super diagonal
|
||||
vec1 = mat1.diagonal(-n); mat1.diagonal(-n) = vec1; // n-th sub diagonal
|
||||
vec1 = mat1.diagonal<1>(); mat1.diagonal<1>() = vec1; // first super diagonal
|
||||
vec1 = mat1.diagonal<-2>(); mat1.diagonal<-2>() = vec1; // second sub diagonal
|
||||
\endcode</td>
|
||||
</tr>
|
||||
|
||||
<tr><td>View on a triangular part of a matrix (read/write)</td>
|
||||
<td>\code
|
||||
mat2 = mat1.triangularView<Xxx>();
|
||||
// Xxx = Upper, Lower, StrictlyUpper, StrictlyLower, UnitUpper, UnitLower
|
||||
mat1.triangularView<Upper>() = mat2 + mat3; // only the upper part is evaluated and referenced
|
||||
\endcode</td></tr>
|
||||
|
||||
<tr><td>View a triangular part as a symmetric/self-adjoint matrix (read/write)</td>
|
||||
<td>\code
|
||||
mat2 = mat1.selfadjointView<Xxx>(); // Xxx = Upper or Lower
|
||||
mat1.selfadjointView<Upper>() = mat2 + mat2.adjoint(); // evaluated and write to the upper triangular part only
|
||||
\endcode</td></tr>
|
||||
|
||||
</table>
|
||||
|
||||
Optimized products:
|
||||
\code
|
||||
mat3 += scalar * vec1.asDiagonal() * mat1
|
||||
mat3 += scalar * mat1 * vec1.asDiagonal()
|
||||
mat3.noalias() += scalar * mat1.triangularView<Xxx>() * mat2
|
||||
mat3.noalias() += scalar * mat2 * mat1.triangularView<Xxx>()
|
||||
mat3.noalias() += scalar * mat1.selfadjointView<Upper or Lower>() * mat2
|
||||
mat3.noalias() += scalar * mat2 * mat1.selfadjointView<Upper or Lower>()
|
||||
mat1.selfadjointView<Upper or Lower>().rankUpdate(mat2);
|
||||
mat1.selfadjointView<Upper or Lower>().rankUpdate(mat2.adjoint(), scalar);
|
||||
\endcode
|
||||
|
||||
Inverse products: (all are optimized)
|
||||
\code
|
||||
mat3 = vec1.asDiagonal().inverse() * mat1
|
||||
mat3 = mat1 * diag1.inverse()
|
||||
mat1.triangularView<Xxx>().solveInPlace(mat2)
|
||||
mat1.triangularView<Xxx>().solveInPlace<OnTheRight>(mat2)
|
||||
mat2 = mat1.selfadjointView<Upper or Lower>().llt().solve(mat2)
|
||||
\endcode
|
||||
|
||||
*/
|
||||
}
|
|
@ -1,100 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page GettingStarted Getting started
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
This is a very short guide on how to get started with Eigen. It has a dual purpose. It serves as a minimal introduction to the Eigen library for people who want to start coding as soon as possible. You can also read this page as the first part of the Tutorial, which explains the library in more detail; in this case you will continue with \ref TutorialMatrixClass.
|
||||
|
||||
\section GettingStartedInstallation How to "install" Eigen?
|
||||
|
||||
In order to use Eigen, you just need to download and extract Eigen's source code (see <a href="http://eigen.tuxfamily.org/index.php?title=Main_Page#Download">the wiki</a> for download instructions). In fact, the header files in the \c Eigen subdirectory are the only files required to compile programs using Eigen. The header files are the same for all platforms. It is not necessary to use CMake or install anything.
|
||||
|
||||
|
||||
\section GettingStartedFirstProgram A simple first program
|
||||
|
||||
Here is a rather simple program to get you started.
|
||||
|
||||
\include QuickStart_example.cpp
|
||||
|
||||
We will explain the program after telling you how to compile it.
|
||||
|
||||
|
||||
\section GettingStartedCompiling Compiling and running your first program
|
||||
|
||||
There is no library to link to. The only thing that you need to keep in mind when compiling the above program is that the compiler must be able to find the Eigen header files. The directory in which you placed Eigen's source code must be in the include path. With GCC you use the -I option to achieve this, so you can compile the program with a command like this:
|
||||
|
||||
\code g++ -I /path/to/eigen/ my_program.cpp -o my_program \endcode
|
||||
|
||||
On Linux or Mac OS X, another option is to symlink or copy the Eigen folder into /usr/local/include/. This way, you can compile the program with:
|
||||
|
||||
\code g++ my_program.cpp -o my_program \endcode
|
||||
|
||||
When you run the program, it produces the following output:
|
||||
|
||||
\include QuickStart_example.out
|
||||
|
||||
|
||||
\section GettingStartedExplanation Explanation of the first program
|
||||
|
||||
The Eigen header files define many types, but for simple applications it may be enough to use only the \c MatrixXd type. This represents a matrix of arbitrary size (hence the \c X in \c MatrixXd), in which every entry is a \c double (hence the \c d in \c MatrixXd). See the \ref QuickRef_Types "quick reference guide" for an overview of the different types you can use to represent a matrix.
|
||||
|
||||
The \c Eigen/Dense header file defines all member functions for the MatrixXd type and related types (see also the \ref QuickRef_Headers "table of header files"). All classes and functions defined in this header file (and other Eigen header files) are in the \c Eigen namespace.
|
||||
|
||||
The first line of the \c main function declares a variable of type \c MatrixXd and specifies that it is a matrix with 2 rows and 2 columns (the entries are not initialized). The statement <tt>m(0,0) = 3</tt> sets the entry in the top-left corner to 3. You need to use round parentheses to refer to entries in the matrix. As usual in computer science, the index of the first element is 0, as opposed to the convention in mathematics that the first index is 1.
|
||||
|
||||
The following three statements set the other three entries. The final line outputs the matrix \c m to the standard output stream.
|
||||
|
||||
|
||||
\section GettingStartedExample2 Example 2: Matrices and vectors
|
||||
|
||||
Here is another example, which combines matrices with vectors. Concentrate on the left-hand program for now; we will talk about the right-hand program later.
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Size set at run time:</th><th>Size set at compile time:</th></tr>
|
||||
<tr><td>
|
||||
\include QuickStart_example2_dynamic.cpp
|
||||
</td>
|
||||
<td>
|
||||
\include QuickStart_example2_fixed.cpp
|
||||
</td></tr></table>
|
||||
|
||||
The output is as follows:
|
||||
|
||||
\include QuickStart_example2_dynamic.out
|
||||
|
||||
|
||||
\section GettingStartedExplanation2 Explanation of the second example
|
||||
|
||||
The second example starts by declaring a 3-by-3 matrix \c m which is initialized using the \link DenseBase::Random(Index,Index) Random() \endlink method with random values between -1 and 1. The next line applies a linear mapping such that the values are between 10 and 110. The function call \link DenseBase::Constant(Index,Index,const Scalar&) MatrixXd::Constant\endlink(3,3,1.2) returns a 3-by-3 matrix expression having all coefficients equal to 1.2. The rest is standard arithmetic.
|
||||
|
||||
The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left uninitialized. The one but last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows:
|
||||
|
||||
\f[
|
||||
v =
|
||||
\begin{bmatrix}
|
||||
1 \\
|
||||
2 \\
|
||||
3
|
||||
\end{bmatrix}.
|
||||
\f]
|
||||
|
||||
The final line of the program multiplies the matrix \c m with the vector \c v and outputs the result.
|
||||
|
||||
Now look back at the second example program. We presented two versions of it. In the version in the left column, the matrix is of type \c MatrixXd which represents matrices of arbitrary size. The version in the right column is similar, except that the matrix is of type \c Matrix3d, which represents matrices of a fixed size (here 3-by-3). Because the type already encodes the size of the matrix, it is not necessary to specify the size in the constructor; compare <tt>MatrixXd m(3,3)</tt> with <tt>Matrix3d m</tt>. Similarly, we have \c VectorXd on the left (arbitrary size) versus \c Vector3d on the right (fixed size). Note that here the coefficients of vector \c v are directly set in the constructor, though the same syntax of the left example could be used too.
|
||||
|
||||
The use of fixed-size matrices and vectors has two advantages. The compiler emits better (faster) code because it knows the size of the matrices and vectors. Specifying the size in the type also allows for more rigorous checking at compile-time. For instance, the compiler will complain if you try to multiply a \c Matrix4d (a 4-by-4 matrix) with a \c Vector3d (a vector of size 3). However, the use of many types increases compilation time and the size of the executable. The size of the matrix may also not be known at compile-time. A rule of thumb is to use fixed-size matrices for size 4-by-4 and smaller.
|
||||
|
||||
|
||||
\section GettingStartedConclusion Where to go from here?
|
||||
|
||||
It's worth taking the time to read the \ref TutorialMatrixClass "long tutorial".
|
||||
|
||||
However if you think you don't need it, you can directly use the classes documentation and our \ref QuickRefPage.
|
||||
|
||||
\li \b Next: \ref TutorialMatrixClass
|
||||
|
||||
*/
|
||||
|
||||
}
|
||||
|
|
@ -1,183 +0,0 @@
|
|||
namespace Eigen {
|
||||
/** \eigenManualPage TopicSparseSystems Solving Sparse Linear Systems
|
||||
In Eigen, there are several methods available to solve linear systems when the coefficient matrix is sparse. Because of the special representation of this class of matrices, special care should be taken in order to get a good performance. See \ref TutorialSparse for a detailed introduction about sparse matrices in Eigen. This page lists the sparse solvers available in Eigen. The main steps that are common to all these linear solvers are introduced as well. Depending on the properties of the matrix, the desired accuracy, the end-user is able to tune those steps in order to improve the performance of its code. Note that it is not required to know deeply what's hiding behind these steps: the last section presents a benchmark routine that can be easily used to get an insight on the performance of all the available solvers.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section TutorialSparseDirectSolvers Sparse solvers
|
||||
|
||||
%Eigen currently provides a limited set of built-in solvers, as well as wrappers to external solver libraries.
|
||||
They are summarized in the following table:
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Class</th><th>Module</th><th>Solver kind</th><th>Matrix kind</th><th>Features related to performance</th>
|
||||
<th>Dependencies,License</th><th class="width20em"><p>Notes</p></th></tr>
|
||||
<tr><td>SimplicialLLT </td><td>\link SparseCholesky_Module SparseCholesky \endlink</td><td>Direct LLt factorization</td><td>SPD</td><td>Fill-in reducing</td>
|
||||
<td>built-in, LGPL</td>
|
||||
<td>SimplicialLDLT is often preferable</td></tr>
|
||||
<tr><td>SimplicialLDLT </td><td>\link SparseCholesky_Module SparseCholesky \endlink</td><td>Direct LDLt factorization</td><td>SPD</td><td>Fill-in reducing</td>
|
||||
<td>built-in, LGPL</td>
|
||||
<td>Recommended for very sparse and not too large problems (e.g., 2D Poisson eq.)</td></tr>
|
||||
<tr><td>ConjugateGradient</td><td>\link IterativeLinearSolvers_Module IterativeLinearSolvers \endlink</td><td>Classic iterative CG</td><td>SPD</td><td>Preconditioning</td>
|
||||
<td>built-in, MPL2</td>
|
||||
<td>Recommended for large symmetric problems (e.g., 3D Poisson eq.)</td></tr>
|
||||
<tr><td>BiCGSTAB</td><td>\link IterativeLinearSolvers_Module IterativeLinearSolvers \endlink</td><td>Iterative stabilized bi-conjugate gradient</td><td>Square</td><td>Preconditioning</td>
|
||||
<td>built-in, MPL2</td>
|
||||
<td>To speedup the convergence, try it with the \ref IncompleteLUT preconditioner.</td></tr>
|
||||
<tr><td>SparseLU</td> <td>\link SparseLU_Module SparseLU \endlink </td> <td>LU factorization </td>
|
||||
<td>Square </td><td>Fill-in reducing, Leverage fast dense algebra</td>
|
||||
<td> built-in, MPL2</td> <td>optimized for small and large problems with irregular patterns </td></tr>
|
||||
<tr><td>SparseQR</td> <td>\link SparseQR_Module SparseQR \endlink</td> <td> QR factorization</td>
|
||||
<td>Any, rectangular</td><td> Fill-in reducing</td>
|
||||
<td>built-in, MPL2</td><td>recommended for least-square problems, has a basic rank-revealing feature</td></tr>
|
||||
<tr> <th colspan="7"> Wrappers to external solvers </th></tr>
|
||||
<tr><td>PastixLLT \n PastixLDLT \n PastixLU</td><td>\link PaStiXSupport_Module PaStiXSupport \endlink</td><td>Direct LLt, LDLt, LU factorizations</td><td>SPD \n SPD \n Square</td><td>Fill-in reducing, Leverage fast dense algebra, Multithreading</td>
|
||||
<td>Requires the <a href="http://pastix.gforge.inria.fr">PaStiX</a> package, \b CeCILL-C </td>
|
||||
<td>optimized for tough problems and symmetric patterns</td></tr>
|
||||
<tr><td>CholmodSupernodalLLT</td><td>\link CholmodSupport_Module CholmodSupport \endlink</td><td>Direct LLt factorization</td><td>SPD</td><td>Fill-in reducing, Leverage fast dense algebra</td>
|
||||
<td>Requires the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package, \b GPL </td>
|
||||
<td></td></tr>
|
||||
<tr><td>UmfPackLU</td><td>\link UmfPackSupport_Module UmfPackSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
|
||||
<td>Requires the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package, \b GPL </td>
|
||||
<td></td></tr>
|
||||
<tr><td>SuperLU</td><td>\link SuperLUSupport_Module SuperLUSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
|
||||
<td>Requires the <a href="http://crd-legacy.lbl.gov/~xiaoye/SuperLU/">SuperLU</a> library, (BSD-like)</td>
|
||||
<td></td></tr>
|
||||
<tr><td>SPQR</td><td>\link SPQRSupport_Module SPQRSupport \endlink </td> <td> QR factorization </td>
|
||||
<td> Any, rectangular</td><td>fill-in reducing, multithreaded, fast dense algebra</td>
|
||||
<td> requires the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package, \b GPL </td><td>recommended for linear least-squares problems, has a rank-revealing feature</td></tr>
|
||||
</table>
|
||||
|
||||
Here \c SPD means symmetric positive definite.
|
||||
|
||||
All these solvers follow the same general concept.
|
||||
Here is a typical and general example:
|
||||
\code
|
||||
#include <Eigen/RequiredModuleName>
|
||||
// ...
|
||||
SparseMatrix<double> A;
|
||||
// fill A
|
||||
VectorXd b, x;
|
||||
// fill b
|
||||
// solve Ax = b
|
||||
SolverClassName<SparseMatrix<double> > solver;
|
||||
solver.compute(A);
|
||||
if(solver.info()!=Success) {
|
||||
// decomposition failed
|
||||
return;
|
||||
}
|
||||
x = solver.solve(b);
|
||||
if(solver.info()!=Success) {
|
||||
// solving failed
|
||||
return;
|
||||
}
|
||||
// solve for another right hand side:
|
||||
x1 = solver.solve(b1);
|
||||
\endcode
|
||||
|
||||
For \c SPD solvers, a second optional template argument allows to specify which triangular part has to be used, e.g.:
|
||||
|
||||
\code
|
||||
#include <Eigen/IterativeLinearSolvers>
|
||||
|
||||
ConjugateGradient<SparseMatrix<double>, Eigen::Upper> solver;
|
||||
x = solver.compute(A).solve(b);
|
||||
\endcode
|
||||
In the above example, only the upper triangular part of the input matrix A is considered for solving. The opposite triangle might either be empty or contain arbitrary values.
|
||||
|
||||
In the case where multiple problems with the same sparsity pattern have to be solved, then the "compute" step can be decomposed as follows:
|
||||
\code
|
||||
SolverClassName<SparseMatrix<double> > solver;
|
||||
solver.analyzePattern(A); // for this step the numerical values of A are not used
|
||||
solver.factorize(A);
|
||||
x1 = solver.solve(b1);
|
||||
x2 = solver.solve(b2);
|
||||
...
|
||||
A = ...; // modify the values of the nonzeros of A, the nonzeros pattern must stay unchanged
|
||||
solver.factorize(A);
|
||||
x1 = solver.solve(b1);
|
||||
x2 = solver.solve(b2);
|
||||
...
|
||||
\endcode
|
||||
The compute() method is equivalent to calling both analyzePattern() and factorize().
|
||||
|
||||
Finally, each solver provides some specific features, such as determinant, access to the factors, controls of the iterations, and so on.
|
||||
More details are available in the documentations of the respective classes.
|
||||
|
||||
\section TheSparseCompute The Compute Step
|
||||
In the compute() function, the matrix is generally factorized: LLT for self-adjoint matrices, LDLT for general hermitian matrices, LU for non hermitian matrices and QR for rectangular matrices. These are the results of using direct solvers. For this class of solvers precisely, the compute step is further subdivided into analyzePattern() and factorize().
|
||||
|
||||
The goal of analyzePattern() is to reorder the nonzero elements of the matrix, such that the factorization step creates less fill-in. This step exploits only the structure of the matrix. Hence, the results of this step can be used for other linear systems where the matrix has the same structure. Note however that sometimes, some external solvers (like SuperLU) require that the values of the matrix are set in this step, for instance to equilibrate the rows and columns of the matrix. In this situation, the results of this step should not be used with other matrices.
|
||||
|
||||
Eigen provides a limited set of methods to reorder the matrix in this step, either built-in (COLAMD, AMD) or external (METIS). These methods are set in template parameter list of the solver :
|
||||
\code
|
||||
DirectSolverClassName<SparseMatrix<double>, OrderingMethod<IndexType> > solver;
|
||||
\endcode
|
||||
|
||||
See the \link OrderingMethods_Module OrderingMethods module \endlink for the list of available methods and the associated options.
|
||||
|
||||
In factorize(), the factors of the coefficient matrix are computed. This step should be called each time the values of the matrix change. However, the structural pattern of the matrix should not change between multiple calls.
|
||||
|
||||
For iterative solvers, the compute step is used to eventually setup a preconditioner. For instance, with the ILUT preconditioner, the incomplete factors L and U are computed in this step. Remember that, basically, the goal of the preconditioner is to speedup the convergence of an iterative method by solving a modified linear system where the coefficient matrix has more clustered eigenvalues. For real problems, an iterative solver should always be used with a preconditioner. In Eigen, a preconditioner is selected by simply adding it as a template parameter to the iterative solver object.
|
||||
\code
|
||||
IterativeSolverClassName<SparseMatrix<double>, PreconditionerName<SparseMatrix<double> > > solver;
|
||||
\endcode
|
||||
The member function preconditioner() returns a read-write reference to the preconditioner
|
||||
to directly interact with it. See the \link IterativeLinearSolvers_Module Iterative solvers module \endlink and the documentation of each class for the list of available methods.
|
||||
|
||||
\section TheSparseSolve The Solve step
|
||||
The solve() function computes the solution of the linear systems with one or many right hand sides.
|
||||
\code
|
||||
X = solver.solve(B);
|
||||
\endcode
|
||||
Here, B can be a vector or a matrix where the columns form the different right hand sides. The solve() function can be called several times as well, for instance when all the right hand sides are not available at once.
|
||||
\code
|
||||
x1 = solver.solve(b1);
|
||||
// Get the second right hand side b2
|
||||
x2 = solver.solve(b2);
|
||||
// ...
|
||||
\endcode
|
||||
For direct methods, the solutions are computed at the machine precision. Sometimes, the solution need not be too accurate. In this case, the iterative methods are more suitable and the desired accuracy can be set before the solve step using \b setTolerance(). For all the available functions, please, refer to the documentation of the \link IterativeLinearSolvers_Module Iterative solvers module \endlink.
|
||||
|
||||
\section BenchmarkRoutine
|
||||
Most of the time, all you need is to know how much time it will take to solve your system, and hopefully, what is the most suitable solver. In Eigen, we provide a benchmark routine that can be used for this purpose. It is very easy to use. In the build directory, navigate to bench/spbench and compile the routine by typing \b make \e spbenchsolver. Run it with --help option to get the list of all available options. Basically, the matrices to test should be in <a href="http://math.nist.gov/MatrixMarket/formats.html">MatrixMarket Coordinate format</a>, and the routine returns the statistics from all available solvers in Eigen.
|
||||
|
||||
The following table gives an example of XML statistics from several Eigen built-in and external solvers.
|
||||
<TABLE border="1">
|
||||
<TR><TH>Matrix <TH> N <TH> NNZ <TH> <TH > UMFPACK <TH > SUPERLU <TH > PASTIX LU <TH >BiCGSTAB <TH > BiCGSTAB+ILUT <TH >GMRES+ILUT<TH > LDLT <TH> CHOLMOD LDLT <TH > PASTIX LDLT <TH > LLT <TH > CHOLMOD SP LLT <TH > CHOLMOD LLT <TH > PASTIX LLT <TH> CG</TR>
|
||||
<TR><TH rowspan="4">vector_graphics <TD rowspan="4"> 12855 <TD rowspan="4"> 72069 <TH>Compute Time <TD>0.0254549<TD>0.0215677<TD>0.0701827<TD>0.000153388<TD>0.0140107<TD>0.0153709<TD>0.0101601<TD style="background-color:red">0.00930502<TD>0.0649689
|
||||
<TR><TH>Solve Time <TD>0.00337835<TD>0.000951826<TD>0.00484373<TD>0.0374886<TD>0.0046445<TD>0.00847754<TD>0.000541813<TD style="background-color:red">0.000293696<TD>0.00485376
|
||||
<TR><TH>Total Time <TD>0.0288333<TD>0.0225195<TD>0.0750265<TD>0.037642<TD>0.0186552<TD>0.0238484<TD>0.0107019<TD style="background-color:red">0.00959871<TD>0.0698227
|
||||
<TR><TH>Error(Iter) <TD> 1.299e-16 <TD> 2.04207e-16 <TD> 4.83393e-15 <TD> 3.94856e-11 (80) <TD> 1.03861e-12 (3) <TD> 5.81088e-14 (6) <TD> 1.97578e-16 <TD> 1.83927e-16 <TD> 4.24115e-15
|
||||
<TR><TH rowspan="4">poisson_SPD <TD rowspan="4"> 19788 <TD rowspan="4"> 308232 <TH>Compute Time <TD>0.425026<TD>1.82378<TD>0.617367<TD>0.000478921<TD>1.34001<TD>1.33471<TD>0.796419<TD>0.857573<TD>0.473007<TD>0.814826<TD style="background-color:red">0.184719<TD>0.861555<TD>0.470559<TD>0.000458188
|
||||
<TR><TH>Solve Time <TD>0.0280053<TD>0.0194402<TD>0.0268747<TD>0.249437<TD>0.0548444<TD>0.0926991<TD>0.00850204<TD>0.0053171<TD>0.0258932<TD>0.00874603<TD style="background-color:red">0.00578155<TD>0.00530361<TD>0.0248942<TD>0.239093
|
||||
<TR><TH>Total Time <TD>0.453031<TD>1.84322<TD>0.644241<TD>0.249916<TD>1.39486<TD>1.42741<TD>0.804921<TD>0.862891<TD>0.4989<TD>0.823572<TD style="background-color:red">0.190501<TD>0.866859<TD>0.495453<TD>0.239551
|
||||
<TR><TH>Error(Iter) <TD> 4.67146e-16 <TD> 1.068e-15 <TD> 1.3397e-15 <TD> 6.29233e-11 (201) <TD> 3.68527e-11 (6) <TD> 3.3168e-15 (16) <TD> 1.86376e-15 <TD> 1.31518e-16 <TD> 1.42593e-15 <TD> 3.45361e-15 <TD> 3.14575e-16 <TD> 2.21723e-15 <TD> 7.21058e-16 <TD> 9.06435e-12 (261)
|
||||
<TR><TH rowspan="4">sherman2 <TD rowspan="4"> 1080 <TD rowspan="4"> 23094 <TH>Compute Time <TD style="background-color:red">0.00631754<TD>0.015052<TD>0.0247514 <TD> -<TD>0.0214425<TD>0.0217988
|
||||
<TR><TH>Solve Time <TD style="background-color:red">0.000478424<TD>0.000337998<TD>0.0010291 <TD> -<TD>0.00243152<TD>0.00246152
|
||||
<TR><TH>Total Time <TD style="background-color:red">0.00679597<TD>0.01539<TD>0.0257805 <TD> -<TD>0.023874<TD>0.0242603
|
||||
<TR><TH>Error(Iter) <TD> 1.83099e-15 <TD> 8.19351e-15 <TD> 2.625e-14 <TD> 1.3678e+69 (1080) <TD> 4.1911e-12 (7) <TD> 5.0299e-13 (12)
|
||||
<TR><TH rowspan="4">bcsstk01_SPD <TD rowspan="4"> 48 <TD rowspan="4"> 400 <TH>Compute Time <TD>0.000169079<TD>0.00010789<TD>0.000572538<TD>1.425e-06<TD>9.1612e-05<TD>8.3985e-05<TD style="background-color:red">5.6489e-05<TD>7.0913e-05<TD>0.000468251<TD>5.7389e-05<TD>8.0212e-05<TD>5.8394e-05<TD>0.000463017<TD>1.333e-06
|
||||
<TR><TH>Solve Time <TD>1.2288e-05<TD>1.1124e-05<TD>0.000286387<TD>8.5896e-05<TD>1.6381e-05<TD>1.6984e-05<TD style="background-color:red">3.095e-06<TD>4.115e-06<TD>0.000325438<TD>3.504e-06<TD>7.369e-06<TD>3.454e-06<TD>0.000294095<TD>6.0516e-05
|
||||
<TR><TH>Total Time <TD>0.000181367<TD>0.000119014<TD>0.000858925<TD>8.7321e-05<TD>0.000107993<TD>0.000100969<TD style="background-color:red">5.9584e-05<TD>7.5028e-05<TD>0.000793689<TD>6.0893e-05<TD>8.7581e-05<TD>6.1848e-05<TD>0.000757112<TD>6.1849e-05
|
||||
<TR><TH>Error(Iter) <TD> 1.03474e-16 <TD> 2.23046e-16 <TD> 2.01273e-16 <TD> 4.87455e-07 (48) <TD> 1.03553e-16 (2) <TD> 3.55965e-16 (2) <TD> 2.48189e-16 <TD> 1.88808e-16 <TD> 1.97976e-16 <TD> 2.37248e-16 <TD> 1.82701e-16 <TD> 2.71474e-16 <TD> 2.11322e-16 <TD> 3.547e-09 (48)
|
||||
<TR><TH rowspan="4">sherman1 <TD rowspan="4"> 1000 <TD rowspan="4"> 3750 <TH>Compute Time <TD>0.00228805<TD>0.00209231<TD>0.00528268<TD>9.846e-06<TD>0.00163522<TD>0.00162155<TD>0.000789259<TD style="background-color:red">0.000804495<TD>0.00438269
|
||||
<TR><TH>Solve Time <TD>0.000213788<TD>9.7983e-05<TD>0.000938831<TD>0.00629835<TD>0.000361764<TD>0.00078794<TD>4.3989e-05<TD style="background-color:red">2.5331e-05<TD>0.000917166
|
||||
<TR><TH>Total Time <TD>0.00250184<TD>0.00219029<TD>0.00622151<TD>0.0063082<TD>0.00199698<TD>0.00240949<TD>0.000833248<TD style="background-color:red">0.000829826<TD>0.00529986
|
||||
<TR><TH>Error(Iter) <TD> 1.16839e-16 <TD> 2.25968e-16 <TD> 2.59116e-16 <TD> 3.76779e-11 (248) <TD> 4.13343e-11 (4) <TD> 2.22347e-14 (10) <TD> 2.05861e-16 <TD> 1.83555e-16 <TD> 1.02917e-15
|
||||
<TR><TH rowspan="4">young1c <TD rowspan="4"> 841 <TD rowspan="4"> 4089 <TH>Compute Time <TD>0.00235843<TD style="background-color:red">0.00217228<TD>0.00568075<TD>1.2735e-05<TD>0.00264866<TD>0.00258236
|
||||
<TR><TH>Solve Time <TD>0.000329599<TD style="background-color:red">0.000168634<TD>0.00080118<TD>0.0534738<TD>0.00187193<TD>0.00450211
|
||||
<TR><TH>Total Time <TD>0.00268803<TD style="background-color:red">0.00234091<TD>0.00648193<TD>0.0534865<TD>0.00452059<TD>0.00708447
|
||||
<TR><TH>Error(Iter) <TD> 1.27029e-16 <TD> 2.81321e-16 <TD> 5.0492e-15 <TD> 8.0507e-11 (706) <TD> 3.00447e-12 (8) <TD> 1.46532e-12 (16)
|
||||
<TR><TH rowspan="4">mhd1280b <TD rowspan="4"> 1280 <TD rowspan="4"> 22778 <TH>Compute Time <TD>0.00234898<TD>0.00207079<TD>0.00570918<TD>2.5976e-05<TD>0.00302563<TD>0.00298036<TD>0.00144525<TD style="background-color:red">0.000919922<TD>0.00426444
|
||||
<TR><TH>Solve Time <TD>0.00103392<TD>0.000211911<TD>0.00105<TD>0.0110432<TD>0.000628287<TD>0.00392089<TD>0.000138303<TD style="background-color:red">6.2446e-05<TD>0.00097564
|
||||
<TR><TH>Total Time <TD>0.0033829<TD>0.0022827<TD>0.00675918<TD>0.0110692<TD>0.00365392<TD>0.00690124<TD>0.00158355<TD style="background-color:red">0.000982368<TD>0.00524008
|
||||
<TR><TH>Error(Iter) <TD> 1.32953e-16 <TD> 3.08646e-16 <TD> 6.734e-16 <TD> 8.83132e-11 (40) <TD> 1.51153e-16 (1) <TD> 6.08556e-16 (8) <TD> 1.89264e-16 <TD> 1.97477e-16 <TD> 6.68126e-09
|
||||
<TR><TH rowspan="4">crashbasis <TD rowspan="4"> 160000 <TD rowspan="4"> 1750416 <TH>Compute Time <TD>3.2019<TD>5.7892<TD>15.7573<TD style="background-color:red">0.00383515<TD>3.1006<TD>3.09921
|
||||
<TR><TH>Solve Time <TD>0.261915<TD>0.106225<TD>0.402141<TD style="background-color:red">1.49089<TD>0.24888<TD>0.443673
|
||||
<TR><TH>Total Time <TD>3.46381<TD>5.89542<TD>16.1594<TD style="background-color:red">1.49473<TD>3.34948<TD>3.54288
|
||||
<TR><TH>Error(Iter) <TD> 1.76348e-16 <TD> 4.58395e-16 <TD> 1.67982e-14 <TD> 8.64144e-11 (61) <TD> 8.5996e-12 (2) <TD> 6.04042e-14 (5)
|
||||
|
||||
</TABLE>
|
||||
*/
|
||||
}
|
|
@ -1,248 +0,0 @@
|
|||
namespace Eigen {
|
||||
/** \eigenManualPage SparseQuickRefPage Quick reference guide for sparse matrices
|
||||
\eigenAutoToc
|
||||
|
||||
<hr>
|
||||
|
||||
In this page, we give a quick summary of the main operations available for sparse matrices in the class SparseMatrix. First, it is recommended to read the introductory tutorial at \ref TutorialSparse. The important point to have in mind when working on sparse matrices is how they are stored :
|
||||
i.e. either row major or column major. The default is column major. Most arithmetic operations on sparse matrices will assert that they have the same storage order.
|
||||
|
||||
\section SparseMatrixInit Sparse Matrix Initialization
|
||||
<table class="manual">
|
||||
<tr><th> Category </th> <th> Operations</th> <th>Notes</th></tr>
|
||||
<tr><td>Constructor</td>
|
||||
<td>
|
||||
\code
|
||||
SparseMatrix<double> sm1(1000,1000);
|
||||
SparseMatrix<std::complex<double>,RowMajor> sm2;
|
||||
\endcode
|
||||
</td> <td> Default is ColMajor</td> </tr>
|
||||
<tr class="alt">
|
||||
<td> Resize/Reserve</td>
|
||||
<td>
|
||||
\code
|
||||
sm1.resize(m,n); //Change sm1 to a m x n matrix.
|
||||
sm1.reserve(nnz); // Allocate room for nnz nonzeros elements.
|
||||
\endcode
|
||||
</td>
|
||||
<td> Note that when calling reserve(), it is not required that nnz is the exact number of nonzero elements in the final matrix. However, an exact estimation will avoid multiple reallocations during the insertion phase. </td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td> Assignment </td>
|
||||
<td>
|
||||
\code
|
||||
SparseMatrix<double,ColMajor> sm1;
|
||||
// Initialize sm2 with sm1.
|
||||
SparseMatrix<double,RowMajor> sm2(sm1), sm3;
|
||||
// Assignment and evaluations modify the storage order.
|
||||
sm3 = sm1;
|
||||
\endcode
|
||||
</td>
|
||||
<td> The copy constructor can be used to convert from a storage order to another</td>
|
||||
</tr>
|
||||
<tr class="alt">
|
||||
<td> Element-wise Insertion</td>
|
||||
<td>
|
||||
\code
|
||||
// Insert a new element;
|
||||
sm1.insert(i, j) = v_ij;
|
||||
|
||||
// Update the value v_ij
|
||||
sm1.coeffRef(i,j) = v_ij;
|
||||
sm1.coeffRef(i,j) += v_ij;
|
||||
sm1.coeffRef(i,j) -= v_ij;
|
||||
\endcode
|
||||
</td>
|
||||
<td> insert() assumes that the element does not already exist; otherwise, use coeffRef()</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td> Batch insertion</td>
|
||||
<td>
|
||||
\code
|
||||
std::vector< Eigen::Triplet<double> > tripletList;
|
||||
tripletList.reserve(estimation_of_entries);
|
||||
// -- Fill tripletList with nonzero elements...
|
||||
sm1.setFromTriplets(tripletList.begin(), tripletList.end());
|
||||
\endcode
|
||||
</td>
|
||||
<td>A complete example is available at \link TutorialSparseFilling Triplet Insertion \endlink.</td>
|
||||
</tr>
|
||||
<tr class="alt">
|
||||
<td> Constant or Random Insertion</td>
|
||||
<td>
|
||||
\code
|
||||
sm1.setZero();
|
||||
\endcode
|
||||
</td>
|
||||
<td>Remove all non-zero coefficients</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
\section SparseBasicInfos Matrix properties
|
||||
Beyond the basic functions rows() and cols(), there are some useful functions that are available to easily get some information from the matrix.
|
||||
<table class="manual">
|
||||
<tr>
|
||||
<td> \code
|
||||
sm1.rows(); // Number of rows
|
||||
sm1.cols(); // Number of columns
|
||||
sm1.nonZeros(); // Number of non zero values
|
||||
sm1.outerSize(); // Number of columns (resp. rows) for a column major (resp. row major )
|
||||
sm1.innerSize(); // Number of rows (resp. columns) for a row major (resp. column major)
|
||||
sm1.norm();              // Euclidean norm of the matrix
|
||||
sm1.squaredNorm(); // Squared norm of the matrix
|
||||
sm1.blueNorm();
|
||||
sm1.isVector(); // Check if sm1 is a sparse vector or a sparse matrix
|
||||
sm1.isCompressed(); // Check if sm1 is in compressed form
|
||||
...
|
||||
\endcode </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
\section SparseBasicOps Arithmetic operations
|
||||
It is easy to perform arithmetic operations on sparse matrices provided that the dimensions are adequate and that the matrices have the same storage order. Note that the evaluation can always be done in a matrix with a different storage order. In the following, \b sm denotes a sparse matrix, \b dm a dense matrix and \b dv a dense vector.
|
||||
<table class="manual">
|
||||
<tr><th> Operations </th> <th> Code </th> <th> Notes </th></tr>
|
||||
|
||||
<tr>
|
||||
<td> add subtract </td>
|
||||
<td> \code
|
||||
sm3 = sm1 + sm2;
|
||||
sm3 = sm1 - sm2;
|
||||
sm2 += sm1;
|
||||
sm2 -= sm1; \endcode
|
||||
</td>
|
||||
<td>
|
||||
sm1 and sm2 should have the same storage order
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
<tr class="alt"><td>
|
||||
scalar product</td><td>\code
|
||||
sm3 = sm1 * s1; sm3 *= s1;
|
||||
sm3 = s1 * sm1 + s2 * sm2; sm3 /= s1;\endcode
|
||||
</td>
|
||||
<td>
|
||||
Many combinations are possible if the dimensions and the storage order agree.
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td> %Sparse %Product </td>
|
||||
<td> \code
|
||||
sm3 = sm1 * sm2;
|
||||
dm2 = sm1 * dm1;
|
||||
dv2 = sm1 * dv1;
|
||||
\endcode </td>
|
||||
<td>
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
<tr class='alt'>
|
||||
<td> transposition, adjoint</td>
|
||||
<td> \code
|
||||
sm2 = sm1.transpose();
|
||||
sm2 = sm1.adjoint();
|
||||
\endcode </td>
|
||||
<td>
|
||||
Note that the transposition changes the storage order. There is no support for transposeInPlace().
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td> Permutation </td>
|
||||
<td>
|
||||
\code
|
||||
perm.indices(); // Reference to the vector of indices
|
||||
sm1.twistedBy(perm); // Permute rows and columns
|
||||
sm2 = sm1 * perm; //Permute the columns
|
||||
sm2 = perm * sm1; // Permute the rows
|
||||
\endcode
|
||||
</td>
|
||||
<td>
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
Component-wise ops
|
||||
</td>
|
||||
<td>\code
|
||||
sm1.cwiseProduct(sm2);
|
||||
sm1.cwiseQuotient(sm2);
|
||||
sm1.cwiseMin(sm2);
|
||||
sm1.cwiseMax(sm2);
|
||||
sm1.cwiseAbs();
|
||||
sm1.cwiseSqrt();
|
||||
\endcode</td>
|
||||
<td>
|
||||
sm1 and sm2 should have the same storage order
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
\section sparseotherops Other supported operations
|
||||
<table class="manual">
|
||||
<tr><th>Operations</th> <th> Code </th> <th> Notes</th> </tr>
|
||||
<tr>
|
||||
<td>Sub-matrices</td>
|
||||
<td>
|
||||
\code
|
||||
sm1.block(startRow, startCol, rows, cols);
|
||||
sm1.block<rows,cols>(startRow, startCol);
|
||||
sm1.topLeftCorner(rows, cols);
|
||||
sm1.topRightCorner(rows, cols);
|
||||
sm1.bottomLeftCorner( rows, cols);
|
||||
sm1.bottomRightCorner( rows, cols);
|
||||
\endcode
|
||||
</td> <td> </td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td> Range </td>
|
||||
<td>
|
||||
\code
|
||||
sm1.innerVector(outer);
|
||||
sm1.innerVectors(start, size);
|
||||
sm1.leftCols(size);
|
||||
sm2.rightCols(size);
|
||||
sm1.middleRows(start, numRows);
|
||||
sm1.middleCols(start, numCols);
|
||||
sm1.col(j);
|
||||
\endcode
|
||||
</td>
|
||||
<td>A inner vector is either a row (for row-major) or a column (for column-major). As stated earlier, the evaluation can be done in a matrix with different storage order </td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td> Triangular and selfadjoint views</td>
|
||||
<td>
|
||||
\code
|
||||
sm2 = sm1.triangularView<Lower>();
|
||||
sm2 = sm1.selfadjointView<Lower>();
|
||||
\endcode
|
||||
</td>
|
||||
<td> Several combination between triangular views and blocks views are possible
|
||||
\code
|
||||
\endcode </td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Triangular solve </td>
|
||||
<td>
|
||||
\code
|
||||
dv2 = sm1.triangularView<Upper>().solve(dv1);
|
||||
dv2 = sm1.topLeftCorner(size, size).triangularView<Lower>().solve(dv1);
|
||||
\endcode
|
||||
</td>
|
||||
<td> For general sparse solve, Use any suitable module described at \ref TopicSparseSystems </td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td> Low-level API</td>
|
||||
<td>
|
||||
\code
|
||||
sm1.valuePtr(); // Pointer to the values
|
||||
sm1.innerIndexPtr();  // Pointer to the indices.
|
||||
sm1.outerIndexPtr(); //Pointer to the beginning of each inner vector
|
||||
\endcode
|
||||
</td>
|
||||
<td> If the matrix is not in compressed form, makeCompressed() should be called before. Note that these functions are mostly provided for interoperability purposes with external libraries. A better access to the values of the matrix is done by using the InnerIterator class as described in \link TutorialSparse the Tutorial Sparse \endlink section</td>
|
||||
</tr>
|
||||
</table>
|
||||
*/
|
||||
}
|
|
@ -1,62 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TopicStlContainers Using STL Containers with Eigen
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section summary Executive summary
|
||||
|
||||
Using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", or classes having members of such types, requires taking the following two steps:
|
||||
|
||||
\li A 16-byte-aligned allocator must be used. Eigen does provide one ready for use: aligned_allocator.
|
||||
\li If you want to use the std::vector container, you need to \#include <Eigen/StdVector>.
|
||||
|
||||
These issues arise only with \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member". For other Eigen types, such as Vector3f or MatrixXd, no special care is needed when using STL containers.
|
||||
|
||||
\section allocator Using an aligned allocator
|
||||
|
||||
STL containers take an optional template parameter, the allocator type. When using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you need to tell the container to use an allocator that will always allocate memory at 16-byte-aligned locations. Fortunately, Eigen does provide such an allocator: Eigen::aligned_allocator.
|
||||
|
||||
For example, instead of
|
||||
\code
|
||||
std::map<int, Eigen::Vector4f>
|
||||
\endcode
|
||||
you need to use
|
||||
\code
|
||||
std::map<int, Eigen::Vector4f, std::less<int>,
|
||||
Eigen::aligned_allocator<std::pair<const int, Eigen::Vector4f> > >
|
||||
\endcode
|
||||
Note that the third parameter "std::less<int>" is just the default value, but we have to include it because we want to specify the fourth parameter, which is the allocator type.
|
||||
|
||||
\section vector The case of std::vector
|
||||
|
||||
The situation with std::vector was even worse (explanation below) so we had to specialize it for the Eigen::aligned_allocator type. In practice you \b must use the Eigen::aligned_allocator (not another aligned allocator), \b and \#include <Eigen/StdVector>.
|
||||
|
||||
Here is an example:
|
||||
\code
|
||||
#include<Eigen/StdVector>
|
||||
/* ... */
|
||||
std::vector<Eigen::Vector4f,Eigen::aligned_allocator<Eigen::Vector4f> >
|
||||
\endcode
|
||||
|
||||
\subsection vector_spec An alternative - specializing std::vector for Eigen types
|
||||
|
||||
As an alternative to the recommended approach described above, you have the option to specialize std::vector for Eigen types requiring alignment.
|
||||
The advantage is that you won't need to declare std::vector all over with Eigen::aligned_allocator. One drawback, on the other hand, is that
|
||||
the specialization needs to be defined before all code pieces in which e.g. std::vector<Vector2d> is used. Otherwise, without knowing the specialization
|
||||
the compiler will compile that particular instance with the default std::allocator and your program is most likely to crash.
|
||||
|
||||
Here is an example:
|
||||
\code
|
||||
#include<Eigen/StdVector>
|
||||
/* ... */
|
||||
EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Vector2d)
|
||||
std::vector<Eigen::Vector2d>
|
||||
\endcode
|
||||
|
||||
<span class="note">\b Explanation: The resize() method of std::vector takes a value_type argument (defaulting to value_type()). So with std::vector<Eigen::Vector4f>, some Eigen::Vector4f objects will be passed by value, which discards any alignment modifiers, so an Eigen::Vector4f can be created at an unaligned location. In order to avoid that, the only solution we saw was to specialize std::vector to make it work on a slight modification of, here, Eigen::Vector4f, that is able to deal properly with this situation.
|
||||
</span>
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,86 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TopicStorageOrders Storage orders
|
||||
|
||||
There are two different storage orders for matrices and two-dimensional arrays: column-major and row-major.
|
||||
This page explains these storage orders and how to specify which one should be used.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
|
||||
\section TopicStorageOrdersIntro Column-major and row-major storage
|
||||
|
||||
The entries of a matrix form a two-dimensional grid. However, when the matrix is stored in memory, the entries
|
||||
have to somehow be laid out linearly. There are two main ways to do this, by row and by column.
|
||||
|
||||
We say that a matrix is stored in \b row-major order if it is stored row by row. The entire first row is
|
||||
stored first, followed by the entire second row, and so on. Consider for example the matrix
|
||||
|
||||
\f[
|
||||
A = \begin{bmatrix}
|
||||
8 & 2 & 2 & 9 \\
|
||||
9 & 1 & 4 & 4 \\
|
||||
3 & 5 & 4 & 5
|
||||
\end{bmatrix}.
|
||||
\f]
|
||||
|
||||
If this matrix is stored in row-major order, then the entries are laid out in memory as follows:
|
||||
|
||||
\code 8 2 2 9 9 1 4 4 3 5 4 5 \endcode
|
||||
|
||||
On the other hand, a matrix is stored in \b column-major order if it is stored column by column, starting with
|
||||
the entire first column, followed by the entire second column, and so on. If the above matrix is stored in
|
||||
column-major order, it is laid out as follows:
|
||||
|
||||
\code 8 9 3 2 1 5 2 4 4 9 4 5 \endcode
|
||||
|
||||
This example is illustrated by the following Eigen code. It uses the PlainObjectBase::data() function, which
|
||||
returns a pointer to the memory location of the first entry of the matrix.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example</th><th>Output</th></tr>
|
||||
<tr><td>
|
||||
\include TopicStorageOrders_example.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude TopicStorageOrders_example.out
|
||||
</td></tr></table>
|
||||
|
||||
|
||||
\section TopicStorageOrdersInEigen Storage orders in Eigen
|
||||
|
||||
The storage order of a matrix or a two-dimensional array can be set by specifying the \c Options template
|
||||
parameter for Matrix or Array. As \ref TutorialMatrixClass explains, the %Matrix class template has six
|
||||
template parameters, of which three are compulsory (\c Scalar, \c RowsAtCompileTime and \c ColsAtCompileTime)
|
||||
and three are optional (\c Options, \c MaxRowsAtCompileTime and \c MaxColsAtCompileTime). If the \c Options
|
||||
parameter is set to \c RowMajor, then the matrix or array is stored in row-major order; if it is set to
|
||||
\c ColMajor, then it is stored in column-major order. This mechanism is used in the above Eigen program to
|
||||
specify the storage order.
|
||||
|
||||
If the storage order is not specified, then Eigen defaults to storing the entries in column-major order. This is also
|
||||
the case if one of the convenience typedefs (\c Matrix3f, \c ArrayXXd, etc.) is used.
|
||||
|
||||
Matrices and arrays using one storage order can be assigned to matrices and arrays using the other storage
|
||||
order, as happens in the above program when \c Arowmajor is initialized using \c Acolmajor. Eigen will reorder
|
||||
the entries automatically. More generally, row-major and column-major matrices can be mixed in an expression
|
||||
as we want.
|
||||
|
||||
|
||||
\section TopicStorageOrdersWhich Which storage order to choose?
|
||||
|
||||
So, which storage order should you use in your program? There is no simple answer to this question; it depends
|
||||
on your application. Here are some points to keep in mind:
|
||||
|
||||
- Your users may expect you to use a specific storage order. Alternatively, you may use other libraries than
|
||||
Eigen, and these other libraries may expect a certain storage order. In these cases it may be easiest and
|
||||
fastest to use this storage order in your whole program.
|
||||
- Algorithms that traverse a matrix row by row will go faster when the matrix is stored in row-major order
|
||||
because of better data locality. Similarly, column-by-column traversal is faster for column-major
|
||||
matrices. It may be worthwhile to experiment a bit to find out what is faster for your particular
|
||||
application.
|
||||
- The default in Eigen is column-major. Naturally, most of the development and testing of the Eigen library
|
||||
is thus done with column-major matrices. This means that, even though we aim to support column-major and
|
||||
row-major storage orders transparently, the Eigen library may well work best with column-major matrices.
|
||||
|
||||
*/
|
||||
}
|
|
@ -1,190 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TopicStructHavingEigenMembers Structures Having Eigen Members
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section summary Executive Summary
|
||||
|
||||
If you define a structure having members of \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you must overload its "operator new" so that it generates 16-bytes-aligned pointers. Fortunately, Eigen provides you with a macro EIGEN_MAKE_ALIGNED_OPERATOR_NEW that does that for you.
|
||||
|
||||
\section what What kind of code needs to be changed?
|
||||
|
||||
The kind of code that needs to be changed is this:
|
||||
|
||||
\code
|
||||
class Foo
|
||||
{
|
||||
...
|
||||
Eigen::Vector2d v;
|
||||
...
|
||||
};
|
||||
|
||||
...
|
||||
|
||||
Foo *foo = new Foo;
|
||||
\endcode
|
||||
|
||||
In other words: you have a class that has as a member a \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen object", and then you dynamically create an object of that class.
|
||||
|
||||
\section how How should such code be modified?
|
||||
|
||||
Very easy, you just need to put a EIGEN_MAKE_ALIGNED_OPERATOR_NEW macro in a public part of your class, like this:
|
||||
|
||||
\code
|
||||
class Foo
|
||||
{
|
||||
...
|
||||
Eigen::Vector2d v;
|
||||
...
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
...
|
||||
|
||||
Foo *foo = new Foo;
|
||||
\endcode
|
||||
|
||||
This macro makes "new Foo" always return an aligned pointer.
|
||||
|
||||
If this approach is too intrusive, see also the \ref othersolutions.
|
||||
|
||||
\section why Why is this needed?
|
||||
|
||||
OK let's say that your code looks like this:
|
||||
|
||||
\code
|
||||
class Foo
|
||||
{
|
||||
...
|
||||
Eigen::Vector2d v;
|
||||
...
|
||||
};
|
||||
|
||||
...
|
||||
|
||||
Foo *foo = new Foo;
|
||||
\endcode
|
||||
|
||||
An Eigen::Vector2d consists of 2 doubles, which is 128 bits. Which is exactly the size of an SSE packet, which makes it possible to use SSE for all sorts of operations on this vector. But SSE instructions (at least the ones that Eigen uses, which are the fast ones) require 128-bit alignment. Otherwise you get a segmentation fault.
|
||||
|
||||
For this reason, Eigen takes care by itself to require 128-bit alignment for Eigen::Vector2d, by doing two things:
|
||||
\li Eigen requires 128-bit alignment for the Eigen::Vector2d's array (of 2 doubles). With GCC, this is done with a __attribute__ ((aligned(16))).
|
||||
\li Eigen overloads the "operator new" of Eigen::Vector2d so it will always return 128-bit aligned pointers.
|
||||
|
||||
Thus, normally, you don't have to worry about anything, Eigen handles alignment for you...
|
||||
|
||||
... except in one case. When you have a class Foo like above, and you dynamically allocate a new Foo as above, then, since Foo doesn't have aligned "operator new", the returned pointer foo is not necessarily 128-bit aligned.
|
||||
|
||||
The alignment attribute of the member v is then relative to the start of the class, foo. If the foo pointer wasn't aligned, then foo->v won't be aligned either!
|
||||
|
||||
The solution is to let class Foo have an aligned "operator new", as we showed in the previous section.
|
||||
|
||||
\section movetotop Should I then put all the members of Eigen types at the beginning of my class?
|
||||
|
||||
That's not required. Since Eigen takes care of declaring 128-bit alignment, all members that need it are automatically 128-bit aligned relatively to the class. So code like this works fine:
|
||||
|
||||
\code
|
||||
class Foo
|
||||
{
|
||||
double x;
|
||||
Eigen::Vector2d v;
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
\endcode
|
||||
|
||||
\section dynamicsize What about dynamic-size matrices and vectors?
|
||||
|
||||
Dynamic-size matrices and vectors, such as Eigen::VectorXd, allocate dynamically their own array of coefficients, so they take care of requiring absolute alignment automatically. So they don't cause this issue. The issue discussed here is only with \ref TopicFixedSizeVectorizable "fixed-size vectorizable matrices and vectors".
|
||||
|
||||
\section bugineigen So is this a bug in Eigen?
|
||||
|
||||
No, it's not our bug. It's more like an inherent problem of the C++98 language specification, and seems to be taken care of in the upcoming language revision: <a href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf">see this document</a>.
|
||||
|
||||
\section conditional What if I want to do this conditionally (depending on template parameters) ?
|
||||
|
||||
For this situation, we offer the macro EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign). It will generate aligned operators like EIGEN_MAKE_ALIGNED_OPERATOR_NEW if NeedsToAlign is true. It will generate operators with the default alignment if NeedsToAlign is false.
|
||||
|
||||
Example:
|
||||
|
||||
\code
|
||||
template<int n> class Foo
|
||||
{
|
||||
typedef Eigen::Matrix<float,n,1> Vector;
|
||||
enum { NeedsToAlign = (sizeof(Vector)%16)==0 };
|
||||
...
|
||||
Vector v;
|
||||
...
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
|
||||
};
|
||||
|
||||
...
|
||||
|
||||
Foo<4> *foo4 = new Foo<4>; // foo4 is guaranteed to be 128bit-aligned
|
||||
Foo<3> *foo3 = new Foo<3>; // foo3 has only the system default alignment guarantee
|
||||
\endcode
|
||||
|
||||
|
||||
\section othersolutions Other solutions
|
||||
|
||||
In case putting the EIGEN_MAKE_ALIGNED_OPERATOR_NEW macro everywhere is too intrusive, there exist at least two other solutions.
|
||||
|
||||
\subsection othersolutions1 Disabling alignment
|
||||
|
||||
The first is to disable alignment requirement for the fixed size members:
|
||||
\code
|
||||
class Foo
|
||||
{
|
||||
...
|
||||
Eigen::Matrix<double,2,1,Eigen::DontAlign> v;
|
||||
...
|
||||
};
|
||||
\endcode
|
||||
This has for effect to disable vectorization when using \c v.
|
||||
If a function of Foo uses it several times, then it is still possible to re-enable vectorization by copying it into an aligned temporary vector:
|
||||
\code
|
||||
void Foo::bar()
|
||||
{
|
||||
Eigen::Vector2d av(v);
|
||||
// use av instead of v
|
||||
...
|
||||
// if av changed, then do:
|
||||
v = av;
|
||||
}
|
||||
\endcode
|
||||
|
||||
\subsection othersolutions2 Private structure
|
||||
|
||||
The second consists of storing the fixed-size objects in a private struct which will be dynamically allocated at the construction time of the main object:
|
||||
|
||||
\code
|
||||
struct Foo_d
|
||||
{
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
Vector2d v;
|
||||
...
|
||||
};
|
||||
|
||||
|
||||
struct Foo {
|
||||
Foo() { init_d(); }
|
||||
~Foo() { delete d; }
|
||||
void bar()
|
||||
{
|
||||
// use d->v instead of v
|
||||
...
|
||||
}
|
||||
private:
|
||||
void init_d() { d = new Foo_d; }
|
||||
Foo_d* d;
|
||||
};
|
||||
\endcode
|
||||
|
||||
The clear advantage here is that the class Foo remains unchanged regarding alignment issues. The drawback is that an additional heap allocation will always be required.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,132 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page TopicTemplateKeyword The template and typename keywords in C++
|
||||
|
||||
There are two uses for the \c template and \c typename keywords in C++. One of them is fairly well known
|
||||
amongst programmers: to define templates. The other use is more obscure: to specify that an expression refers
|
||||
to a template function or a type. This regularly trips up programmers that use the %Eigen library, often
|
||||
leading to error messages from the compiler that are difficult to understand.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
|
||||
\section TopicTemplateKeywordToDefineTemplates Using the template and typename keywords to define templates
|
||||
|
||||
The \c template and \c typename keywords are routinely used to define templates. This is not the topic of this
|
||||
page as we assume that the reader is aware of this (otherwise consult a C++ book). The following example
|
||||
should illustrate this use of the \c template keyword.
|
||||
|
||||
\code
|
||||
template <typename T>
|
||||
bool isPositive(T x)
|
||||
{
|
||||
return x > 0;
|
||||
}
|
||||
\endcode
|
||||
|
||||
We could just as well have written <tt>template <class T></tt>; the keywords \c typename and \c class have the
|
||||
same meaning in this context.
|
||||
|
||||
|
||||
\section TopicTemplateKeywordExample An example showing the second use of the template keyword
|
||||
|
||||
Let us illustrate the second use of the \c template keyword with an example. Suppose we want to write a
|
||||
function which copies all entries in the upper triangular part of a matrix into another matrix, while keeping
|
||||
the lower triangular part unchanged. A straightforward implementation would be as follows:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include TemplateKeyword_simple.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude TemplateKeyword_simple.out
|
||||
</td></tr></table>
|
||||
|
||||
That works fine, but it is not very flexible. First, it only works with dynamic-size matrices of
|
||||
single-precision floats; the function \c copyUpperTriangularPart() does not accept static-size matrices or
|
||||
matrices with double-precision numbers. Second, if you use an expression such as
|
||||
<tt>mat.topLeftCorner(3,3)</tt> as the parameter \c src, then this is copied into a temporary variable of type
|
||||
MatrixXf; this copy can be avoided.
|
||||
|
||||
As explained in \ref TopicFunctionTakingEigenTypes, both issues can be resolved by making
|
||||
\c copyUpperTriangularPart() accept any object of type MatrixBase. This leads to the following code:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include TemplateKeyword_flexible.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude TemplateKeyword_flexible.out
|
||||
</td></tr></table>
|
||||
|
||||
The one line in the body of the function \c copyUpperTriangularPart() shows the second, more obscure use of
|
||||
the \c template keyword in C++. Even though it may look strange, the \c template keywords are necessary
|
||||
according to the standard. Without it, the compiler may reject the code with an error message like "no match
|
||||
for operator<".
|
||||
|
||||
|
||||
\section TopicTemplateKeywordExplanation Explanation
|
||||
|
||||
The reason that the \c template keyword is necessary in the last example has to do with the rules for how
|
||||
templates are supposed to be compiled in C++. The compiler has to check the code for correct syntax at the
|
||||
point where the template is defined, without knowing the actual value of the template arguments (\c Derived1
|
||||
and \c Derived2 in the example). That means that the compiler cannot know that <tt>dst.triangularPart</tt> is
|
||||
a member template and that the following < symbol is part of the delimiter for the template
|
||||
parameter. Another possibility would be that <tt>dst.triangularPart</tt> is a member variable with the <
|
||||
symbol referring to the <tt>operator<()</tt> function. In fact, the compiler should choose the second
|
||||
possibility, according to the standard. If <tt>dst.triangularPart</tt> is a member template (as in our case),
|
||||
the programmer should specify this explicitly with the \c template keyword and write <tt>dst.template
|
||||
triangularPart</tt>.
|
||||
|
||||
The precise rules are rather complicated, but ignoring some subtleties we can summarize them as follows:
|
||||
- A <em>dependent name</em> is a name that depends (directly or indirectly) on a template parameter. In the
|
||||
example, \c dst is a dependent name because it is of type <tt>MatrixBase<Derived1></tt> which depends
|
||||
on the template parameter \c Derived1.
|
||||
- If the code contains either one of the constructions <tt>xxx.yyy</tt> or <tt>xxx->yyy</tt> and \c xxx is a
|
||||
dependent name and \c yyy refers to a member template, then the \c template keyword must be used before
|
||||
\c yyy, leading to <tt>xxx.template yyy</tt> or <tt>xxx->template yyy</tt>.
|
||||
- If the code contains the construction <tt>xxx::yyy</tt> and \c xxx is a dependent name and \c yyy refers to a
|
||||
member typedef, then the \c typename keyword must be used before the whole construction, leading to
|
||||
<tt>typename xxx::yyy</tt>.
|
||||
|
||||
As an example where the \c typename keyword is required, consider the following code in \ref TutorialSparse
|
||||
for iterating over the non-zero entries of a sparse matrix type:
|
||||
|
||||
\code
|
||||
SparseMatrixType mat(rows,cols);
|
||||
for (int k=0; k<mat.outerSize(); ++k)
|
||||
for (SparseMatrixType::InnerIterator it(mat,k); it; ++it)
|
||||
{
|
||||
/* ... */
|
||||
}
|
||||
\endcode
|
||||
|
||||
If \c SparseMatrixType depends on a template parameter, then the \c typename keyword is required:
|
||||
|
||||
\code
|
||||
template <typename T>
|
||||
void iterateOverSparseMatrix(const SparseMatrix<T>& mat)
|
||||
{
|
||||
for (int k=0; k<mat.outerSize(); ++k)
|
||||
for (typename SparseMatrix<T>::InnerIterator it(mat,k); it; ++it)
|
||||
{
|
||||
/* ... */
|
||||
}
|
||||
}
|
||||
\endcode
|
||||
|
||||
|
||||
\section TopicTemplateKeywordResources Resources for further reading
|
||||
|
||||
For more information and a fuller explanation of this topic, the reader may consult the following sources:
|
||||
- The book "C++ Template Metaprogramming" by David Abrahams and Aleksey Gurtovoy contains a very good
|
||||
explanation in Appendix B ("The typename and template Keywords") which formed the basis for this page.
|
||||
- http://pages.cs.wisc.edu/~driscoll/typename.html
|
||||
- http://www.parashift.com/c++-faq-lite/templates.html#faq-35.18
|
||||
- http://www.comeaucomputing.com/techtalk/templates/#templateprefix
|
||||
- http://www.comeaucomputing.com/techtalk/templates/#typename
|
||||
|
||||
*/
|
||||
}
|
|
@ -1,215 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TopicAliasing Aliasing
|
||||
|
||||
In %Eigen, aliasing refers to assignment statement in which the same matrix (or array or vector) appears on the
|
||||
left and on the right of the assignment operators. Statements like <tt>mat = 2 * mat;</tt> or <tt>mat =
|
||||
mat.transpose();</tt> exhibit aliasing. The aliasing in the first example is harmless, but the aliasing in the
|
||||
second example leads to unexpected results. This page explains what aliasing is, when it is harmful, and what
|
||||
to do about it.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
|
||||
\section TopicAliasingExamples Examples
|
||||
|
||||
Here is a simple example exhibiting aliasing:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example</th><th>Output</th></tr>
|
||||
<tr><td>
|
||||
\include TopicAliasing_block.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude TopicAliasing_block.out
|
||||
</td></tr></table>
|
||||
|
||||
The output is not what one would expect. The problem is the assignment
|
||||
\code
|
||||
mat.bottomRightCorner(2,2) = mat.topLeftCorner(2,2);
|
||||
\endcode
|
||||
This assignment exhibits aliasing: the coefficient \c mat(1,1) appears both in the block
|
||||
<tt>mat.bottomRightCorner(2,2)</tt> on the left-hand side of the assignment and the block
|
||||
<tt>mat.topLeftCorner(2,2)</tt> on the right-hand side. After the assignment, the (2,2) entry in the bottom
|
||||
right corner should have the value of \c mat(1,1) before the assignment, which is 5. However, the output shows
|
||||
that \c mat(2,2) is actually 1. The problem is that %Eigen uses lazy evaluation (see
|
||||
\ref TopicEigenExpressionTemplates) for <tt>mat.topLeftCorner(2,2)</tt>. The result is similar to
|
||||
\code
|
||||
mat(1,1) = mat(0,0);
|
||||
mat(1,2) = mat(0,1);
|
||||
mat(2,1) = mat(1,0);
|
||||
mat(2,2) = mat(1,1);
|
||||
\endcode
|
||||
Thus, \c mat(2,2) is assigned the \e new value of \c mat(1,1) instead of the old value. The next section
|
||||
explains how to solve this problem by calling \link DenseBase::eval() eval()\endlink.
|
||||
|
||||
Aliasing occurs more naturally when trying to shrink a matrix. For example, the expressions <tt>vec =
|
||||
vec.head(n)</tt> and <tt>mat = mat.block(i,j,r,c)</tt> exhibit aliasing.
|
||||
|
||||
In general, aliasing cannot be detected at compile time: if \c mat in the first example were a bit bigger,
|
||||
then the blocks would not overlap, and there would be no aliasing problem. However, %Eigen does detect some
|
||||
instances of aliasing, albeit at run time. The following example exhibiting aliasing was mentioned in \ref
|
||||
TutorialMatrixArithmetic :
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example</th><th>Output</th></tr>
|
||||
<tr><td>
|
||||
\include tut_arithmetic_transpose_aliasing.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude tut_arithmetic_transpose_aliasing.out
|
||||
</td></tr></table>
|
||||
|
||||
Again, the output shows the aliasing issue. However, by default %Eigen uses a run-time assertion to detect this
|
||||
and exits with a message like
|
||||
|
||||
\verbatim
|
||||
void Eigen::DenseBase<Derived>::checkTransposeAliasing(const OtherDerived&) const
|
||||
[with OtherDerived = Eigen::Transpose<Eigen::Matrix<int, 2, 2, 0, 2, 2> >, Derived = Eigen::Matrix<int, 2, 2, 0, 2, 2>]:
|
||||
Assertion `(!internal::check_transpose_aliasing_selector<Scalar,internal::blas_traits<Derived>::IsTransposed,OtherDerived>::run(internal::extract_data(derived()), other))
|
||||
&& "aliasing detected during transposition, use transposeInPlace() or evaluate the rhs into a temporary using .eval()"' failed.
|
||||
\endverbatim
|
||||
|
||||
The user can turn %Eigen's run-time assertions like the one to detect this aliasing problem off by defining the
|
||||
EIGEN_NO_DEBUG macro, and the above program was compiled with this macro turned off in order to illustrate the
|
||||
aliasing problem. See \ref TopicAssertions for more information about %Eigen's run-time assertions.
|
||||
|
||||
|
||||
\section TopicAliasingSolution Resolving aliasing issues
|
||||
|
||||
If you understand the cause of the aliasing issue, then it is obvious what must happen to solve it: %Eigen has
|
||||
to evaluate the right-hand side fully into a temporary matrix/array and then assign it to the left-hand
|
||||
side. The function \link DenseBase::eval() eval() \endlink does precisely that.
|
||||
|
||||
For example, here is the corrected version of the first example above:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example</th><th>Output</th></tr>
|
||||
<tr><td>
|
||||
\include TopicAliasing_block_correct.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude TopicAliasing_block_correct.out
|
||||
</td></tr></table>
|
||||
|
||||
Now, \c mat(2,2) equals 5 after the assignment, as it should be.
|
||||
|
||||
The same solution also works for the second example, with the transpose: simply replace the line
|
||||
<tt>a = a.transpose();</tt> with <tt>a = a.transpose().eval();</tt>. However, in this common case there is a
|
||||
better solution. %Eigen provides the special-purpose function
|
||||
\link DenseBase::transposeInPlace() transposeInPlace() \endlink which replaces a matrix by its transpose.
|
||||
This is shown below:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example</th><th>Output</th></tr>
|
||||
<tr><td>
|
||||
\include tut_arithmetic_transpose_inplace.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude tut_arithmetic_transpose_inplace.out
|
||||
</td></tr></table>
|
||||
|
||||
If an xxxInPlace() function is available, then it is best to use it, because it indicates more clearly what you
|
||||
are doing. This may also allow %Eigen to optimize more aggressively. These are some of the xxxInPlace()
|
||||
functions provided:
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Original function</th><th>In-place function</th></tr>
|
||||
<tr> <td> MatrixBase::adjoint() </td> <td> MatrixBase::adjointInPlace() </td> </tr>
|
||||
<tr class="alt"> <td> DenseBase::reverse() </td> <td> DenseBase::reverseInPlace() </td> </tr>
|
||||
<tr> <td> LDLT::solve() </td> <td> LDLT::solveInPlace() </td> </tr>
|
||||
<tr class="alt"> <td> LLT::solve() </td> <td> LLT::solveInPlace() </td> </tr>
|
||||
<tr> <td> TriangularView::solve() </td> <td> TriangularView::solveInPlace() </td> </tr>
|
||||
<tr class="alt"> <td> DenseBase::transpose() </td> <td> DenseBase::transposeInPlace() </td> </tr>
|
||||
</table>
|
||||
|
||||
In the special case where a matrix or vector is shrunk using an expression like <tt>vec = vec.head(n)</tt>,
|
||||
you can use \link PlainObjectBase::conservativeResize() conservativeResize() \endlink.
|
||||
|
||||
|
||||
\section TopicAliasingCwise Aliasing and component-wise operations
|
||||
|
||||
As explained above, it may be dangerous if the same matrix or array occurs on both the left-hand side and the
|
||||
right-hand side of an assignment operator, and it is then often necessary to evaluate the right-hand side
|
||||
explicitly. However, applying component-wise operations (such as matrix addition, scalar multiplication and
|
||||
array multiplication) is safe.
|
||||
|
||||
The following example has only component-wise operations. Thus, there is no need for \link DenseBase::eval()
|
||||
eval() \endlink even though the same matrix appears on both sides of the assignments.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example</th><th>Output</th></tr>
|
||||
<tr><td>
|
||||
\include TopicAliasing_cwise.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude TopicAliasing_cwise.out
|
||||
</td></tr></table>
|
||||
|
||||
In general, an assignment is safe if the (i,j) entry of the expression on the right-hand side depends only on
|
||||
the (i,j) entry of the matrix or array on the left-hand side and not on any other entries. In that case it is
|
||||
not necessary to evaluate the right-hand side explicitly.
|
||||
|
||||
|
||||
\section TopicAliasingMatrixMult Aliasing and matrix multiplication
|
||||
|
||||
Matrix multiplication is the only operation in %Eigen that assumes aliasing by default. Thus, if \c matA is a
|
||||
matrix, then the statement <tt>matA = matA * matA;</tt> is safe. All other operations in %Eigen assume that
|
||||
there are no aliasing problems, either because the result is assigned to a different matrix or because it is a
|
||||
component-wise operation.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example</th><th>Output</th></tr>
|
||||
<tr><td>
|
||||
\include TopicAliasing_mult1.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude TopicAliasing_mult1.out
|
||||
</td></tr></table>
|
||||
|
||||
However, this comes at a price. When executing the expression <tt>matA = matA * matA</tt>, %Eigen evaluates the
|
||||
product in a temporary matrix which is assigned to \c matA after the computation. This is fine. But %Eigen does
|
||||
the same when the product is assigned to a different matrix (e.g., <tt>matB = matA * matA</tt>). In that case,
|
||||
it is more efficient to evaluate the product directly into \c matB instead of evaluating it first into a
|
||||
temporary matrix and copying that matrix to \c matB.
|
||||
|
||||
The user can indicate with the \link MatrixBase::noalias() noalias()\endlink function that there is no
|
||||
aliasing, as follows: <tt>matB.noalias() = matA * matA</tt>. This allows %Eigen to evaluate the matrix product
|
||||
<tt>matA * matA</tt> directly into \c matB.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example</th><th>Output</th></tr>
|
||||
<tr><td>
|
||||
\include TopicAliasing_mult2.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude TopicAliasing_mult2.out
|
||||
</td></tr></table>
|
||||
|
||||
Of course, you should not use \c noalias() when there is in fact aliasing taking place. If you do, then you
|
||||
may get wrong results:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example</th><th>Output</th></tr>
|
||||
<tr><td>
|
||||
\include TopicAliasing_mult3.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude TopicAliasing_mult3.out
|
||||
</td></tr></table>
|
||||
|
||||
|
||||
\section TopicAliasingSummary Summary
|
||||
|
||||
Aliasing occurs when the same matrix or array coefficients appear both on the left- and the right-hand side of
|
||||
an assignment operator.
|
||||
- Aliasing is harmless with coefficient-wise computations; this includes scalar multiplication and matrix or
|
||||
array addition.
|
||||
- When you multiply two matrices, %Eigen assumes that aliasing occurs. If you know that there is no aliasing,
|
||||
then you can use \link MatrixBase::noalias() noalias()\endlink.
|
||||
- In all other situations, %Eigen assumes that there is no aliasing issue and thus gives the wrong result if
|
||||
aliasing does in fact occur. To prevent this, you have to use \link DenseBase::eval() eval() \endlink or
|
||||
one of the xxxInPlace() functions.
|
||||
|
||||
*/
|
||||
}
|
|
@ -1,108 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page TopicAssertions Assertions
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section PlainAssert Assertions
|
||||
|
||||
The macro eigen_assert is defined to be \c eigen_plain_assert by default. We use eigen_plain_assert instead of \c assert to work around a known bug for GCC <= 4.3. Basically, eigen_plain_assert \a is \c assert.
|
||||
|
||||
\subsection RedefineAssert Redefining assertions
|
||||
|
||||
Both eigen_assert and eigen_plain_assert are defined in Macros.h. Defining eigen_assert indirectly gives you a chance to change its behavior. You can redefine this macro if you want to do something else such as throwing an exception, and fall back to its default behavior with eigen_plain_assert. The code below tells Eigen to throw an std::runtime_error:
|
||||
|
||||
\code
|
||||
#include <stdexcept>
|
||||
#undef eigen_assert
|
||||
#define eigen_assert(x) \
|
||||
if (!x) { throw (std::runtime_error("Put your message here")); }
|
||||
\endcode
|
||||
|
||||
\subsection DisableAssert Disabling assertions
|
||||
|
||||
Assertions cost run time and can be turned off. You can suppress eigen_assert by defining \c EIGEN_NO_DEBUG \b before including Eigen headers. \c EIGEN_NO_DEBUG is undefined by default unless \c NDEBUG is defined.
|
||||
|
||||
\section StaticAssert Static assertions
|
||||
|
||||
Static assertions are not standardized until C++11. However, in the Eigen library, there are many conditions that can and should be detected at compile time. For instance, we use static assertions to prevent the code below from compiling.
|
||||
|
||||
\code
|
||||
Matrix3d() + Matrix4d(); // adding matrices of different sizes
|
||||
Matrix4cd() * Vector3cd(); // invalid product known at compile time
|
||||
\endcode
|
||||
|
||||
Static assertions are defined in StaticAssert.h. If there is native static_assert, we use it. Otherwise, we have implemented an assertion macro that can show a limited range of messages.
|
||||
|
||||
One can easily come up with static assertions without messages, such as:
|
||||
|
||||
\code
|
||||
#define STATIC_ASSERT(x) \
|
||||
switch(0) { case 0: case x:; }
|
||||
\endcode
|
||||
|
||||
However, the example above obviously cannot tell why the assertion failed. Therefore, we define a \c struct in namespace Eigen::internal to handle available messages.
|
||||
|
||||
\code
|
||||
template<bool condition>
|
||||
struct static_assertion {};
|
||||
|
||||
template<>
|
||||
struct static_assertion<true>
|
||||
{
|
||||
enum {
|
||||
YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX,
|
||||
YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES,
|
||||
// see StaticAssert.h for all enums.
|
||||
};
|
||||
};
|
||||
\endcode
|
||||
|
||||
And then, we define EIGEN_STATIC_ASSERT(CONDITION,MSG) to access Eigen::internal::static_assertion<bool(CONDITION)>::MSG. If the condition evaluates into \c false, your compiler displays a lot of messages explaining there is no MSG in static_assert<false>. Nevertheless, this is \a not what we are interested in. As you can see, all members of static_assert<true> are ALL_CAPS_AND_THEY_ARE_SHOUTING.
|
||||
|
||||
\warning
|
||||
When using this macro, MSG should be a member of static_assertion<true>, or the static assertion \b always fails.
|
||||
Currently, it can only be used in function scope.
|
||||
|
||||
\subsection DerivedStaticAssert Derived static assertions
|
||||
|
||||
There are other macros derived from EIGEN_STATIC_ASSERT to enhance readability. Their names are self-explanatory.
|
||||
|
||||
- \b EIGEN_STATIC_ASSERT_FIXED_SIZE(TYPE) - passes if \a TYPE is fixed size.
|
||||
- \b EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(TYPE) - passes if \a TYPE is dynamic size.
|
||||
- \b EIGEN_STATIC_ASSERT_LVALUE(Derived) - fails if \a Derived is read-only.
|
||||
- \b EIGEN_STATIC_ASSERT_ARRAYXPR(Derived) - passes if \a Derived is an array expression.
|
||||
- <b>EIGEN_STATIC_ASSERT_SAME_XPR_KIND(Derived1, Derived2)</b> - fails if the two expressions are an array one and a matrix one.
|
||||
|
||||
Because Eigen handles both fixed-size and dynamic-size expressions, some conditions cannot be clearly determined at compile time. We classify them into strict assertions and permissive assertions.
|
||||
|
||||
\subsubsection StrictAssertions Strict assertions
|
||||
|
||||
These assertions fail if the condition <b>may not</b> be met. For example, MatrixXd may not be a vector, so it fails EIGEN_STATIC_ASSERT_VECTOR_ONLY.
|
||||
|
||||
- \b EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) - passes if \a TYPE must be a vector type.
|
||||
- <b>EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(TYPE, SIZE)</b> - passes if \a TYPE must be a vector of the given size.
|
||||
- <b>EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(TYPE, ROWS, COLS)</b> - passes if \a TYPE must be a matrix with given rows and columns.
|
||||
|
||||
\subsubsection PermissiveAssertions Permissive assertions
|
||||
|
||||
These assertions fail if the condition \b cannot be met. For example, MatrixXd and Matrix4d may have the same size, so they pass EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE.
|
||||
|
||||
- \b EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(TYPE0,TYPE1) - fails if the two vector expression types must have different sizes.
|
||||
- \b EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) - fails if the two matrix expression types must have different sizes.
|
||||
- \b EIGEN_STATIC_ASSERT_SIZE_1x1(TYPE) - fails if \a TYPE cannot be an 1x1 expression.
|
||||
|
||||
See StaticAssert.h for details such as what messages they throw.
|
||||
|
||||
\subsection DisableStaticAssert Disabling static assertions
|
||||
|
||||
If \c EIGEN_NO_STATIC_ASSERT is defined, static assertions turn into <tt>eigen_assert</tt>'s, working like:
|
||||
|
||||
\code
|
||||
#define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG);
|
||||
\endcode
|
||||
|
||||
This saves compile time but consumes more run time. \c EIGEN_NO_STATIC_ASSERT is undefined by default.
|
||||
|
||||
*/
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page TopicEigenExpressionTemplates Expression templates in Eigen
|
||||
|
||||
|
||||
TODO: write this dox page!
|
||||
|
||||
Is linked from the tutorial on arithmetic ops.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,65 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page TopicLazyEvaluation Lazy Evaluation and Aliasing
|
||||
|
||||
Executive summary: Eigen has intelligent compile-time mechanisms to enable lazy evaluation and removing temporaries where appropriate.
|
||||
It will handle aliasing automatically in most cases, for example with matrix products. The automatic behavior can be overridden
|
||||
manually by using the MatrixBase::eval() and MatrixBase::noalias() methods.
|
||||
|
||||
When you write a line of code involving a complex expression such as
|
||||
|
||||
\code mat1 = mat2 + mat3 * (mat4 + mat5); \endcode
|
||||
|
||||
Eigen determines automatically, for each sub-expression, whether to evaluate it into a temporary variable. Indeed, in certain cases it is better to evaluate immediately a sub-expression into a temporary variable, while in other cases it is better to avoid that.
|
||||
|
||||
A traditional math library without expression templates always evaluates all sub-expressions into temporaries. So with this code,
|
||||
|
||||
\code vec1 = vec2 + vec3; \endcode
|
||||
|
||||
a traditional library would evaluate \c vec2 + vec3 into a temporary \c vec4 and then copy \c vec4 into \c vec1. This is of course inefficient: the arrays are traversed twice, so there are a lot of useless load/store operations.
|
||||
|
||||
Expression-templates-based libraries can avoid evaluating sub-expressions into temporaries, which in many cases results in large speed improvements. This is called <i>lazy evaluation</i> as an expression is getting evaluated as late as possible, instead of immediately. However, most other expression-templates-based libraries <i>always</i> choose lazy evaluation. There are two problems with that: first, lazy evaluation is not always a good choice for performance; second, lazy evaluation can be very dangerous, for example with matrix products: doing <tt>matrix = matrix*matrix</tt> gives a wrong result if the matrix product is lazy-evaluated, because of the way matrix product works.
|
||||
|
||||
For these reasons, Eigen has intelligent compile-time mechanisms to determine automatically when to use lazy evaluation, and when on the contrary it should evaluate immediately into a temporary variable.
|
||||
|
||||
So in the basic example,
|
||||
|
||||
\code matrix1 = matrix2 + matrix3; \endcode
|
||||
|
||||
Eigen chooses lazy evaluation. Thus the arrays are traversed only once, producing optimized code. If you really want to force immediate evaluation, use \link MatrixBase::eval() eval()\endlink:
|
||||
|
||||
\code matrix1 = (matrix2 + matrix3).eval(); \endcode
|
||||
|
||||
Here is now a more involved example:
|
||||
|
||||
\code matrix1 = -matrix2 + matrix3 + 5 * matrix4; \endcode
|
||||
|
||||
Eigen chooses lazy evaluation at every stage in that example, which is clearly the correct choice. In fact, lazy evaluation is the "default choice" and Eigen will choose it except in a few circumstances.
|
||||
|
||||
<b>The first circumstance</b> in which Eigen chooses immediate evaluation, is when it sees an assignment <tt>a = b;</tt> and the expression \c b has the evaluate-before-assigning \link flags flag\endlink. The most important example of such an expression is the \link GeneralProduct matrix product expression\endlink. For example, when you do
|
||||
|
||||
\code matrix = matrix * matrix; \endcode
|
||||
|
||||
Eigen first evaluates <tt>matrix * matrix</tt> into a temporary matrix, and then copies it into the original \c matrix. This guarantees a correct result as we saw above that lazy evaluation gives wrong results with matrix products. It also doesn't cost much, as the cost of the matrix product itself is much higher.
|
||||
|
||||
What if you know that the result does not alias the operand of the product and want to force lazy evaluation? Then use \link MatrixBase::noalias() .noalias()\endlink instead. Here is an example:
|
||||
|
||||
\code matrix1.noalias() = matrix2 * matrix2; \endcode
|
||||
|
||||
Here, since we know that matrix2 is not the same matrix as matrix1, we know that lazy evaluation is not dangerous, so we may force lazy evaluation. Concretely, the effect of noalias() here is to bypass the evaluate-before-assigning \link flags flag\endlink.
|
||||
|
||||
<b>The second circumstance</b> in which Eigen chooses immediate evaluation, is when it sees a nested expression such as <tt>a + b</tt> where \c b is already an expression having the evaluate-before-nesting \link flags flag\endlink. Again, the most important example of such an expression is the \link GeneralProduct matrix product expression\endlink. For example, when you do
|
||||
|
||||
\code matrix1 = matrix2 + matrix3 * matrix4; \endcode
|
||||
|
||||
the product <tt>matrix3 * matrix4</tt> gets evaluated immediately into a temporary matrix. Indeed, experiments showed that it is often beneficial for performance to evaluate immediately matrix products when they are nested into bigger expressions.
|
||||
|
||||
<b>The third circumstance</b> in which Eigen chooses immediate evaluation, is when its cost model shows that the total cost of an operation is reduced if a sub-expression gets evaluated into a temporary. Indeed, in certain cases, an intermediate result is sufficiently costly to compute and is reused sufficiently many times, that is worth "caching". Here is an example:
|
||||
|
||||
\code matrix1 = matrix2 * (matrix3 + matrix4); \endcode
|
||||
|
||||
Here, provided the matrices have at least 2 rows and 2 columns, each coefficient of the expression <tt>matrix3 + matrix4</tt> is going to be used several times in the matrix product. Instead of computing the sum every time, it is much better to compute it once and store it in a temporary variable. Eigen understands this and evaluates <tt>matrix3 + matrix4</tt> into a temporary variable before evaluating the product.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,261 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TopicLinearAlgebraDecompositions Catalogue of dense decompositions
|
||||
|
||||
This page presents a catalogue of the dense matrix decompositions offered by Eigen.
|
||||
For an introduction on linear solvers and decompositions, check this \link TutorialLinearAlgebra page \endlink.
|
||||
|
||||
\section TopicLinAlgBigTable Catalogue of decompositions offered by Eigen
|
||||
|
||||
<table class="manual-vl">
|
||||
<tr>
|
||||
<th class="meta"></th>
|
||||
<th class="meta" colspan="5">Generic information, not Eigen-specific</th>
|
||||
<th class="meta" colspan="3">Eigen-specific</th>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<th>Decomposition</th>
|
||||
<th>Requirements on the matrix</th>
|
||||
<th>Speed</th>
|
||||
<th>Algorithm reliability and accuracy</th>
|
||||
<th>Rank-revealing</th>
|
||||
<th>Allows to compute (besides linear solving)</th>
|
||||
<th>Linear solver provided by Eigen</th>
|
||||
<th>Maturity of Eigen's implementation</th>
|
||||
<th>Optimizations</th>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>PartialPivLU</td>
|
||||
<td>Invertible</td>
|
||||
<td>Fast</td>
|
||||
<td>Depends on condition number</td>
|
||||
<td>-</td>
|
||||
<td>-</td>
|
||||
<td>Yes</td>
|
||||
<td>Excellent</td>
|
||||
<td>Blocking, Implicit MT</td>
|
||||
</tr>
|
||||
|
||||
<tr class="alt">
|
||||
<td>FullPivLU</td>
|
||||
<td>-</td>
|
||||
<td>Slow</td>
|
||||
<td>Proven</td>
|
||||
<td>Yes</td>
|
||||
<td>-</td>
|
||||
<td>Yes</td>
|
||||
<td>Excellent</td>
|
||||
<td>-</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>HouseholderQR</td>
|
||||
<td>-</td>
|
||||
<td>Fast</td>
|
||||
<td>Depends on condition number</td>
|
||||
<td>-</td>
|
||||
<td>Orthogonalization</td>
|
||||
<td>Yes</td>
|
||||
<td>Excellent</td>
|
||||
<td>Blocking</td>
|
||||
</tr>
|
||||
|
||||
<tr class="alt">
|
||||
<td>ColPivHouseholderQR</td>
|
||||
<td>-</td>
|
||||
<td>Fast</td>
|
||||
<td>Good</td>
|
||||
<td>Yes</td>
|
||||
<td>Orthogonalization</td>
|
||||
<td>Yes</td>
|
||||
<td>Excellent</td>
|
||||
<td><em>Soon: blocking</em></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>FullPivHouseholderQR</td>
|
||||
<td>-</td>
|
||||
<td>Slow</td>
|
||||
<td>Proven</td>
|
||||
<td>Yes</td>
|
||||
<td>Orthogonalization</td>
|
||||
<td>Yes</td>
|
||||
<td>Average</td>
|
||||
<td>-</td>
|
||||
</tr>
|
||||
|
||||
<tr class="alt">
|
||||
<td>LLT</td>
|
||||
<td>Positive definite</td>
|
||||
<td>Very fast</td>
|
||||
<td>Depends on condition number</td>
|
||||
<td>-</td>
|
||||
<td>-</td>
|
||||
<td>Yes</td>
|
||||
<td>Excellent</td>
|
||||
<td>Blocking</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>LDLT</td>
|
||||
<td>Positive or negative semidefinite<sup><a href="#note1">1</a></sup></td>
|
||||
<td>Very fast</td>
|
||||
<td>Good</td>
|
||||
<td>-</td>
|
||||
<td>-</td>
|
||||
<td>Yes</td>
|
||||
<td>Excellent</td>
|
||||
<td><em>Soon: blocking</em></td>
|
||||
</tr>
|
||||
|
||||
<tr><th class="inter" colspan="9">\n Singular values and eigenvalues decompositions</th></tr>
|
||||
|
||||
<tr>
|
||||
<td>JacobiSVD (two-sided)</td>
|
||||
<td>-</td>
|
||||
<td>Slow (but fast for small matrices)</td>
|
||||
<td>Excellent-Proven<sup><a href="#note3">3</a></sup></td>
|
||||
<td>Yes</td>
|
||||
<td>Singular values/vectors, least squares</td>
|
||||
<td>Yes (and does least squares)</td>
|
||||
<td>Excellent</td>
|
||||
<td>R-SVD</td>
|
||||
</tr>
|
||||
|
||||
<tr class="alt">
|
||||
<td>SelfAdjointEigenSolver</td>
|
||||
<td>Self-adjoint</td>
|
||||
<td>Fast-average<sup><a href="#note2">2</a></sup></td>
|
||||
<td>Good</td>
|
||||
<td>Yes</td>
|
||||
<td>Eigenvalues/vectors</td>
|
||||
<td>-</td>
|
||||
<td>Good</td>
|
||||
<td><em>Closed forms for 2x2 and 3x3</em></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>ComplexEigenSolver</td>
|
||||
<td>Square</td>
|
||||
<td>Slow-very slow<sup><a href="#note2">2</a></sup></td>
|
||||
<td>Depends on condition number</td>
|
||||
<td>Yes</td>
|
||||
<td>Eigenvalues/vectors</td>
|
||||
<td>-</td>
|
||||
<td>Average</td>
|
||||
<td>-</td>
|
||||
</tr>
|
||||
|
||||
<tr class="alt">
|
||||
<td>EigenSolver</td>
|
||||
<td>Square and real</td>
|
||||
<td>Average-slow<sup><a href="#note2">2</a></sup></td>
|
||||
<td>Depends on condition number</td>
|
||||
<td>Yes</td>
|
||||
<td>Eigenvalues/vectors</td>
|
||||
<td>-</td>
|
||||
<td>Average</td>
|
||||
<td>-</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>GeneralizedSelfAdjointEigenSolver</td>
|
||||
<td>Square</td>
|
||||
<td>Fast-average<sup><a href="#note2">2</a></sup></td>
|
||||
<td>Depends on condition number</td>
|
||||
<td>-</td>
|
||||
<td>Generalized eigenvalues/vectors</td>
|
||||
<td>-</td>
|
||||
<td>Good</td>
|
||||
<td>-</td>
|
||||
</tr>
|
||||
|
||||
<tr><th class="inter" colspan="9">\n Helper decompositions</th></tr>
|
||||
|
||||
<tr>
|
||||
<td>RealSchur</td>
|
||||
<td>Square and real</td>
|
||||
<td>Average-slow<sup><a href="#note2">2</a></sup></td>
|
||||
<td>Depends on condition number</td>
|
||||
<td>Yes</td>
|
||||
<td>-</td>
|
||||
<td>-</td>
|
||||
<td>Average</td>
|
||||
<td>-</td>
|
||||
</tr>
|
||||
|
||||
<tr class="alt">
|
||||
<td>ComplexSchur</td>
|
||||
<td>Square</td>
|
||||
<td>Slow-very slow<sup><a href="#note2">2</a></sup></td>
|
||||
<td>Depends on condition number</td>
|
||||
<td>Yes</td>
|
||||
<td>-</td>
|
||||
<td>-</td>
|
||||
<td>Average</td>
|
||||
<td>-</td>
|
||||
</tr>
|
||||
|
||||
<tr class="alt">
|
||||
<td>Tridiagonalization</td>
|
||||
<td>Self-adjoint</td>
|
||||
<td>Fast</td>
|
||||
<td>Good</td>
|
||||
<td>-</td>
|
||||
<td>-</td>
|
||||
<td>-</td>
|
||||
<td>Good</td>
|
||||
<td><em>Soon: blocking</em></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>HessenbergDecomposition</td>
|
||||
<td>Square</td>
|
||||
<td>Average</td>
|
||||
<td>Good</td>
|
||||
<td>-</td>
|
||||
<td>-</td>
|
||||
<td>-</td>
|
||||
<td>Good</td>
|
||||
<td><em>Soon: blocking</em></td>
|
||||
</tr>
|
||||
|
||||
</table>
|
||||
|
||||
\b Notes:
|
||||
<ul>
|
||||
<li><a name="note1">\b 1: </a>There exist two variants of the LDLT algorithm. Eigen's one produces a pure diagonal D matrix, and therefore it cannot handle indefinite matrices, unlike Lapack's one which produces a block diagonal D matrix.</li>
|
||||
<li><a name="note2">\b 2: </a>Eigenvalues, SVD and Schur decompositions rely on iterative algorithms. Their convergence speed depends on how well the eigenvalues are separated.</li>
|
||||
<li><a name="note3">\b 3: </a>Our JacobiSVD is two-sided, making for proven and optimal precision for square matrices. For non-square matrices, we have to use a QR preconditioner first. The default choice, ColPivHouseholderQR, is already very reliable, but if you want it to be proven, use FullPivHouseholderQR instead.</li>
|
||||
</ul>
|
||||
|
||||
\section TopicLinAlgTerminology Terminology
|
||||
|
||||
<dl>
|
||||
<dt><b>Selfadjoint</b></dt>
|
||||
<dd>For a real matrix, selfadjoint is a synonym for symmetric. For a complex matrix, selfadjoint is a synonym for \em hermitian.
|
||||
More generally, a matrix \f$ A \f$ is selfadjoint if and only if it is equal to its adjoint \f$ A^* \f$. The adjoint is also called the \em conjugate \em transpose. </dd>
|
||||
<dt><b>Positive/negative definite</b></dt>
|
||||
<dd>A selfadjoint matrix \f$ A \f$ is positive definite if \f$ v^* A v > 0 \f$ for any non zero vector \f$ v \f$.
|
||||
In the same vein, it is negative definite if \f$ v^* A v < 0 \f$ for any non zero vector \f$ v \f$ </dd>
|
||||
<dt><b>Positive/negative semidefinite</b></dt>
|
||||
<dd>A selfadjoint matrix \f$ A \f$ is positive semi-definite if \f$ v^* A v \ge 0 \f$ for any non zero vector \f$ v \f$.
|
||||
In the same vein, it is negative semi-definite if \f$ v^* A v \le 0 \f$ for any non zero vector \f$ v \f$ </dd>
|
||||
|
||||
<dt><b>Blocking</b></dt>
|
||||
<dd>Means the algorithm can work per block, thus guaranteeing a good scaling of the performance for large matrices.</dd>
|
||||
<dt><b>Implicit Multi Threading (MT)</b></dt>
|
||||
<dd>Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algorithm itself is not parallelized, but that it relies on parallelized matrix-matrix product routines.</dd>
|
||||
<dt><b>Explicit Multi Threading (MT)</b></dt>
|
||||
<dd>Means the algorithm is explicitly parallelized to take advantage of multicore processors via OpenMP.</dd>
|
||||
<dt><b>Meta-unroller</b></dt>
|
||||
<dd>Means the algorithm is automatically and explicitly unrolled for very small fixed size matrices.</dd>
|
||||
<dt><b></b></dt>
|
||||
<dd></dd>
|
||||
</dl>
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,46 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page TopicMultiThreading Eigen and multi-threading
|
||||
|
||||
\section TopicMultiThreading_MakingEigenMT Make Eigen run in parallel
|
||||
|
||||
Some of Eigen's algorithms can exploit the multiple cores present in your hardware. To this end, it is enough to enable OpenMP on your compiler, for instance:
|
||||
* GCC: \c -fopenmp
|
||||
* ICC: \c -openmp
|
||||
* MSVC: check the respective option in the build properties.
|
||||
You can control the number of threads that will be used using either the OpenMP API or Eigen's API using the following priority:
|
||||
\code
|
||||
OMP_NUM_THREADS=n ./my_program
|
||||
omp_set_num_threads(n);
|
||||
Eigen::setNbThreads(n);
|
||||
\endcode
|
||||
Unless setNbThreads has been called, Eigen uses the number of threads specified by OpenMP. You can restore this behavior by calling \code setNbThreads(0); \endcode
|
||||
You can query the number of threads that will be used with:
|
||||
\code
|
||||
n = Eigen::nbThreads( );
|
||||
\endcode
|
||||
You can disable Eigen's multi threading at compile time by defining the EIGEN_DONT_PARALLELIZE preprocessor token.
|
||||
|
||||
Currently, the following algorithms can make use of multi-threading:
|
||||
* general matrix - matrix products
|
||||
* PartialPivLU
|
||||
|
||||
\section TopicMultiThreading_UsingEigenWithMT Using Eigen in a multi-threaded application
|
||||
|
||||
In the case your own application is multithreaded, and multiple threads make calls to Eigen, then you have to initialize Eigen by calling the following routine \b before creating the threads:
|
||||
\code
|
||||
#include <Eigen/Core>
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
Eigen::initParallel();
|
||||
|
||||
...
|
||||
}
|
||||
\endcode
|
||||
|
||||
In the case your application is parallelized with OpenMP, you might want to disable Eigen's own parallelization as detailed in the previous section.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page TopicResizing Resizing
|
||||
|
||||
|
||||
TODO: write this dox page!
|
||||
|
||||
Is linked from the tutorial on the Matrix class.
|
||||
|
||||
*/
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page TopicScalarTypes Scalar types
|
||||
|
||||
|
||||
TODO: write this dox page!
|
||||
|
||||
Is linked from the tutorial on the Matrix class.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,9 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \page TopicVectorization Vectorization
|
||||
|
||||
|
||||
TODO: write this dox page!
|
||||
|
||||
*/
|
||||
}
|
|
@ -1,162 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TutorialAdvancedInitialization Advanced initialization
|
||||
|
||||
This page discusses several advanced methods for initializing matrices. It gives more details on the
|
||||
comma-initializer, which was introduced before. It also explains how to get special matrices such as the
|
||||
identity matrix and the zero matrix.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section TutorialAdvancedInitializationCommaInitializer The comma initializer
|
||||
|
||||
Eigen offers a comma initializer syntax which allows the user to easily set all the coefficients of a matrix,
|
||||
vector or array. Simply list the coefficients, starting at the top-left corner and moving from left to right
|
||||
and from the top to the bottom. The size of the object needs to be specified beforehand. If you list too few
|
||||
or too many coefficients, Eigen will complain.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_commainit_01.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_commainit_01.out
|
||||
</td></tr></table>
|
||||
|
||||
Moreover, the elements of the initialization list may themselves be vectors or matrices. A common use is
|
||||
to join vectors or matrices together. For example, here is how to join two row vectors together. Remember
|
||||
that you have to set the size before you can use the comma initializer.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_AdvancedInitialization_Join.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_AdvancedInitialization_Join.out
|
||||
</td></tr></table>
|
||||
|
||||
We can use the same technique to initialize matrices with a block structure.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_AdvancedInitialization_Block.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_AdvancedInitialization_Block.out
|
||||
</td></tr></table>
|
||||
|
||||
The comma initializer can also be used to fill block expressions such as <tt>m.row(i)</tt>. Here is a more
|
||||
complicated way to get the same result as in the first example above:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_commainit_01b.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_commainit_01b.out
|
||||
</td></tr></table>
|
||||
|
||||
|
||||
\section TutorialAdvancedInitializationSpecialMatrices Special matrices and arrays
|
||||
|
||||
The Matrix and Array classes have static methods like \link DenseBase::Zero() Zero()\endlink, which can be
|
||||
used to initialize all coefficients to zero. There are three variants. The first variant takes no arguments
|
||||
and can only be used for fixed-size objects. If you want to initialize a dynamic-size object to zero, you need
|
||||
to specify the size. Thus, the second variant requires one argument and can be used for one-dimensional
|
||||
dynamic-size objects, while the third variant requires two arguments and can be used for two-dimensional
|
||||
objects. All three variants are illustrated in the following example:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_AdvancedInitialization_Zero.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_AdvancedInitialization_Zero.out
|
||||
</td></tr></table>
|
||||
|
||||
Similarly, the static method \link DenseBase::Constant() Constant\endlink(value) sets all coefficients to \c value.
|
||||
If the size of the object needs to be specified, the additional arguments go before the \c value
|
||||
argument, as in <tt>MatrixXd::Constant(rows, cols, value)</tt>. The method \link DenseBase::Random() Random()
|
||||
\endlink fills the matrix or array with random coefficients. The identity matrix can be obtained by calling
|
||||
\link MatrixBase::Identity() Identity()\endlink; this method is only available for Matrix, not for Array,
|
||||
because "identity matrix" is a linear algebra concept. The method
|
||||
\link DenseBase::LinSpaced LinSpaced\endlink(size, low, high) is only available for vectors and
|
||||
one-dimensional arrays; it yields a vector of the specified size whose coefficients are equally spaced between
|
||||
\c low and \c high. The method \c LinSpaced() is illustrated in the following example, which prints a table
|
||||
with angles in degrees, the corresponding angle in radians, and their sine and cosine.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_AdvancedInitialization_LinSpaced.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_AdvancedInitialization_LinSpaced.out
|
||||
</td></tr></table>
|
||||
|
||||
This example shows that objects like the ones returned by LinSpaced() can be assigned to variables (and
|
||||
expressions). Eigen defines utility functions like \link DenseBase::setZero() setZero()\endlink,
|
||||
\link MatrixBase::setIdentity() \endlink and \link DenseBase::setLinSpaced() \endlink to do this
|
||||
conveniently. The following example contrasts three ways to construct the matrix
|
||||
\f$ J = \bigl[ \begin{smallmatrix} O & I \\ I & O \end{smallmatrix} \bigr] \f$: using static methods and
|
||||
assignment, using static methods and the comma-initializer, or using the setXxx() methods.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_AdvancedInitialization_ThreeWays.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_AdvancedInitialization_ThreeWays.out
|
||||
</td></tr></table>
|
||||
|
||||
A summary of all pre-defined matrix, vector and array objects can be found in the \ref QuickRefPage.
|
||||
|
||||
|
||||
\section TutorialAdvancedInitializationTemporaryObjects Usage as temporary objects
|
||||
|
||||
As shown above, static methods as Zero() and Constant() can be used to initialize variables at the time of
|
||||
declaration or at the right-hand side of an assignment operator. You can think of these methods as returning a
|
||||
matrix or array; in fact, they return so-called \ref TopicEigenExpressionTemplates "expression objects" which
|
||||
evaluate to a matrix or array when needed, so that this syntax does not incur any overhead.
|
||||
|
||||
These expressions can also be used as a temporary object. The second example in
|
||||
the \ref GettingStarted guide, which we reproduce here, already illustrates this.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include QuickStart_example2_dynamic.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude QuickStart_example2_dynamic.out
|
||||
</td></tr></table>
|
||||
|
||||
The expression <tt>m + MatrixXf::Constant(3,3,1.2)</tt> constructs the 3-by-3 matrix expression with all its coefficients
|
||||
equal to 1.2 plus the corresponding coefficient of \a m.
|
||||
|
||||
The comma-initializer, too, can also be used to construct temporary objects. The following example constructs a random
|
||||
matrix of size 2-by-3, and then multiplies this matrix on the left with
|
||||
\f$ \bigl[ \begin{smallmatrix} 0 & 1 \\ 1 & 0 \end{smallmatrix} \bigr] \f$.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_AdvancedInitialization_CommaTemporary.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_AdvancedInitialization_CommaTemporary.out
|
||||
</td></tr></table>
|
||||
|
||||
The \link CommaInitializer::finished() finished() \endlink method is necessary here to get the actual matrix
|
||||
object once the comma initialization of our temporary submatrix is done.
|
||||
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,192 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TutorialArrayClass The Array class and coefficient-wise operations
|
||||
|
||||
This page aims to provide an overview and explanations on how to use
|
||||
Eigen's Array class.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section TutorialArrayClassIntro What is the Array class?
|
||||
|
||||
The Array class provides general-purpose arrays, as opposed to the Matrix class which
|
||||
is intended for linear algebra. Furthermore, the Array class provides an easy way to
|
||||
perform coefficient-wise operations, which might not have a linear algebraic meaning,
|
||||
such as adding a constant to every coefficient in the array or multiplying two arrays coefficient-wise.
|
||||
|
||||
|
||||
\section TutorialArrayClassTypes Array types
|
||||
Array is a class template taking the same template parameters as Matrix.
|
||||
As with Matrix, the first three template parameters are mandatory:
|
||||
\code
|
||||
Array<typename Scalar, int RowsAtCompileTime, int ColsAtCompileTime>
|
||||
\endcode
|
||||
The last three template parameters are optional. Since this is exactly the same as for Matrix,
|
||||
we won't explain it again here and just refer to \ref TutorialMatrixClass.
|
||||
|
||||
Eigen also provides typedefs for some common cases, in a way that is similar to the Matrix typedefs
|
||||
but with some slight differences, as the word "array" is used for both 1-dimensional and 2-dimensional arrays.
|
||||
We adopt the convention that typedefs of the form ArrayNt stand for 1-dimensional arrays, where N and t are
|
||||
the size and the scalar type, as in the Matrix typedefs explained on \ref TutorialMatrixClass "this page". For 2-dimensional arrays, we
|
||||
use typedefs of the form ArrayNNt. Some examples are shown in the following table:
|
||||
|
||||
<table class="manual">
|
||||
<tr>
|
||||
<th>Type </th>
|
||||
<th>Typedef </th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td> \code Array<float,Dynamic,1> \endcode </td>
|
||||
<td> \code ArrayXf \endcode </td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td> \code Array<float,3,1> \endcode </td>
|
||||
<td> \code Array3f \endcode </td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td> \code Array<double,Dynamic,Dynamic> \endcode </td>
|
||||
<td> \code ArrayXXd \endcode </td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td> \code Array<double,3,3> \endcode </td>
|
||||
<td> \code Array33d \endcode </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
\section TutorialArrayClassAccess Accessing values inside an Array
|
||||
|
||||
The parenthesis operator is overloaded to provide write and read access to the coefficients of an array, just as with matrices.
|
||||
Furthermore, the \c << operator can be used to initialize arrays (via the comma initializer) or to print them.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ArrayClass_accessors.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ArrayClass_accessors.out
|
||||
</td></tr></table>
|
||||
|
||||
For more information about the comma initializer, see \ref TutorialAdvancedInitialization.
|
||||
|
||||
|
||||
\section TutorialArrayClassAddSub Addition and subtraction
|
||||
|
||||
Adding and subtracting two arrays is the same as for matrices.
|
||||
The operation is valid if both arrays have the same size, and the addition or subtraction is done coefficient-wise.
|
||||
|
||||
Arrays also support expressions of the form <tt>array + scalar</tt> which add a scalar to each coefficient in the array.
|
||||
This provides a functionality that is not directly available for Matrix objects.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ArrayClass_addition.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ArrayClass_addition.out
|
||||
</td></tr></table>
|
||||
|
||||
|
||||
\section TutorialArrayClassMult Array multiplication
|
||||
|
||||
First of all, you can of course multiply an array by a scalar; this works in the same way as matrices. Where arrays
|
||||
are fundamentally different from matrices, is when you multiply two together. Matrices interpret
|
||||
multiplication as matrix product and arrays interpret multiplication as coefficient-wise product. Thus, two
|
||||
arrays can be multiplied if and only if they have the same dimensions.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ArrayClass_mult.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ArrayClass_mult.out
|
||||
</td></tr></table>
|
||||
|
||||
|
||||
\section TutorialArrayClassCwiseOther Other coefficient-wise operations
|
||||
|
||||
The Array class defines other coefficient-wise operations besides the addition, subtraction and multiplication
|
||||
operators described above. For example, the \link ArrayBase::abs() .abs() \endlink method takes the absolute
|
||||
value of each coefficient, while \link ArrayBase::sqrt() .sqrt() \endlink computes the square root of the
|
||||
coefficients. If you have two arrays of the same size, you can call \link ArrayBase::min(const Eigen::ArrayBase<OtherDerived>&) const .min(.) \endlink to
|
||||
construct the array whose coefficients are the minimum of the corresponding coefficients of the two given
|
||||
arrays. These operations are illustrated in the following example.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ArrayClass_cwise_other.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ArrayClass_cwise_other.out
|
||||
</td></tr></table>
|
||||
|
||||
More coefficient-wise operations can be found in the \ref QuickRefPage.
|
||||
|
||||
|
||||
\section TutorialArrayClassConvert Converting between array and matrix expressions
|
||||
|
||||
When should you use objects of the Matrix class and when should you use objects of the Array class? You cannot
|
||||
apply Matrix operations on arrays, or Array operations on matrices. Thus, if you need to do linear algebraic
|
||||
operations such as matrix multiplication, then you should use matrices; if you need to do coefficient-wise
|
||||
operations, then you should use arrays. However, sometimes it is not that simple, but you need to use both
|
||||
Matrix and Array operations. In that case, you need to convert a matrix to an array or reversely. This gives
|
||||
access to all operations regardless of the choice of declaring objects as arrays or as matrices.
|
||||
|
||||
\link MatrixBase Matrix expressions \endlink have an \link MatrixBase::array() .array() \endlink method that
|
||||
'converts' them into \link ArrayBase array expressions\endlink, so that coefficient-wise operations
|
||||
can be applied easily. Conversely, \link ArrayBase array expressions \endlink
|
||||
have a \link ArrayBase::matrix() .matrix() \endlink method. As with all Eigen expression abstractions,
|
||||
this doesn't have any runtime cost (provided that you let your compiler optimize).
|
||||
Both \link MatrixBase::array() .array() \endlink and \link ArrayBase::matrix() .matrix() \endlink
|
||||
can be used as rvalues and as lvalues.
|
||||
|
||||
Mixing matrices and arrays in an expression is forbidden with Eigen. For instance, you cannot add a matrix and
|
||||
array directly; the operands of a \c + operator should either both be matrices or both be arrays. However,
|
||||
it is easy to convert from one to the other with \link MatrixBase::array() .array() \endlink and
|
||||
\link ArrayBase::matrix() .matrix()\endlink. The exception to this rule is the assignment operator: it is
|
||||
allowed to assign a matrix expression to an array variable, or to assign an array expression to a matrix
|
||||
variable.
|
||||
|
||||
The following example shows how to use array operations on a Matrix object by employing the
|
||||
\link MatrixBase::array() .array() \endlink method. For example, the statement
|
||||
<tt>result = m.array() * n.array()</tt> takes two matrices \c m and \c n, converts them both to an array, uses
|
||||
* to multiply them coefficient-wise and assigns the result to the matrix variable \c result (this is legal
|
||||
because Eigen allows assigning array expressions to matrix variables).
|
||||
|
||||
As a matter of fact, this usage case is so common that Eigen provides a \link MatrixBase::cwiseProduct() const
|
||||
.cwiseProduct(.) \endlink method for matrices to compute the coefficient-wise product. This is also shown in
|
||||
the example program.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ArrayClass_interop_matrix.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ArrayClass_interop_matrix.out
|
||||
</td></tr></table>
|
||||
|
||||
Similarly, if \c array1 and \c array2 are arrays, then the expression <tt>array1.matrix() * array2.matrix()</tt>
|
||||
computes their matrix product.
|
||||
|
||||
Here is a more advanced example. The expression <tt>(m.array() + 4).matrix() * m</tt> adds 4 to every
|
||||
coefficient in the matrix \c m and then computes the matrix product of the result with \c m. Similarly, the
|
||||
expression <tt>(m.array() * n.array()).matrix() * m</tt> computes the coefficient-wise product of the matrices
|
||||
\c m and \c n and then the matrix product of the result with \c m.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ArrayClass_interop.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ArrayClass_interop.out
|
||||
</td></tr></table>
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,228 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TutorialBlockOperations Block operations
|
||||
|
||||
This page explains the essentials of block operations.
|
||||
A block is a rectangular part of a matrix or array. Block expressions can be used both
|
||||
as rvalues and as lvalues. As usual with Eigen expressions, this abstraction has zero runtime cost
|
||||
provided that you let your compiler optimize.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section TutorialBlockOperationsUsing Using block operations
|
||||
|
||||
The most general block operation in Eigen is called \link DenseBase::block() .block() \endlink.
|
||||
There are two versions, whose syntax is as follows:
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>\b %Block \b operation</td>
|
||||
<th>Version constructing a \n dynamic-size block expression</th>
|
||||
<th>Version constructing a \n fixed-size block expression</th></tr>
|
||||
<tr><td>%Block of size <tt>(p,q)</tt>, starting at <tt>(i,j)</tt></td>
|
||||
<td>\code
|
||||
matrix.block(i,j,p,q);\endcode </td>
|
||||
<td>\code
|
||||
matrix.block<p,q>(i,j);\endcode </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
As always in Eigen, indices start at 0.
|
||||
|
||||
Both versions can be used on fixed-size and dynamic-size matrices and arrays.
|
||||
These two expressions are semantically equivalent.
|
||||
The only difference is that the fixed-size version will typically give you faster code if the block size is small,
|
||||
but requires this size to be known at compile time.
|
||||
|
||||
The following program uses the dynamic-size and fixed-size versions to print the values of several blocks inside a
|
||||
matrix.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_BlockOperations_print_block.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_BlockOperations_print_block.out
|
||||
</td></tr></table>
|
||||
|
||||
In the above example the \link DenseBase::block() .block() \endlink function was employed as a \em rvalue, i.e.
|
||||
it was only read from. However, blocks can also be used as \em lvalues, meaning that you can assign to a block.
|
||||
|
||||
This is illustrated in the following example. This example also demonstrates blocks in arrays, which works exactly like the above-demonstrated blocks in matrices.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_BlockOperations_block_assignment.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_BlockOperations_block_assignment.out
|
||||
</td></tr></table>
|
||||
|
||||
While the \link DenseBase::block() .block() \endlink method can be used for any block operation, there are
|
||||
other methods for special cases, providing more specialized API and/or better performance. On the topic of performance, all that
|
||||
matters is that you give Eigen as much information as possible at compile time. For example, if your block is a single whole column in a matrix,
|
||||
using the specialized \link DenseBase::col() .col() \endlink function described below lets Eigen know that, which can give it optimization opportunities.
|
||||
|
||||
The rest of this page describes these specialized methods.
|
||||
|
||||
\section TutorialBlockOperationsSyntaxColumnRows Columns and rows
|
||||
|
||||
Individual columns and rows are special cases of blocks. Eigen provides methods to easily address them:
|
||||
\link DenseBase::col() .col() \endlink and \link DenseBase::row() .row()\endlink.
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>%Block operation</th>
|
||||
<th>Method</th>
|
||||
<tr><td>i<sup>th</sup> row
|
||||
\link DenseBase::row() * \endlink</td>
|
||||
<td>\code
|
||||
matrix.row(i);\endcode </td>
|
||||
</tr>
|
||||
<tr><td>j<sup>th</sup> column
|
||||
\link DenseBase::col() * \endlink</td>
|
||||
<td>\code
|
||||
matrix.col(j);\endcode </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
The argument for \p col() and \p row() is the index of the column or row to be accessed. As always in Eigen, indices start at 0.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_BlockOperations_colrow.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_BlockOperations_colrow.out
|
||||
</td></tr></table>
|
||||
|
||||
That example also demonstrates that block expressions (here columns) can be used in arithmetic like any other expression.
|
||||
|
||||
|
||||
\section TutorialBlockOperationsSyntaxCorners Corner-related operations
|
||||
|
||||
Eigen also provides special methods for blocks that are flushed against one of the corners or sides of a
|
||||
matrix or array. For instance, \link DenseBase::topLeftCorner() .topLeftCorner() \endlink can be used to refer
|
||||
to a block in the top-left corner of a matrix.
|
||||
|
||||
The different possibilities are summarized in the following table:
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>%Block \b operation</td>
|
||||
<th>Version constructing a \n dynamic-size block expression</th>
|
||||
<th>Version constructing a \n fixed-size block expression</th></tr>
|
||||
<tr><td>Top-left p by q block \link DenseBase::topLeftCorner() * \endlink</td>
|
||||
<td>\code
|
||||
matrix.topLeftCorner(p,q);\endcode </td>
|
||||
<td>\code
|
||||
matrix.topLeftCorner<p,q>();\endcode </td>
|
||||
</tr>
|
||||
<tr><td>Bottom-left p by q block
|
||||
\link DenseBase::bottomLeftCorner() * \endlink</td>
|
||||
<td>\code
|
||||
matrix.bottomLeftCorner(p,q);\endcode </td>
|
||||
<td>\code
|
||||
matrix.bottomLeftCorner<p,q>();\endcode </td>
|
||||
</tr>
|
||||
<tr><td>Top-right p by q block
|
||||
\link DenseBase::topRightCorner() * \endlink</td>
|
||||
<td>\code
|
||||
matrix.topRightCorner(p,q);\endcode </td>
|
||||
<td>\code
|
||||
matrix.topRightCorner<p,q>();\endcode </td>
|
||||
</tr>
|
||||
<tr><td>Bottom-right p by q block
|
||||
\link DenseBase::bottomRightCorner() * \endlink</td>
|
||||
<td>\code
|
||||
matrix.bottomRightCorner(p,q);\endcode </td>
|
||||
<td>\code
|
||||
matrix.bottomRightCorner<p,q>();\endcode </td>
|
||||
</tr>
|
||||
<tr><td>%Block containing the first q rows
|
||||
\link DenseBase::topRows() * \endlink</td>
|
||||
<td>\code
|
||||
matrix.topRows(q);\endcode </td>
|
||||
<td>\code
|
||||
matrix.topRows<q>();\endcode </td>
|
||||
</tr>
|
||||
<tr><td>%Block containing the last q rows
|
||||
\link DenseBase::bottomRows() * \endlink</td>
|
||||
<td>\code
|
||||
matrix.bottomRows(q);\endcode </td>
|
||||
<td>\code
|
||||
matrix.bottomRows<q>();\endcode </td>
|
||||
</tr>
|
||||
<tr><td>%Block containing the first p columns
|
||||
\link DenseBase::leftCols() * \endlink</td>
|
||||
<td>\code
|
||||
matrix.leftCols(p);\endcode </td>
|
||||
<td>\code
|
||||
matrix.leftCols<p>();\endcode </td>
|
||||
</tr>
|
||||
<tr><td>%Block containing the last q columns
|
||||
\link DenseBase::rightCols() * \endlink</td>
|
||||
<td>\code
|
||||
matrix.rightCols(q);\endcode </td>
|
||||
<td>\code
|
||||
matrix.rightCols<q>();\endcode </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
Here is a simple example illustrating the use of the operations presented above:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_BlockOperations_corner.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_BlockOperations_corner.out
|
||||
</td></tr></table>
|
||||
|
||||
|
||||
\section TutorialBlockOperationsSyntaxVectors Block operations for vectors
|
||||
|
||||
Eigen also provides a set of block operations designed specifically for the special case of vectors and one-dimensional arrays:
|
||||
|
||||
<table class="manual">
|
||||
<tr><th> %Block operation</th>
|
||||
<th>Version constructing a \n dynamic-size block expression</th>
|
||||
<th>Version constructing a \n fixed-size block expression</th></tr>
|
||||
<tr><td>%Block containing the first \p n elements
|
||||
\link DenseBase::head() * \endlink</td>
|
||||
<td>\code
|
||||
vector.head(n);\endcode </td>
|
||||
<td>\code
|
||||
vector.head<n>();\endcode </td>
|
||||
</tr>
|
||||
<tr><td>%Block containing the last \p n elements
|
||||
\link DenseBase::tail() * \endlink</td>
|
||||
<td>\code
|
||||
vector.tail(n);\endcode </td>
|
||||
<td>\code
|
||||
vector.tail<n>();\endcode </td>
|
||||
</tr>
|
||||
<tr><td>%Block containing \p n elements, starting at position \p i
|
||||
\link DenseBase::segment() * \endlink</td>
|
||||
<td>\code
|
||||
vector.segment(i,n);\endcode </td>
|
||||
<td>\code
|
||||
vector.segment<n>(i);\endcode </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
An example is presented below:
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_BlockOperations_vector.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_BlockOperations_vector.out
|
||||
</td></tr></table>
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,241 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TutorialGeometry Space transformations
|
||||
|
||||
In this page, we will introduce the many possibilities offered by the \ref Geometry_Module "geometry module" to deal with 2D and 3D rotations and projective or affine transformations.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
Eigen's Geometry module provides two different kinds of geometric transformations:
|
||||
- Abstract transformations, such as rotations (represented by \ref AngleAxis "angle and axis" or by a \ref Quaternion "quaternion"), \ref Translation "translations", \ref Scaling "scalings". These transformations are NOT represented as matrices, but you can nevertheless mix them with matrices and vectors in expressions, and convert them to matrices if you wish.
|
||||
- Projective or affine transformation matrices: see the Transform class. These are really matrices.
|
||||
|
||||
\note If you are working with OpenGL 4x4 matrices then Affine3f and Affine3d are what you want. Since Eigen defaults to column-major storage, you can directly use the Transform::data() method to pass your transformation matrix to OpenGL.
|
||||
|
||||
You can construct a Transform from an abstract transformation, like this:
|
||||
\code
|
||||
Transform t(AngleAxis(angle,axis));
|
||||
\endcode
|
||||
or like this:
|
||||
\code
|
||||
Transform t;
|
||||
t = AngleAxis(angle,axis);
|
||||
\endcode
|
||||
But note that unfortunately, because of how C++ works, you can \b not do this:
|
||||
\code
|
||||
Transform t = AngleAxis(angle,axis);
|
||||
\endcode
|
||||
<span class="note">\b Explanation: In the C++ language, this would require Transform to have a non-explicit conversion constructor from AngleAxis, but we really don't want to allow implicit casting here.
|
||||
</span>
|
||||
|
||||
\section TutorialGeoElementaryTransformations Transformation types
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Transformation type</th><th>Typical initialization code</th></tr>
|
||||
<tr><td>
|
||||
\ref Rotation2D "2D rotation" from an angle</td><td>\code
|
||||
Rotation2D<float> rot2(angle_in_radian);\endcode</td></tr>
|
||||
<tr class="alt"><td>
|
||||
3D rotation as an \ref AngleAxis "angle + axis"</td><td>\code
|
||||
AngleAxis<float> aa(angle_in_radian, Vector3f(ax,ay,az));\endcode
|
||||
<span class="note">The axis vector must be normalized.</span></td></tr>
|
||||
<tr><td>
|
||||
3D rotation as a \ref Quaternion "quaternion"</td><td>\code
|
||||
Quaternion<float> q; q = AngleAxis<float>(angle_in_radian, axis);\endcode</td></tr>
|
||||
<tr class="alt"><td>
|
||||
N-D Scaling</td><td>\code
|
||||
Scaling(sx, sy)
|
||||
Scaling(sx, sy, sz)
|
||||
Scaling(s)
|
||||
Scaling(vecN)\endcode</td></tr>
|
||||
<tr><td>
|
||||
N-D Translation</td><td>\code
|
||||
Translation<float,2>(tx, ty)
|
||||
Translation<float,3>(tx, ty, tz)
|
||||
Translation<float,N>(s)
|
||||
Translation<float,N>(vecN)\endcode</td></tr>
|
||||
<tr class="alt"><td>
|
||||
N-D \ref TutorialGeoTransform "Affine transformation"</td><td>\code
|
||||
Transform<float,N,Affine> t = concatenation_of_any_transformations;
|
||||
Transform<float,3,Affine> t = Translation3f(p) * AngleAxisf(a,axis) * Scaling(s);\endcode</td></tr>
|
||||
<tr><td>
|
||||
N-D Linear transformations \n
|
||||
<em class=note>(pure rotations, \n scaling, etc.)</em></td><td>\code
|
||||
Matrix<float,N> t = concatenation_of_rotations_and_scalings;
|
||||
Matrix<float,2> t = Rotation2Df(a) * Scaling(s);
|
||||
Matrix<float,3> t = AngleAxisf(a,axis) * Scaling(s);\endcode</td></tr>
|
||||
</table>
|
||||
|
||||
<strong>Notes on rotations</strong>\n To transform more than a single vector the preferred
|
||||
representations are rotation matrices, while for other usages Quaternion is the
|
||||
representation of choice, as quaternions are compact, fast and stable. Finally, Rotation2D and
|
||||
AngleAxis are mainly convenient types to create other rotation objects.
|
||||
|
||||
<strong>Notes on Translation and Scaling</strong>\n Like AngleAxis, these classes were
|
||||
designed to simplify the creation/initialization of linear (Matrix) and affine (Transform)
|
||||
transformations. Nevertheless, unlike AngleAxis which is inefficient to use, these classes
|
||||
might still be interesting to write generic and efficient algorithms taking as input any
|
||||
kind of transformations.
|
||||
|
||||
Any of the above transformation types can be converted to any other types of the same nature,
|
||||
or to a more generic type. Here are some additional examples:
|
||||
<table class="manual">
|
||||
<tr><td>\code
|
||||
Rotation2Df r; r = Matrix2f(..); // assumes a pure rotation matrix
|
||||
AngleAxisf aa; aa = Quaternionf(..);
|
||||
AngleAxisf aa; aa = Matrix3f(..); // assumes a pure rotation matrix
|
||||
Matrix2f m; m = Rotation2Df(..);
|
||||
Matrix3f m; m = Quaternionf(..); Matrix3f m; m = Scaling(..);
|
||||
Affine3f m; m = AngleAxis3f(..); Affine3f m; m = Scaling(..);
|
||||
Affine3f m; m = Translation3f(..); Affine3f m; m = Matrix3f(..);
|
||||
\endcode</td></tr>
|
||||
</table>
|
||||
|
||||
|
||||
<a href="#" class="top">top</a>\section TutorialGeoCommontransformationAPI Common API across transformation types
|
||||
|
||||
To some extent, Eigen's \ref Geometry_Module "geometry module" allows you to write
|
||||
generic algorithms working on any kind of transformation representations:
|
||||
<table class="manual">
|
||||
<tr><td>
|
||||
Concatenation of two transformations</td><td>\code
|
||||
gen1 * gen2;\endcode</td></tr>
|
||||
<tr class="alt"><td>Apply the transformation to a vector</td><td>\code
|
||||
vec2 = gen1 * vec1;\endcode</td></tr>
|
||||
<tr><td>Get the inverse of the transformation</td><td>\code
|
||||
gen2 = gen1.inverse();\endcode</td></tr>
|
||||
<tr class="alt"><td>Spherical interpolation \n (Rotation2D and Quaternion only)</td><td>\code
|
||||
rot3 = rot1.slerp(alpha,rot2);\endcode</td></tr>
|
||||
</table>
|
||||
|
||||
|
||||
|
||||
<a href="#" class="top">top</a>\section TutorialGeoTransform Affine transformations
|
||||
Generic affine transformations are represented by the Transform class which internally
|
||||
is a (Dim+1)^2 matrix. In Eigen we have chosen to not distinguish between points and
|
||||
vectors such that all points are actually represented by displacement vectors from the
|
||||
origin ( \f$ \mathbf{p} \equiv \mathbf{p}-0 \f$ ). With that in mind, real points and
|
||||
vectors are distinguished only when the transformation is applied.
|
||||
<table class="manual">
|
||||
<tr><td>
|
||||
Apply the transformation to a \b point </td><td>\code
|
||||
VectorNf p1, p2;
|
||||
p2 = t * p1;\endcode</td></tr>
|
||||
<tr class="alt"><td>
|
||||
Apply the transformation to a \b vector </td><td>\code
|
||||
VectorNf vec1, vec2;
|
||||
vec2 = t.linear() * vec1;\endcode</td></tr>
|
||||
<tr><td>
|
||||
Apply a \em general transformation \n to a \b normal \b vector
|
||||
(<a href="http://femto.cs.uiuc.edu/faqs/cga-faq.html#S5.27">explanations</a>)</td><td>\code
|
||||
VectorNf n1, n2;
|
||||
MatrixNf normalMatrix = t.linear().inverse().transpose();
|
||||
n2 = (normalMatrix * n1).normalized();\endcode</td></tr>
|
||||
<tr class="alt"><td>
|
||||
Apply a transformation with \em pure \em rotation \n to a \b normal \b vector
|
||||
(no scaling, no shear)</td><td>\code
|
||||
n2 = t.linear() * n1;\endcode</td></tr>
|
||||
<tr><td>
|
||||
OpenGL compatibility \b 3D </td><td>\code
|
||||
glLoadMatrixf(t.data());\endcode</td></tr>
|
||||
<tr class="alt"><td>
|
||||
OpenGL compatibility \b 2D </td><td>\code
|
||||
Affine3f aux(Affine3f::Identity());
|
||||
aux.linear().topLeftCorner<2,2>() = t.linear();
|
||||
aux.translation().start<2>() = t.translation();
|
||||
glLoadMatrixf(aux.data());\endcode</td></tr>
|
||||
</table>
|
||||
|
||||
\b Component \b accessors
|
||||
<table class="manual">
|
||||
<tr><td>
|
||||
full read-write access to the internal matrix</td><td>\code
|
||||
t.matrix() = matN1xN1; // N1 means N+1
|
||||
matN1xN1 = t.matrix();
|
||||
\endcode</td></tr>
|
||||
<tr class="alt"><td>
|
||||
coefficient accessors</td><td>\code
|
||||
t(i,j) = scalar; <=> t.matrix()(i,j) = scalar;
|
||||
scalar = t(i,j); <=> scalar = t.matrix()(i,j);
|
||||
\endcode</td></tr>
|
||||
<tr><td>
|
||||
translation part</td><td>\code
|
||||
t.translation() = vecN;
|
||||
vecN = t.translation();
|
||||
\endcode</td></tr>
|
||||
<tr class="alt"><td>
|
||||
linear part</td><td>\code
|
||||
t.linear() = matNxN;
|
||||
matNxN = t.linear();
|
||||
\endcode</td></tr>
|
||||
<tr><td>
|
||||
extract the rotation matrix</td><td>\code
|
||||
matNxN = t.rotation();
|
||||
\endcode</td></tr>
|
||||
</table>
|
||||
|
||||
|
||||
\b Transformation \b creation \n
|
||||
While transformation objects can be created and updated concatenating elementary transformations,
|
||||
the Transform class also features a procedural API:
|
||||
<table class="manual">
|
||||
<tr><th></th><th>procedural API</th><th>equivalent natural API </th></tr>
|
||||
<tr><td>Translation</td><td>\code
|
||||
t.translate(Vector_(tx,ty,..));
|
||||
t.pretranslate(Vector_(tx,ty,..));
|
||||
\endcode</td><td>\code
|
||||
t *= Translation_(tx,ty,..);
|
||||
t = Translation_(tx,ty,..) * t;
|
||||
\endcode</td></tr>
|
||||
<tr class="alt"><td>\b Rotation \n <em class="note">In 2D and for the procedural API, any_rotation can also \n be an angle in radian</em></td><td>\code
|
||||
t.rotate(any_rotation);
|
||||
t.prerotate(any_rotation);
|
||||
\endcode</td><td>\code
|
||||
t *= any_rotation;
|
||||
t = any_rotation * t;
|
||||
\endcode</td></tr>
|
||||
<tr><td>Scaling</td><td>\code
|
||||
t.scale(Vector_(sx,sy,..));
|
||||
t.scale(s);
|
||||
t.prescale(Vector_(sx,sy,..));
|
||||
t.prescale(s);
|
||||
\endcode</td><td>\code
|
||||
t *= Scaling(sx,sy,..);
|
||||
t *= Scaling(s);
|
||||
t = Scaling(sx,sy,..) * t;
|
||||
t = Scaling(s) * t;
|
||||
\endcode</td></tr>
|
||||
<tr class="alt"><td>Shear transformation \n ( \b 2D \b only ! )</td><td>\code
|
||||
t.shear(sx,sy);
|
||||
t.preshear(sx,sy);
|
||||
\endcode</td><td></td></tr>
|
||||
</table>
|
||||
|
||||
Note that in both APIs, many transformations can be concatenated in a single expression, as shown in the two following equivalent examples:
|
||||
<table class="manual">
|
||||
<tr><td>\code
|
||||
t.pretranslate(..).rotate(..).translate(..).scale(..);
|
||||
\endcode</td></tr>
|
||||
<tr><td>\code
|
||||
t = Translation_(..) * t * RotationType(..) * Translation_(..) * Scaling(..);
|
||||
\endcode</td></tr>
|
||||
</table>
|
||||
|
||||
|
||||
|
||||
<a href="#" class="top">top</a>\section TutorialGeoEulerAngles Euler angles
|
||||
<table class="manual">
|
||||
<tr><td style="max-width:30em;">
|
||||
Euler angles might be convenient to create rotation objects.
|
||||
On the other hand, since there exist 24 different conventions, they are pretty confusing to use. This example shows how
|
||||
to create a rotation matrix according to the 2-1-2 convention.</td><td>\code
|
||||
Matrix3f m;
|
||||
m = AngleAxisf(angle1, Vector3f::UnitZ())
|
||||
    * AngleAxisf(angle2, Vector3f::UnitY())
|
||||
    * AngleAxisf(angle3, Vector3f::UnitZ());
|
||||
\endcode</td></tr>
|
||||
</table>
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,255 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TutorialLinearAlgebra Linear algebra and decompositions
|
||||
|
||||
This page explains how to solve linear systems, compute various decompositions such as LU,
|
||||
QR, %SVD, eigendecompositions... After reading this page, don't miss our
|
||||
\link TopicLinearAlgebraDecompositions catalogue \endlink of dense matrix decompositions.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section TutorialLinAlgBasicSolve Basic linear solving
|
||||
|
||||
\b The \b problem: You have a system of equations, that you have written as a single matrix equation
|
||||
\f[ Ax \: = \: b \f]
|
||||
Where \a A and \a b are matrices (\a b could be a vector, as a special case). You want to find a solution \a x.
|
||||
|
||||
\b The \b solution: You can choose between various decompositions, depending on what your matrix \a A looks like,
|
||||
and depending on whether you favor speed or accuracy. However, let's start with an example that works in all cases,
|
||||
and is a good compromise:
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include TutorialLinAlgExSolveColPivHouseholderQR.cpp </td>
|
||||
<td>\verbinclude TutorialLinAlgExSolveColPivHouseholderQR.out </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
In this example, the colPivHouseholderQr() method returns an object of class ColPivHouseholderQR. Since here the
|
||||
matrix is of type Matrix3f, this line could have been replaced by:
|
||||
\code
|
||||
ColPivHouseholderQR<Matrix3f> dec(A);
|
||||
Vector3f x = dec.solve(b);
|
||||
\endcode
|
||||
|
||||
Here, ColPivHouseholderQR is a QR decomposition with column pivoting. It's a good compromise for this tutorial, as it
|
||||
works for all matrices while being quite fast. Here is a table of some other decompositions that you can choose from,
|
||||
depending on your matrix and the trade-off you want to make:
|
||||
|
||||
<table class="manual">
|
||||
<tr>
|
||||
<th>Decomposition</th>
|
||||
<th>Method</th>
|
||||
<th>Requirements on the matrix</th>
|
||||
<th>Speed</th>
|
||||
<th>Accuracy</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>PartialPivLU</td>
|
||||
<td>partialPivLu()</td>
|
||||
<td>Invertible</td>
|
||||
<td>++</td>
|
||||
<td>+</td>
|
||||
</tr>
|
||||
<tr class="alt">
|
||||
<td>FullPivLU</td>
|
||||
<td>fullPivLu()</td>
|
||||
<td>None</td>
|
||||
<td>-</td>
|
||||
<td>+++</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>HouseholderQR</td>
|
||||
<td>householderQr()</td>
|
||||
<td>None</td>
|
||||
<td>++</td>
|
||||
<td>+</td>
|
||||
</tr>
|
||||
<tr class="alt">
|
||||
<td>ColPivHouseholderQR</td>
|
||||
<td>colPivHouseholderQr()</td>
|
||||
<td>None</td>
|
||||
<td>+</td>
|
||||
<td>++</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>FullPivHouseholderQR</td>
|
||||
<td>fullPivHouseholderQr()</td>
|
||||
<td>None</td>
|
||||
<td>-</td>
|
||||
<td>+++</td>
|
||||
</tr>
|
||||
<tr class="alt">
|
||||
<td>LLT</td>
|
||||
<td>llt()</td>
|
||||
<td>Positive definite</td>
|
||||
<td>+++</td>
|
||||
<td>+</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>LDLT</td>
|
||||
<td>ldlt()</td>
|
||||
<td>Positive or negative semidefinite</td>
|
||||
<td>+++</td>
|
||||
<td>++</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
All of these decompositions offer a solve() method that works as in the above example.
|
||||
|
||||
For example, if your matrix is positive definite, the above table says that a very good
|
||||
choice is then the LDLT decomposition. Here's an example, also demonstrating that using a general
|
||||
matrix (not a vector) as right hand side is possible.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include TutorialLinAlgExSolveLDLT.cpp </td>
|
||||
<td>\verbinclude TutorialLinAlgExSolveLDLT.out </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
For a \ref TopicLinearAlgebraDecompositions "much more complete table" comparing all decompositions supported by Eigen (notice that Eigen
|
||||
supports many other decompositions), see our special page on
|
||||
\ref TopicLinearAlgebraDecompositions "this topic".
|
||||
|
||||
\section TutorialLinAlgSolutionExists Checking if a solution really exists
|
||||
|
||||
Only you know what error margin you want to allow for a solution to be considered valid.
|
||||
So Eigen lets you do this computation for yourself, if you want to, as in this example:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include TutorialLinAlgExComputeSolveError.cpp </td>
|
||||
<td>\verbinclude TutorialLinAlgExComputeSolveError.out </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
\section TutorialLinAlgEigensolving Computing eigenvalues and eigenvectors
|
||||
|
||||
You need an eigendecomposition here, see available such decompositions on \ref TopicLinearAlgebraDecompositions "this page".
|
||||
Make sure to check if your matrix is self-adjoint, as is often the case in these problems. Here's an example using
|
||||
SelfAdjointEigenSolver, it could easily be adapted to general matrices using EigenSolver or ComplexEigenSolver.
|
||||
|
||||
The computation of eigenvalues and eigenvectors does not necessarily converge, but such failure to converge is
|
||||
very rare. The call to info() is to check for this possibility.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include TutorialLinAlgSelfAdjointEigenSolver.cpp </td>
|
||||
<td>\verbinclude TutorialLinAlgSelfAdjointEigenSolver.out </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
\section TutorialLinAlgInverse Computing inverse and determinant
|
||||
|
||||
First of all, make sure that you really want this. While inverse and determinant are fundamental mathematical concepts,
|
||||
in \em numerical linear algebra they are not as popular as in pure mathematics. Inverse computations are often
|
||||
advantageously replaced by solve() operations, and the determinant is often \em not a good way of checking if a matrix
|
||||
is invertible.
|
||||
|
||||
However, for \em very \em small matrices, the above is not true, and inverse and determinant can be very useful.
|
||||
|
||||
While certain decompositions, such as PartialPivLU and FullPivLU, offer inverse() and determinant() methods, you can also
|
||||
call inverse() and determinant() directly on a matrix. If your matrix is of a very small fixed size (at most 4x4) this
|
||||
allows Eigen to avoid performing an LU decomposition, and instead use formulas that are more efficient on such small matrices.
|
||||
|
||||
Here is an example:
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include TutorialLinAlgInverseDeterminant.cpp </td>
|
||||
<td>\verbinclude TutorialLinAlgInverseDeterminant.out </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
\section TutorialLinAlgLeastsquares Least squares solving
|
||||
|
||||
The best way to do least squares solving is with an SVD decomposition. Eigen provides one as the JacobiSVD class, and its solve()
|
||||
method performs least-squares solving.
|
||||
|
||||
Here is an example:
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include TutorialLinAlgSVDSolve.cpp </td>
|
||||
<td>\verbinclude TutorialLinAlgSVDSolve.out </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
Another way, potentially faster but less reliable, is to use a LDLT decomposition
|
||||
of the normal matrix. In any case, just read any reference text on least squares, and it will be very easy for you
|
||||
to implement any linear least squares computation on top of Eigen.
|
||||
|
||||
\section TutorialLinAlgSeparateComputation Separating the computation from the construction
|
||||
|
||||
In the above examples, the decomposition was computed at the same time that the decomposition object was constructed.
|
||||
There are however situations where you might want to separate these two things, for example if you don't know,
|
||||
at the time of the construction, the matrix that you will want to decompose; or if you want to reuse an existing
|
||||
decomposition object.
|
||||
|
||||
What makes this possible is that:
|
||||
\li all decompositions have a default constructor,
|
||||
\li all decompositions have a compute(matrix) method that does the computation, and that may be called again
|
||||
on an already-computed decomposition, reinitializing it.
|
||||
|
||||
For example:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include TutorialLinAlgComputeTwice.cpp </td>
|
||||
<td>\verbinclude TutorialLinAlgComputeTwice.out </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
Finally, you can tell the decomposition constructor to preallocate storage for decomposing matrices of a given size,
|
||||
so that when you subsequently decompose such matrices, no dynamic memory allocation is performed (of course, if you
|
||||
are using fixed-size matrices, no dynamic memory allocation happens at all). This is done by just
|
||||
passing the size to the decomposition constructor, as in this example:
|
||||
\code
|
||||
HouseholderQR<MatrixXf> qr(50,50);
|
||||
MatrixXf A = MatrixXf::Random(50,50);
|
||||
qr.compute(A); // no dynamic memory allocation
|
||||
\endcode
|
||||
|
||||
\section TutorialLinAlgRankRevealing Rank-revealing decompositions
|
||||
|
||||
Certain decompositions are rank-revealing, i.e. are able to compute the rank of a matrix. These are typically
|
||||
also the decompositions that behave best in the face of a non-full-rank matrix (which in the square case means a
|
||||
singular matrix). On \ref TopicLinearAlgebraDecompositions "this table" you can see for all our decompositions
|
||||
whether they are rank-revealing or not.
|
||||
|
||||
Rank-revealing decompositions offer at least a rank() method. They can also offer convenience methods such as isInvertible(),
|
||||
and some are also providing methods to compute the kernel (null-space) and image (column-space) of the matrix, as is the
|
||||
case with FullPivLU:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include TutorialLinAlgRankRevealing.cpp </td>
|
||||
<td>\verbinclude TutorialLinAlgRankRevealing.out </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
Of course, any rank computation depends on the choice of an arbitrary threshold, since practically no
|
||||
floating-point matrix is \em exactly rank-deficient. Eigen picks a sensible default threshold, which depends
|
||||
on the decomposition but is typically the diagonal size times machine epsilon. While this is the best default we
|
||||
could pick, only you know what is the right threshold for your application. You can set this by calling setThreshold()
|
||||
on your decomposition object before calling rank() or any other method that needs to use such a threshold.
|
||||
The decomposition itself, i.e. the compute() method, is independent of the threshold. You don't need to recompute the
|
||||
decomposition after you've changed the threshold.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include TutorialLinAlgSetThreshold.cpp </td>
|
||||
<td>\verbinclude TutorialLinAlgSetThreshold.out </td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,86 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TutorialMapClass Interfacing with raw buffers: the Map class
|
||||
|
||||
This page explains how to work with "raw" C/C++ arrays.
|
||||
This can be useful in a variety of contexts, particularly when "importing" vectors and matrices from other libraries into %Eigen.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section TutorialMapIntroduction Introduction
|
||||
|
||||
Occasionally you may have a pre-defined array of numbers that you want to use within %Eigen as a vector or matrix. While one option is to make a copy of the data, most commonly you probably want to re-use this memory as an %Eigen type. Fortunately, this is very easy with the Map class.
|
||||
|
||||
\section TutorialMapTypes Map types and declaring Map variables
|
||||
|
||||
A Map object has a type defined by its %Eigen equivalent:
|
||||
\code
|
||||
Map<Matrix<typename Scalar, int RowsAtCompileTime, int ColsAtCompileTime> >
|
||||
\endcode
|
||||
Note that, in this default case, a Map requires just a single template parameter.
|
||||
|
||||
To construct a Map variable, you need two other pieces of information: a pointer to the region of memory defining the array of coefficients, and the desired shape of the matrix or vector. For example, to define a matrix of \c float with sizes determined at compile time, you might do the following:
|
||||
\code
|
||||
Map<MatrixXf> mf(pf,rows,columns);
|
||||
\endcode
|
||||
where \c pf is a \c float \c * pointing to the array of memory. A fixed-size read-only vector of integers might be declared as
|
||||
\code
|
||||
Map<const Vector4i> mi(pi);
|
||||
\endcode
|
||||
where \c pi is an \c int \c *. In this case the size does not have to be passed to the constructor, because it is already specified by the Matrix/Array type.
|
||||
|
||||
Note that Map does not have a default constructor; you \em must pass a pointer to initialize the object. However, you can work around this requirement (see \ref TutorialMapPlacementNew).
|
||||
|
||||
Map is flexible enough to accommodate a variety of different data representations. There are two other (optional) template parameters:
|
||||
\code
|
||||
Map<typename MatrixType,
|
||||
int MapOptions,
|
||||
typename StrideType>
|
||||
\endcode
|
||||
\li \c MapOptions specifies whether the pointer is \c #Aligned, or \c #Unaligned. The default is \c #Unaligned.
|
||||
\li \c StrideType allows you to specify a custom layout for the memory array, using the Stride class. One example would be to specify that the data array is organized in row-major format:
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include Tutorial_Map_rowmajor.cpp </td>
|
||||
<td>\verbinclude Tutorial_Map_rowmajor.out </td></tr>
|
||||
</table>
|
||||
However, Stride is even more flexible than this; for details, see the documentation for the Map and Stride classes.
|
||||
|
||||
\section TutorialMapUsing Using Map variables
|
||||
|
||||
You can use a Map object just like any other %Eigen type:
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include Tutorial_Map_using.cpp </td>
|
||||
<td>\verbinclude Tutorial_Map_using.out </td>
|
||||
</table>
|
||||
|
||||
All %Eigen functions are written to accept Map objects just like other %Eigen types. However, when writing your own functions taking %Eigen types, this does \em not happen automatically: a Map type is not identical to its Dense equivalent. See \ref TopicFunctionTakingEigenTypes for details.
|
||||
|
||||
\section TutorialMapPlacementNew Changing the mapped array
|
||||
|
||||
It is possible to change the array of a Map object after declaration, using the C++ "placement new" syntax:
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include Map_placement_new.cpp </td>
|
||||
<td>\verbinclude Map_placement_new.out </td></tr>
|
||||
</table>
|
||||
Despite appearances, this does not invoke the memory allocator, because the syntax specifies the location for storing the result.
|
||||
|
||||
This syntax makes it possible to declare a Map object without first knowing the mapped array's location in memory:
|
||||
\code
|
||||
Map<Matrix3f> A(NULL); // don't try to use this matrix yet!
|
||||
VectorXf b(n_matrices);
|
||||
for (int i = 0; i < n_matrices; i++)
|
||||
{
|
||||
new (&A) Map<Matrix3f>(get_matrix_pointer(i));
|
||||
b(i) = A.trace();
|
||||
}
|
||||
\endcode
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,214 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TutorialMatrixArithmetic Matrix and vector arithmetic
|
||||
|
||||
This page aims to provide an overview and some details on how to perform arithmetic
|
||||
between matrices, vectors and scalars with Eigen.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section TutorialArithmeticIntroduction Introduction
|
||||
|
||||
Eigen offers matrix/vector arithmetic operations either through overloads of common C++ arithmetic operators such as +, -, *,
|
||||
or through special methods such as dot(), cross(), etc.
|
||||
For the Matrix class (matrices and vectors), operators are only overloaded to support
|
||||
linear-algebraic operations. For example, \c matrix1 \c * \c matrix2 means matrix-matrix product,
|
||||
and \c vector \c + \c scalar is just not allowed. If you want to perform all kinds of array operations,
|
||||
not linear algebra, see the \ref TutorialArrayClass "next page".
|
||||
|
||||
\section TutorialArithmeticAddSub Addition and subtraction
|
||||
|
||||
The left hand side and right hand side must, of course, have the same numbers of rows and of columns. They must
|
||||
also have the same \c Scalar type, as Eigen doesn't do automatic type promotion. The operators at hand here are:
|
||||
\li binary operator + as in \c a+b
|
||||
\li binary operator - as in \c a-b
|
||||
\li unary operator - as in \c -a
|
||||
\li compound operator += as in \c a+=b
|
||||
\li compound operator -= as in \c a-=b
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include tut_arithmetic_add_sub.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude tut_arithmetic_add_sub.out
|
||||
</td></tr></table>
|
||||
|
||||
\section TutorialArithmeticScalarMulDiv Scalar multiplication and division
|
||||
|
||||
Multiplication and division by a scalar is very simple too. The operators at hand here are:
|
||||
\li binary operator * as in \c matrix*scalar
|
||||
\li binary operator * as in \c scalar*matrix
|
||||
\li binary operator / as in \c matrix/scalar
|
||||
\li compound operator *= as in \c matrix*=scalar
|
||||
\li compound operator /= as in \c matrix/=scalar
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include tut_arithmetic_scalar_mul_div.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude tut_arithmetic_scalar_mul_div.out
|
||||
</td></tr></table>
|
||||
|
||||
|
||||
\section TutorialArithmeticMentionXprTemplates A note about expression templates
|
||||
|
||||
This is an advanced topic that we explain on \ref TopicEigenExpressionTemplates "this page",
|
||||
but it is useful to just mention it now. In Eigen, arithmetic operators such as \c operator+ don't
|
||||
perform any computation by themselves, they just return an "expression object" describing the computation to be
|
||||
performed. The actual computation happens later, when the whole expression is evaluated, typically in \c operator=.
|
||||
While this might sound heavy, any modern optimizing compiler is able to optimize away that abstraction and
|
||||
the result is perfectly optimized code. For example, when you do:
|
||||
\code
|
||||
VectorXf a(50), b(50), c(50), d(50);
|
||||
...
|
||||
a = 3*b + 4*c + 5*d;
|
||||
\endcode
|
||||
Eigen compiles it to just one for loop, so that the arrays are traversed only once. Simplifying (e.g. ignoring
|
||||
SIMD optimizations), this loop looks like this:
|
||||
\code
|
||||
for(int i = 0; i < 50; ++i)
|
||||
a[i] = 3*b[i] + 4*c[i] + 5*d[i];
|
||||
\endcode
|
||||
Thus, you should not be afraid of using relatively large arithmetic expressions with Eigen: it only gives Eigen
|
||||
more opportunities for optimization.
|
||||
|
||||
\section TutorialArithmeticTranspose Transposition and conjugation
|
||||
|
||||
The transpose \f$ a^T \f$, conjugate \f$ \bar{a} \f$, and adjoint (i.e., conjugate transpose) \f$ a^* \f$ of a matrix or vector \f$ a \f$ are obtained by the member functions \link DenseBase::transpose() transpose()\endlink, \link MatrixBase::conjugate() conjugate()\endlink, and \link MatrixBase::adjoint() adjoint()\endlink, respectively.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include tut_arithmetic_transpose_conjugate.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude tut_arithmetic_transpose_conjugate.out
|
||||
</td></tr></table>
|
||||
|
||||
For real matrices, \c conjugate() is a no-operation, and so \c adjoint() is equivalent to \c transpose().
|
||||
|
||||
As for basic arithmetic operators, \c transpose() and \c adjoint() simply return a proxy object without doing the actual transposition. If you do <tt>b = a.transpose()</tt>, then the transpose is evaluated at the same time as the result is written into \c b. However, there is a complication here. If you do <tt>a = a.transpose()</tt>, then Eigen starts writing the result into \c a before the evaluation of the transpose is finished. Therefore, the instruction <tt>a = a.transpose()</tt> does not replace \c a with its transpose, as one would expect:
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include tut_arithmetic_transpose_aliasing.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude tut_arithmetic_transpose_aliasing.out
|
||||
</td></tr></table>
|
||||
This is the so-called \ref TopicAliasing "aliasing issue". In "debug mode", i.e., when \ref TopicAssertions "assertions" have not been disabled, such common pitfalls are automatically detected.
|
||||
|
||||
For \em in-place transposition, as for instance in <tt>a = a.transpose()</tt>, simply use the \link DenseBase::transposeInPlace() transposeInPlace()\endlink function:
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include tut_arithmetic_transpose_inplace.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude tut_arithmetic_transpose_inplace.out
|
||||
</td></tr></table>
|
||||
There is also the \link MatrixBase::adjointInPlace() adjointInPlace()\endlink function for complex matrices.
|
||||
|
||||
\section TutorialArithmeticMatrixMul Matrix-matrix and matrix-vector multiplication
|
||||
|
||||
Matrix-matrix multiplication is again done with \c operator*. Since vectors are a special
|
||||
case of matrices, they are implicitly handled there too, so matrix-vector product is really just a special
|
||||
case of matrix-matrix product, and so is vector-vector outer product. Thus, all these cases are handled by just
|
||||
two operators:
|
||||
\li binary operator * as in \c a*b
|
||||
\li compound operator *= as in \c a*=b (this multiplies on the right: \c a*=b is equivalent to <tt>a = a*b</tt>)
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include tut_arithmetic_matrix_mul.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude tut_arithmetic_matrix_mul.out
|
||||
</td></tr></table>
|
||||
|
||||
Note: if you read the above paragraph on expression templates and are worried that doing \c m=m*m might cause
|
||||
aliasing issues, be reassured for now: Eigen treats matrix multiplication as a special case and takes care of
|
||||
introducing a temporary here, so it will compile \c m=m*m as:
|
||||
\code
|
||||
tmp = m*m;
|
||||
m = tmp;
|
||||
\endcode
|
||||
If you know your matrix product can be safely evaluated into the destination matrix without aliasing issue, then you can use the \link MatrixBase::noalias() noalias()\endlink function to avoid the temporary, e.g.:
|
||||
\code
|
||||
c.noalias() += a * b;
|
||||
\endcode
|
||||
For more details on this topic, see the page on \ref TopicAliasing "aliasing".
|
||||
|
||||
\b Note: for BLAS users worried about performance, expressions such as <tt>c.noalias() -= 2 * a.adjoint() * b;</tt> are fully optimized and trigger a single gemm-like function call.
|
||||
|
||||
\section TutorialArithmeticDotAndCross Dot product and cross product
|
||||
|
||||
For dot product and cross product, you need the \link MatrixBase::dot() dot()\endlink and \link MatrixBase::cross() cross()\endlink methods. Of course, the dot product can also be obtained as a 1x1 matrix as u.adjoint()*v.
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include tut_arithmetic_dot_cross.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude tut_arithmetic_dot_cross.out
|
||||
</td></tr></table>
|
||||
|
||||
Remember that cross product is only for vectors of size 3. Dot product is for vectors of any sizes.
|
||||
When using complex numbers, Eigen's dot product is conjugate-linear in the first variable and linear in the
|
||||
second variable.
|
||||
|
||||
\section TutorialArithmeticRedux Basic arithmetic reduction operations
|
||||
Eigen also provides some reduction operations to reduce a given matrix or vector to a single value such as the sum (computed by \link DenseBase::sum() sum()\endlink), product (\link DenseBase::prod() prod()\endlink), or the maximum (\link DenseBase::maxCoeff() maxCoeff()\endlink) and minimum (\link DenseBase::minCoeff() minCoeff()\endlink) of all its coefficients.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include tut_arithmetic_redux_basic.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude tut_arithmetic_redux_basic.out
|
||||
</td></tr></table>
|
||||
|
||||
The \em trace of a matrix, as returned by the function \link MatrixBase::trace() trace()\endlink, is the sum of the diagonal coefficients and can also be computed as efficiently using <tt>a.diagonal().sum()</tt>, as we will see later on.
|
||||
|
||||
There also exist variants of the \c minCoeff and \c maxCoeff functions returning the coordinates of the respective coefficient via the arguments:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include tut_arithmetic_redux_minmax.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude tut_arithmetic_redux_minmax.out
|
||||
</td></tr></table>
|
||||
|
||||
|
||||
\section TutorialArithmeticValidity Validity of operations
|
||||
Eigen checks the validity of the operations that you perform. When possible,
|
||||
it checks them at compile time, producing compilation errors. These error messages can be long and ugly,
|
||||
but Eigen writes the important message in UPPERCASE_LETTERS_SO_IT_STANDS_OUT. For example:
|
||||
\code
|
||||
Matrix3f m;
|
||||
Vector4f v;
|
||||
v = m*v; // Compile-time error: YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES
|
||||
\endcode
|
||||
|
||||
Of course, in many cases, for example when checking dynamic sizes, the check cannot be performed at compile time.
|
||||
Eigen then uses runtime assertions. This means that the program will abort with an error message when executing an illegal operation if it is run in "debug mode", and it will probably crash if assertions are turned off.
|
||||
|
||||
\code
|
||||
MatrixXf m(3,3);
|
||||
VectorXf v(4);
|
||||
v = m * v; // Run-time assertion failure here: "invalid matrix product"
|
||||
\endcode
|
||||
|
||||
For more details on this topic, see \ref TopicAssertions "this page".
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,265 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TutorialMatrixClass The Matrix class
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
In Eigen, all matrices and vectors are objects of the Matrix template class.
|
||||
Vectors are just a special case of matrices, with either 1 row or 1 column.
|
||||
|
||||
\section TutorialMatrixFirst3Params The first three template parameters of Matrix
|
||||
|
||||
The Matrix class takes six template parameters, but for now it's enough to
|
||||
learn about the first three parameters. The three remaining parameters have default
|
||||
values, which for now we will leave untouched, and which we
|
||||
\ref TutorialMatrixOptTemplParams "discuss below".
|
||||
|
||||
The three mandatory template parameters of Matrix are:
|
||||
\code
|
||||
Matrix<typename Scalar, int RowsAtCompileTime, int ColsAtCompileTime>
|
||||
\endcode
|
||||
\li \c Scalar is the scalar type, i.e. the type of the coefficients.
|
||||
That is, if you want a matrix of floats, choose \c float here.
|
||||
See \ref TopicScalarTypes "Scalar types" for a list of all supported
|
||||
scalar types and for how to extend support to new types.
|
||||
\li \c RowsAtCompileTime and \c ColsAtCompileTime are the number of rows
|
||||
and columns of the matrix as known at compile time (see
|
||||
\ref TutorialMatrixDynamic "below" for what to do if the number is not
|
||||
known at compile time).
|
||||
|
||||
We offer a lot of convenience typedefs to cover the usual cases. For example, \c Matrix4f is
|
||||
a 4x4 matrix of floats. Here is how it is defined by Eigen:
|
||||
\code
|
||||
typedef Matrix<float, 4, 4> Matrix4f;
|
||||
\endcode
|
||||
We discuss \ref TutorialMatrixTypedefs "below" these convenience typedefs.
|
||||
|
||||
\section TutorialMatrixVectors Vectors
|
||||
|
||||
As mentioned above, in Eigen, vectors are just a special case of
|
||||
matrices, with either 1 row or 1 column. The case where they have 1 column is the most common;
|
||||
such vectors are called column-vectors, often abbreviated as just vectors. In the other case
|
||||
where they have 1 row, they are called row-vectors.
|
||||
|
||||
For example, the convenience typedef \c Vector3f is a (column) vector of 3 floats. It is defined as follows by Eigen:
|
||||
\code
|
||||
typedef Matrix<float, 3, 1> Vector3f;
|
||||
\endcode
|
||||
We also offer convenience typedefs for row-vectors, for example:
|
||||
\code
|
||||
typedef Matrix<int, 1, 2> RowVector2i;
|
||||
\endcode
|
||||
|
||||
\section TutorialMatrixDynamic The special value Dynamic
|
||||
|
||||
Of course, Eigen is not limited to matrices whose dimensions are known at compile time.
|
||||
The \c RowsAtCompileTime and \c ColsAtCompileTime template parameters can take the special
|
||||
value \c Dynamic which indicates that the size is unknown at compile time, so must
|
||||
be handled as a run-time variable. In Eigen terminology, such a size is referred to as a
|
||||
\em dynamic \em size; while a size that is known at compile time is called a
|
||||
\em fixed \em size. For example, the convenience typedef \c MatrixXd, meaning
|
||||
a matrix of doubles with dynamic size, is defined as follows:
|
||||
\code
|
||||
typedef Matrix<double, Dynamic, Dynamic> MatrixXd;
|
||||
\endcode
|
||||
And similarly, we define a self-explanatory typedef \c VectorXi as follows:
|
||||
\code
|
||||
typedef Matrix<int, Dynamic, 1> VectorXi;
|
||||
\endcode
|
||||
You can perfectly have e.g. a fixed number of rows with a dynamic number of columns, as in:
|
||||
\code
|
||||
Matrix<float, 3, Dynamic>
|
||||
\endcode
|
||||
|
||||
\section TutorialMatrixConstructors Constructors
|
||||
|
||||
A default constructor is always available, never performs any dynamic memory allocation, and never initializes the matrix coefficients. You can do:
|
||||
\code
|
||||
Matrix3f a;
|
||||
MatrixXf b;
|
||||
\endcode
|
||||
Here,
|
||||
\li \c a is a 3-by-3 matrix, with a plain float[9] array of uninitialized coefficients,
|
||||
\li \c b is a dynamic-size matrix whose size is currently 0-by-0, and whose array of
|
||||
coefficients hasn't yet been allocated at all.
|
||||
|
||||
Constructors taking sizes are also available. For matrices, the number of rows is always passed first.
|
||||
For vectors, just pass the vector size. They allocate the array of coefficients
|
||||
with the given size, but don't initialize the coefficients themselves:
|
||||
\code
|
||||
MatrixXf a(10,15);
|
||||
VectorXf b(30);
|
||||
\endcode
|
||||
Here,
|
||||
\li \c a is a 10x15 dynamic-size matrix, with allocated but currently uninitialized coefficients.
|
||||
\li \c b is a dynamic-size vector of size 30, with allocated but currently uninitialized coefficients.
|
||||
|
||||
In order to offer a uniform API across fixed-size and dynamic-size matrices, it is legal to use these
|
||||
constructors on fixed-size matrices, even if passing the sizes is useless in this case. So this is legal:
|
||||
\code
|
||||
Matrix3f a(3,3);
|
||||
\endcode
|
||||
and is a no-operation.
|
||||
|
||||
Finally, we also offer some constructors to initialize the coefficients of small fixed-size vectors up to size 4:
|
||||
\code
|
||||
Vector2d a(5.0, 6.0);
|
||||
Vector3d b(5.0, 6.0, 7.0);
|
||||
Vector4d c(5.0, 6.0, 7.0, 8.0);
|
||||
\endcode
|
||||
|
||||
\section TutorialMatrixCoeffAccessors Coefficient accessors
|
||||
|
||||
The primary coefficient accessors and mutators in Eigen are the overloaded parenthesis operators.
|
||||
For matrices, the row index is always passed first. For vectors, just pass one index.
|
||||
The numbering starts at 0. This example is self-explanatory:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include tut_matrix_coefficient_accessors.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude tut_matrix_coefficient_accessors.out
|
||||
</td></tr></table>
|
||||
|
||||
Note that the syntax <tt> m(index) </tt>
|
||||
is not restricted to vectors, it is also available for general matrices, meaning index-based access
|
||||
in the array of coefficients. This however depends on the matrix's storage order. All Eigen matrices default to
|
||||
column-major storage order, but this can be changed to row-major, see \ref TopicStorageOrders "Storage orders".
|
||||
|
||||
The operator[] is also overloaded for index-based access in vectors, but keep in mind that C++ doesn't allow operator[] to
|
||||
take more than one argument. We restrict operator[] to vectors, because an awkwardness in the C++ language
|
||||
would make matrix[i,j] compile to the same thing as matrix[j] !
|
||||
|
||||
\section TutorialMatrixCommaInitializer Comma-initialization
|
||||
|
||||
%Matrix and vector coefficients can be conveniently set using the so-called \em comma-initializer syntax.
|
||||
For now, it is enough to know this example:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include Tutorial_commainit_01.cpp </td>
|
||||
<td>\verbinclude Tutorial_commainit_01.out </td>
|
||||
</tr></table>
|
||||
|
||||
|
||||
The right-hand side can also contain matrix expressions as discussed in \ref TutorialAdvancedInitialization "this page".
|
||||
|
||||
\section TutorialMatrixSizesResizing Resizing
|
||||
|
||||
The current size of a matrix can be retrieved by \link EigenBase::rows() rows()\endlink, \link EigenBase::cols() cols() \endlink and \link EigenBase::size() size()\endlink. These methods return the number of rows, the number of columns and the number of coefficients, respectively. Resizing a dynamic-size matrix is done by the \link PlainObjectBase::resize(Index,Index) resize() \endlink method.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include tut_matrix_resize.cpp </td>
|
||||
<td>\verbinclude tut_matrix_resize.out </td>
|
||||
</tr></table>
|
||||
|
||||
The resize() method is a no-operation if the actual matrix size doesn't change; otherwise it is destructive: the values of the coefficients may change.
|
||||
If you want a conservative variant of resize() which does not change the coefficients, use \link PlainObjectBase::conservativeResize() conservativeResize()\endlink, see \ref TopicResizing "this page" for more details.
|
||||
|
||||
All these methods are still available on fixed-size matrices, for the sake of API uniformity. Of course, you can't actually
|
||||
resize a fixed-size matrix. Trying to change a fixed size to an actually different value will trigger an assertion failure;
|
||||
but the following code is legal:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include tut_matrix_resize_fixed_size.cpp </td>
|
||||
<td>\verbinclude tut_matrix_resize_fixed_size.out </td>
|
||||
</tr></table>
|
||||
|
||||
|
||||
\section TutorialMatrixAssignment Assignment and resizing
|
||||
|
||||
Assignment is the action of copying a matrix into another, using \c operator=. Eigen resizes the matrix on the left-hand side automatically so that it matches the size of the matrix on the right-hand side. For example:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr>
|
||||
<td>\include tut_matrix_assignment_resizing.cpp </td>
|
||||
<td>\verbinclude tut_matrix_assignment_resizing.out </td>
|
||||
</tr></table>
|
||||
|
||||
Of course, if the left-hand side is of fixed size, resizing it is not allowed.
|
||||
|
||||
If you do not want this automatic resizing to happen (for example for debugging purposes), you can disable it, see
|
||||
\ref TopicResizing "this page".
|
||||
|
||||
|
||||
\section TutorialMatrixFixedVsDynamic Fixed vs. Dynamic size
|
||||
|
||||
When should one use fixed sizes (e.g. \c Matrix4f), and when should one prefer dynamic sizes (e.g. \c MatrixXf)?
|
||||
The simple answer is: use fixed
|
||||
sizes for very small sizes where you can, and use dynamic sizes for larger sizes or where you have to. For small sizes,
|
||||
especially for sizes smaller than (roughly) 16, using fixed sizes is hugely beneficial
|
||||
to performance, as it allows Eigen to avoid dynamic memory allocation and to unroll
|
||||
loops. Internally, a fixed-size Eigen matrix is just a plain array, i.e. doing
|
||||
\code Matrix4f mymatrix; \endcode
|
||||
really amounts to just doing
|
||||
\code float mymatrix[16]; \endcode
|
||||
so this really has zero runtime cost. By contrast, the array of a dynamic-size matrix
|
||||
is always allocated on the heap, so doing
|
||||
\code MatrixXf mymatrix(rows,columns); \endcode
|
||||
amounts to doing
|
||||
\code float *mymatrix = new float[rows*columns]; \endcode
|
||||
and in addition to that, the MatrixXf object stores its number of rows and columns as
|
||||
member variables.
|
||||
|
||||
The limitation of using fixed sizes, of course, is that this is only possible
|
||||
when you know the sizes at compile time. Also, for large enough sizes, say for sizes
|
||||
greater than (roughly) 32, the performance benefit of using fixed sizes becomes negligible.
|
||||
Worse, trying to create a very large matrix using fixed sizes inside a function could result in a
|
||||
stack overflow, since Eigen will try to allocate the array automatically as a local variable, and
|
||||
this is normally done on the stack.
|
||||
Finally, depending on circumstances, Eigen can also be more aggressive trying to vectorize
|
||||
(use SIMD instructions) when dynamic sizes are used, see \ref TopicVectorization "Vectorization".
|
||||
|
||||
\section TutorialMatrixOptTemplParams Optional template parameters
|
||||
|
||||
We mentioned at the beginning of this page that the Matrix class takes six template parameters,
|
||||
but so far we only discussed the first three. The remaining three parameters are optional. Here is
|
||||
the complete list of template parameters:
|
||||
\code
|
||||
Matrix<typename Scalar,
|
||||
int RowsAtCompileTime,
|
||||
int ColsAtCompileTime,
|
||||
int Options = 0,
|
||||
int MaxRowsAtCompileTime = RowsAtCompileTime,
|
||||
int MaxColsAtCompileTime = ColsAtCompileTime>
|
||||
\endcode
|
||||
\li \c Options is a bit field. Here, we discuss only one bit: \c RowMajor. It specifies that the matrices
|
||||
of this type use row-major storage order; by default, the storage order is column-major. See the page on
|
||||
\ref TopicStorageOrders "storage orders". For example, this type means row-major 3x3 matrices:
|
||||
\code
|
||||
Matrix<float, 3, 3, RowMajor>
|
||||
\endcode
|
||||
\li \c MaxRowsAtCompileTime and \c MaxColsAtCompileTime are useful when you want to specify that, even though
|
||||
the exact sizes of your matrices are not known at compile time, a fixed upper bound is known at
|
||||
compile time. The biggest reason why you might want to do that is to avoid dynamic memory allocation.
|
||||
For example the following matrix type uses a plain array of 12 floats, without dynamic memory allocation:
|
||||
\code
|
||||
Matrix<float, Dynamic, Dynamic, 0, 3, 4>
|
||||
\endcode
|
||||
|
||||
\section TutorialMatrixTypedefs Convenience typedefs
|
||||
|
||||
Eigen defines the following Matrix typedefs:
|
||||
\li MatrixNt for Matrix<type, N, N>. For example, MatrixXi for Matrix<int, Dynamic, Dynamic>.
|
||||
\li VectorNt for Matrix<type, N, 1>. For example, Vector2f for Matrix<float, 2, 1>.
|
||||
\li RowVectorNt for Matrix<type, 1, N>. For example, RowVector3d for Matrix<double, 1, 3>.
|
||||
|
||||
Where:
|
||||
\li N can be any one of \c 2, \c 3, \c 4, or \c X (meaning \c Dynamic).
|
||||
\li t can be any one of \c i (meaning int), \c f (meaning float), \c d (meaning double),
|
||||
\c cf (meaning complex<float>), or \c cd (meaning complex<double>). The fact that typedefs are only
|
||||
defined for these five types doesn't mean that they are the only supported scalar types. For example,
|
||||
all standard integer types are supported, see \ref TopicScalarTypes "Scalar types".
|
||||
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,257 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TutorialReductionsVisitorsBroadcasting Reductions, visitors and broadcasting
|
||||
|
||||
This page explains Eigen's reductions, visitors and broadcasting and how they are used with
|
||||
\link MatrixBase matrices \endlink and \link ArrayBase arrays \endlink.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section TutorialReductionsVisitorsBroadcastingReductions Reductions
|
||||
In Eigen, a reduction is a function taking a matrix or array, and returning a single
|
||||
scalar value. One of the most used reductions is \link DenseBase::sum() .sum() \endlink,
|
||||
returning the sum of all the coefficients inside a given matrix or array.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include tut_arithmetic_redux_basic.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude tut_arithmetic_redux_basic.out
|
||||
</td></tr></table>
|
||||
|
||||
The \em trace of a matrix, as returned by the function \c trace(), is the sum of the diagonal coefficients and can equivalently be computed as <tt>a.diagonal().sum()</tt>.
|
||||
|
||||
|
||||
\subsection TutorialReductionsVisitorsBroadcastingReductionsNorm Norm computations
|
||||
|
||||
The (Euclidean a.k.a. \f$\ell^2\f$) squared norm of a vector can be obtained with \link MatrixBase::squaredNorm() squaredNorm() \endlink. It is equal to the dot product of the vector by itself, and equivalently to the sum of squared absolute values of its coefficients.
|
||||
|
||||
Eigen also provides the \link MatrixBase::norm() norm() \endlink method, which returns the square root of \link MatrixBase::squaredNorm() squaredNorm() \endlink.
|
||||
|
||||
These operations can also operate on matrices; in that case, a n-by-p matrix is seen as a vector of size (n*p), so for example the \link MatrixBase::norm() norm() \endlink method returns the "Frobenius" or "Hilbert-Schmidt" norm. We refrain from speaking of the \f$\ell^2\f$ norm of a matrix because that can mean different things.
|
||||
|
||||
If you want other \f$\ell^p\f$ norms, use the \link MatrixBase::lpNorm() lpNorm<p>() \endlink method. The template parameter \a p can take the special value \a Infinity if you want the \f$\ell^\infty\f$ norm, which is the maximum of the absolute values of the coefficients.
|
||||
|
||||
The following example demonstrates these methods.
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.out
|
||||
</td></tr></table>
|
||||
|
||||
\subsection TutorialReductionsVisitorsBroadcastingReductionsBool Boolean reductions
|
||||
|
||||
The following reductions operate on boolean values:
|
||||
- \link DenseBase::all() all() \endlink returns \b true if all of the coefficients in a given Matrix or Array evaluate to \b true .
|
||||
- \link DenseBase::any() any() \endlink returns \b true if at least one of the coefficients in a given Matrix or Array evaluates to \b true .
|
||||
- \link DenseBase::count() count() \endlink returns the number of coefficients in a given Matrix or Array that evaluate to \b true.
|
||||
|
||||
These are typically used in conjunction with the coefficient-wise comparison and equality operators provided by Array. For instance, <tt>array > 0</tt> is an %Array of the same size as \c array , with \b true at those positions where the corresponding coefficient of \c array is positive. Thus, <tt>(array > 0).all()</tt> tests whether all coefficients of \c array are positive. This can be seen in the following example:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.out
|
||||
</td></tr></table>
|
||||
|
||||
\subsection TutorialReductionsVisitorsBroadcastingReductionsUserdefined User defined reductions
|
||||
|
||||
TODO
|
||||
|
||||
In the meantime you can have a look at the DenseBase::redux() function.
|
||||
|
||||
\section TutorialReductionsVisitorsBroadcastingVisitors Visitors
|
||||
Visitors are useful when one wants to obtain the location of a coefficient inside
|
||||
a Matrix or Array. The simplest examples are
|
||||
\link MatrixBase::maxCoeff() maxCoeff(&x,&y) \endlink and
|
||||
\link MatrixBase::minCoeff() minCoeff(&x,&y)\endlink, which can be used to find
|
||||
the location of the greatest or smallest coefficient in a Matrix or
|
||||
Array.
|
||||
|
||||
The arguments passed to a visitor are pointers to the variables where the
|
||||
row and column position are to be stored. These variables should be of type
|
||||
\link DenseBase::Index Index \endlink, as shown below:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_visitors.out
|
||||
</td></tr></table>
|
||||
|
||||
Note that both functions also return the value of the minimum or maximum coefficient if needed,
|
||||
as if it was a typical reduction operation.
|
||||
|
||||
\section TutorialReductionsVisitorsBroadcastingPartialReductions Partial reductions
|
||||
Partial reductions are reductions that can operate column- or row-wise on a Matrix or
|
||||
Array, applying the reduction operation on each column or row and
|
||||
returning a column or row-vector with the corresponding values. Partial reductions are applied
|
||||
with \link DenseBase::colwise() colwise() \endlink or \link DenseBase::rowwise() rowwise() \endlink.
|
||||
|
||||
A simple example is obtaining the maximum of the elements
|
||||
in each column in a given matrix, storing the result in a row-vector:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ReductionsVisitorsBroadcasting_colwise.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_colwise.out
|
||||
</td></tr></table>
|
||||
|
||||
The same operation can be performed row-wise:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ReductionsVisitorsBroadcasting_rowwise.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_rowwise.out
|
||||
</td></tr></table>
|
||||
|
||||
<b>Note that column-wise operations return a 'row-vector' while row-wise operations
|
||||
return a 'column-vector'</b>
|
||||
|
||||
\subsection TutorialReductionsVisitorsBroadcastingPartialReductionsCombined Combining partial reductions with other operations
|
||||
It is also possible to use the result of a partial reduction to do further processing.
|
||||
Here is another example that finds the column whose sum of elements is the maximum
|
||||
within a matrix. With column-wise partial reductions this can be coded as:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_maxnorm.out
|
||||
</td></tr></table>
|
||||
|
||||
The previous example applies the \link DenseBase::sum() sum() \endlink reduction on each column
|
||||
through the \link DenseBase::colwise() colwise() \endlink visitor, obtaining a new matrix whose
|
||||
size is 1x4.
|
||||
|
||||
Therefore, if
|
||||
\f[
|
||||
\mbox{m} = \begin{bmatrix} 1 & 2 & 6 & 9 \\
|
||||
3 & 1 & 7 & 2 \end{bmatrix}
|
||||
\f]
|
||||
|
||||
then
|
||||
|
||||
\f[
|
||||
\mbox{m.colwise().sum()} = \begin{bmatrix} 4 & 3 & 13 & 11 \end{bmatrix}
|
||||
\f]
|
||||
|
||||
The \link DenseBase::maxCoeff() maxCoeff() \endlink reduction is finally applied
|
||||
to obtain the column index where the maximum sum is found,
|
||||
which is the column index 2 (third column) in this case.
|
||||
|
||||
|
||||
\section TutorialReductionsVisitorsBroadcastingBroadcasting Broadcasting
|
||||
The concept behind broadcasting is similar to partial reductions, with the difference that broadcasting
|
||||
constructs an expression where a vector (column or row) is interpreted as a matrix by replicating it in
|
||||
one direction.
|
||||
|
||||
A simple example is to add a certain column-vector to each column in a matrix.
|
||||
This can be accomplished with:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.out
|
||||
</td></tr></table>
|
||||
|
||||
We can interpret the instruction <tt>mat.colwise() += v</tt> in two equivalent ways. It adds the vector \c v
|
||||
to every column of the matrix. Alternatively, it can be interpreted as repeating the vector \c v four times to
|
||||
form a two-by-four matrix which is then added to \c mat:
|
||||
\f[
|
||||
\begin{bmatrix} 1 & 2 & 6 & 9 \\ 3 & 1 & 7 & 2 \end{bmatrix}
|
||||
+ \begin{bmatrix} 0 & 0 & 0 & 0 \\ 1 & 1 & 1 & 1 \end{bmatrix}
|
||||
= \begin{bmatrix} 1 & 2 & 6 & 9 \\ 4 & 2 & 8 & 3 \end{bmatrix}.
|
||||
\f]
|
||||
The operators <tt>-=</tt>, <tt>+</tt> and <tt>-</tt> can also be used column-wise and row-wise. On arrays, we
|
||||
can also use the operators <tt>*=</tt>, <tt>/=</tt>, <tt>*</tt> and <tt>/</tt> to perform coefficient-wise
|
||||
multiplication and division column-wise or row-wise. These operators are not available on matrices because it
|
||||
is not clear what they would do. If you want to multiply column 0 of a matrix \c mat with \c v(0), column 1 with
|
||||
\c v(1), and so on, then use <tt>mat = mat * v.asDiagonal()</tt>.
|
||||
|
||||
It is important to point out that the vector to be added column-wise or row-wise must be of type Vector,
|
||||
and cannot be a Matrix. If this is not met then you will get a compile-time error. This also means that
|
||||
broadcasting operations can only be applied with an object of type Vector, when operating with Matrix.
|
||||
The same applies for the Array class, where the equivalent for VectorXf is ArrayXf. As always, you should
|
||||
not mix arrays and matrices in the same expression.
|
||||
|
||||
To perform the same operation row-wise we can do:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.out
|
||||
</td></tr></table>
|
||||
|
||||
\subsection TutorialReductionsVisitorsBroadcastingBroadcastingCombined Combining broadcasting with other operations
|
||||
Broadcasting can also be combined with other operations, such as Matrix or Array operations,
|
||||
reductions and partial reductions.
|
||||
|
||||
Now that broadcasting, reductions and partial reductions have been introduced, we can dive into a more advanced example that finds
|
||||
the nearest neighbour of a vector <tt>v</tt> within the columns of matrix <tt>m</tt>. The Euclidean distance will be used in this example,
|
||||
computing the squared Euclidean distance with the partial reduction named \link MatrixBase::squaredNorm() squaredNorm() \endlink:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
<tr><td>
|
||||
\include Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp
|
||||
</td>
|
||||
<td>
|
||||
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.out
|
||||
</td></tr></table>
|
||||
|
||||
The line that does the job is
|
||||
\code
|
||||
(m.colwise() - v).colwise().squaredNorm().minCoeff(&index);
|
||||
\endcode
|
||||
|
||||
We will go step by step to understand what is happening:
|
||||
|
||||
- <tt>m.colwise() - v</tt> is a broadcasting operation, subtracting <tt>v</tt> from each column in <tt>m</tt>. The result of this operation
|
||||
is a new matrix whose size is the same as matrix <tt>m</tt>: \f[
|
||||
\mbox{m.colwise() - v} =
|
||||
\begin{bmatrix}
|
||||
-1 & 21 & 4 & 7 \\
|
||||
0 & 8 & 4 & -1
|
||||
\end{bmatrix}
|
||||
\f]
|
||||
|
||||
- <tt>(m.colwise() - v).colwise().squaredNorm()</tt> is a partial reduction, computing the squared norm column-wise. The result of
|
||||
this operation is a row-vector where each coefficient is the squared Euclidean distance between each column in <tt>m</tt> and <tt>v</tt>: \f[
|
||||
\mbox{(m.colwise() - v).colwise().squaredNorm()} =
|
||||
\begin{bmatrix}
|
||||
1 & 505 & 32 & 50
|
||||
\end{bmatrix}
|
||||
\f]
|
||||
|
||||
- Finally, <tt>minCoeff(&index)</tt> is used to obtain the index of the column in <tt>m</tt> that is closest to <tt>v</tt> in terms of Euclidean
|
||||
distance.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,341 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TutorialSparse Sparse matrix manipulations
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
Manipulating and solving sparse problems involves various modules which are summarized below:
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Module</th><th>Header file</th><th>Contents</th></tr>
|
||||
<tr><td>\link SparseCore_Module SparseCore \endlink</td><td>\code#include <Eigen/SparseCore>\endcode</td><td>SparseMatrix and SparseVector classes, matrix assembly, basic sparse linear algebra (including sparse triangular solvers)</td></tr>
|
||||
<tr><td>\link SparseCholesky_Module SparseCholesky \endlink</td><td>\code#include <Eigen/SparseCholesky>\endcode</td><td>Direct sparse LLT and LDLT Cholesky factorization to solve sparse self-adjoint positive definite problems</td></tr>
|
||||
<tr><td>\link SparseLU_Module SparseLU \endlink</td><td>\code #include<Eigen/SparseLU> \endcode</td>
|
||||
<td>%Sparse LU factorization to solve general square sparse systems</td></tr>
|
||||
<tr><td>\link SparseQR_Module SparseQR \endlink</td><td>\code #include<Eigen/SparseQR>\endcode </td><td>%Sparse QR factorization for solving sparse linear least-squares problems</td></tr>
|
||||
<tr><td>\link IterativeLinearSolvers_Module IterativeLinearSolvers \endlink</td><td>\code#include <Eigen/IterativeLinearSolvers>\endcode</td><td>Iterative solvers to solve large general linear square problems (including self-adjoint positive definite problems)</td></tr>
|
||||
<tr><td>\link Sparse_Module Sparse \endlink</td><td>\code#include <Eigen/Sparse>\endcode</td><td>Includes all the above modules</td></tr>
|
||||
</table>
|
||||
|
||||
\section TutorialSparseIntro Sparse matrix format
|
||||
|
||||
In many applications (e.g., finite element methods) it is common to deal with very large matrices where only a few coefficients are different from zero. In such cases, memory consumption can be reduced and performance increased by using a specialized representation storing only the nonzero coefficients. Such a matrix is called a sparse matrix.
|
||||
|
||||
\b The \b %SparseMatrix \b class
|
||||
|
||||
The class SparseMatrix is the main sparse matrix representation of Eigen's sparse module; it offers high performance and low memory usage.
|
||||
It implements a more versatile variant of the widely-used Compressed Column (or Row) Storage scheme.
|
||||
It consists of four compact arrays:
|
||||
- \c Values: stores the coefficient values of the non-zeros.
|
||||
- \c InnerIndices: stores the row (resp. column) indices of the non-zeros.
|
||||
- \c OuterStarts: stores for each column (resp. row) the index of the first non-zero in the previous two arrays.
|
||||
- \c InnerNNZs: stores the number of non-zeros of each column (resp. row).
|
||||
The word \c inner refers to an \em inner \em vector that is a column for a column-major matrix, or a row for a row-major matrix.
|
||||
The word \c outer refers to the other direction.
|
||||
|
||||
This storage scheme is better explained on an example. The following matrix
|
||||
<table class="manual">
|
||||
<tr><td> 0</td><td>3</td><td> 0</td><td>0</td><td> 0</td></tr>
|
||||
<tr><td>22</td><td>0</td><td> 0</td><td>0</td><td>17</td></tr>
|
||||
<tr><td> 7</td><td>5</td><td> 0</td><td>1</td><td> 0</td></tr>
|
||||
<tr><td> 0</td><td>0</td><td> 0</td><td>0</td><td> 0</td></tr>
|
||||
<tr><td> 0</td><td>0</td><td>14</td><td>0</td><td> 8</td></tr>
|
||||
</table>
|
||||
|
||||
and one of its possible sparse, \b column \b major representation:
|
||||
<table class="manual">
|
||||
<tr><td>Values:</td> <td>22</td><td>7</td><td>_</td><td>3</td><td>5</td><td>14</td><td>_</td><td>_</td><td>1</td><td>_</td><td>17</td><td>8</td></tr>
|
||||
<tr><td>InnerIndices:</td> <td> 1</td><td>2</td><td>_</td><td>0</td><td>2</td><td> 4</td><td>_</td><td>_</td><td>2</td><td>_</td><td> 1</td><td>4</td></tr>
|
||||
</table>
|
||||
<table class="manual">
|
||||
<tr><td>OuterStarts:</td><td>0</td><td>3</td><td>5</td><td>8</td><td>10</td><td>\em 12 </td></tr>
|
||||
<tr><td>InnerNNZs:</td> <td>2</td><td>2</td><td>1</td><td>1</td><td> 2</td><td></td></tr>
|
||||
</table>
|
||||
|
||||
Currently the elements of a given inner vector are guaranteed to be always sorted by increasing inner indices.
|
||||
The \c "_" indicates available free space to quickly insert new elements.
|
||||
Assuming no reallocation is needed, the insertion of a random element is therefore in O(nnz_j) where nnz_j is the number of nonzeros of the respective inner vector.
|
||||
On the other hand, inserting elements with increasing inner indices in a given inner vector is much more efficient since this only requires to increase the respective \c InnerNNZs entry that is a O(1) operation.
|
||||
|
||||
The case where no empty space is available is a special case, and is referred to as the \em compressed mode.
|
||||
It corresponds to the widely used Compressed Column (or Row) Storage schemes (CCS or CRS).
|
||||
Any SparseMatrix can be turned to this form by calling the SparseMatrix::makeCompressed() function.
|
||||
In this case, one can remark that the \c InnerNNZs array is redundant with \c OuterStarts because of the equality: \c InnerNNZs[j] = \c OuterStarts[j+1]-\c OuterStarts[j].
|
||||
Therefore, in practice a call to SparseMatrix::makeCompressed() frees this buffer.
|
||||
|
||||
It is worth noting that most of our wrappers to external libraries require compressed matrices as inputs.
|
||||
|
||||
The results of %Eigen's operations are always \b compressed sparse matrices.
|
||||
On the other hand, the insertion of a new element into a SparseMatrix converts it to the \b uncompressed mode.
|
||||
|
||||
Here is the previous matrix represented in compressed mode:
|
||||
<table class="manual">
|
||||
<tr><td>Values:</td> <td>22</td><td>7</td><td>3</td><td>5</td><td>14</td><td>1</td><td>17</td><td>8</td></tr>
|
||||
<tr><td>InnerIndices:</td> <td> 1</td><td>2</td><td>0</td><td>2</td><td> 4</td><td>2</td><td> 1</td><td>4</td></tr>
|
||||
</table>
|
||||
<table class="manual">
|
||||
<tr><td>OuterStarts:</td><td>0</td><td>2</td><td>4</td><td>5</td><td>6</td><td>\em 8 </td></tr>
|
||||
</table>
|
||||
|
||||
A SparseVector is a special case of a SparseMatrix where only the \c Values and \c InnerIndices arrays are stored.
|
||||
There is no notion of compressed/uncompressed mode for a SparseVector.
|
||||
|
||||
|
||||
\section TutorialSparseExample First example
|
||||
|
||||
Before describing each individual class, let's start with the following typical example: solving the Laplace equation \f$ \Delta u = 0 \f$ on a regular 2D grid using a finite difference scheme and Dirichlet boundary conditions.
|
||||
Such problem can be mathematically expressed as a linear problem of the form \f$ Ax=b \f$ where \f$ x \f$ is the vector of \c m unknowns (in our case, the values of the pixels), \f$ b \f$ is the right hand side vector resulting from the boundary conditions, and \f$ A \f$ is an \f$ m \times m \f$ matrix containing only a few non-zero elements resulting from the discretization of the Laplacian operator.
|
||||
|
||||
<table class="manual">
|
||||
<tr><td>
|
||||
\include Tutorial_sparse_example.cpp
|
||||
</td>
|
||||
<td>
|
||||
\image html Tutorial_sparse_example.jpeg
|
||||
</td></tr></table>
|
||||
|
||||
In this example, we start by defining a column-major sparse matrix type of double \c SparseMatrix<double>, and a triplet list of the same scalar type \c Triplet<double>. A triplet is a simple object representing a non-zero entry as the triplet: \c row index, \c column index, \c value.
|
||||
|
||||
In the main function, we declare a list \c coefficients of triplets (as a std vector) and the right hand side vector \f$ b \f$ which are filled by the \a buildProblem function.
|
||||
The raw and flat list of non-zero entries is then converted to a true SparseMatrix object \c A.
|
||||
Note that the elements of the list do not have to be sorted, and possible duplicate entries will be summed up.
|
||||
|
||||
The last step consists of effectively solving the assembled problem.
|
||||
Since the resulting matrix \c A is symmetric by construction, we can perform a direct Cholesky factorization via the SimplicialLDLT class which behaves like its LDLT counterpart for dense objects.
|
||||
|
||||
The resulting vector \c x contains the pixel values as a 1D array which is saved to a jpeg file shown on the right of the code above.
|
||||
|
||||
Describing the \a buildProblem and \a save functions is out of the scope of this tutorial. They are given \ref TutorialSparse_example_details "here" for the curious, and for reproducibility purposes.
|
||||
|
||||
|
||||
|
||||
|
||||
\section TutorialSparseSparseMatrix The SparseMatrix class
|
||||
|
||||
\b %Matrix \b and \b vector \b properties \n
|
||||
|
||||
The SparseMatrix and SparseVector classes take three template arguments:
|
||||
* the scalar type (e.g., double)
|
||||
* the storage order (ColMajor or RowMajor, the default is ColMajor)
|
||||
* the inner index type (default is \c int).
|
||||
|
||||
As for dense Matrix objects, constructors take the size of the object.
|
||||
Here are some examples:
|
||||
|
||||
\code
|
||||
SparseMatrix<std::complex<float> > mat(1000,2000); // declares a 1000x2000 column-major compressed sparse matrix of complex<float>
|
||||
SparseMatrix<double,RowMajor> mat(1000,2000); // declares a 1000x2000 row-major compressed sparse matrix of double
|
||||
SparseVector<std::complex<float> > vec(1000); // declares a column sparse vector of complex<float> of size 1000
|
||||
SparseVector<double,RowMajor> vec(1000); // declares a row sparse vector of double of size 1000
|
||||
\endcode
|
||||
|
||||
In the rest of the tutorial, \c mat and \c vec represent any sparse-matrix and sparse-vector objects, respectively.
|
||||
|
||||
The dimensions of a matrix can be queried using the following functions:
|
||||
<table class="manual">
|
||||
<tr><td>Standard \n dimensions</td><td>\code
|
||||
mat.rows()
|
||||
mat.cols()\endcode</td>
|
||||
<td>\code
|
||||
vec.size() \endcode</td>
|
||||
</tr>
|
||||
<tr><td>Sizes along the \n inner/outer dimensions</td><td>\code
|
||||
mat.innerSize()
|
||||
mat.outerSize()\endcode</td>
|
||||
<td></td>
|
||||
</tr>
|
||||
<tr><td>Number of non \n zero coefficients</td><td>\code
|
||||
mat.nonZeros() \endcode</td>
|
||||
<td>\code
|
||||
vec.nonZeros() \endcode</td></tr>
|
||||
</table>
|
||||
|
||||
|
||||
\b Iterating \b over \b the \b nonzero \b coefficients \n
|
||||
|
||||
Random access to the elements of a sparse object can be done through the \c coeffRef(i,j) function.
|
||||
However, this function involves a quite expensive binary search.
|
||||
In most cases, one only wants to iterate over the non-zero elements. This is achieved by a standard loop over the outer dimension, and then by iterating over the non-zeros of the current inner vector via an InnerIterator. Thus, the non-zero entries have to be visited in the same order as the storage order.
|
||||
Here is an example:
|
||||
<table class="manual">
|
||||
<tr><td>
|
||||
\code
|
||||
SparseMatrix<double> mat(rows,cols);
|
||||
for (int k=0; k<mat.outerSize(); ++k)
|
||||
for (SparseMatrix<double>::InnerIterator it(mat,k); it; ++it)
|
||||
{
|
||||
it.value();
|
||||
it.row(); // row index
|
||||
it.col(); // col index (here it is equal to k)
|
||||
it.index(); // inner index, here it is equal to it.row()
|
||||
}
|
||||
\endcode
|
||||
</td><td>
|
||||
\code
|
||||
SparseVector<double> vec(size);
|
||||
for (SparseVector<double>::InnerIterator it(vec); it; ++it)
|
||||
{
|
||||
it.value(); // == vec[ it.index() ]
|
||||
it.index();
|
||||
}
|
||||
\endcode
|
||||
</td></tr>
|
||||
</table>
|
||||
For a writable expression, the referenced value can be modified using the valueRef() function.
|
||||
If the type of the sparse matrix or vector depends on a template parameter, then the \c typename keyword is
|
||||
required to indicate that \c InnerIterator denotes a type; see \ref TopicTemplateKeyword for details.
|
||||
|
||||
|
||||
\section TutorialSparseFilling Filling a sparse matrix
|
||||
|
||||
Because of the special storage scheme of a SparseMatrix, special care has to be taken when adding new nonzero entries.
|
||||
For instance, the cost of a single purely random insertion into a SparseMatrix is \c O(nnz), where \c nnz is the current number of non-zero coefficients.
|
||||
|
||||
The simplest way to create a sparse matrix while guaranteeing good performance is thus to first build a list of so-called \em triplets, and then convert it to a SparseMatrix.
|
||||
|
||||
Here is a typical usage example:
|
||||
\code
|
||||
typedef Eigen::Triplet<double> T;
|
||||
std::vector<T> tripletList;
|
||||
tripletList.reserve(estimation_of_entries);
|
||||
for(...)
|
||||
{
|
||||
// ...
|
||||
tripletList.push_back(T(i,j,v_ij));
|
||||
}
|
||||
SparseMatrixType mat(rows,cols);
|
||||
mat.setFromTriplets(tripletList.begin(), tripletList.end());
|
||||
// mat is ready to go!
|
||||
\endcode
|
||||
The \c std::vector of triplets might contain the elements in arbitrary order, and might even contain duplicated elements that will be summed up by setFromTriplets().
|
||||
See the SparseMatrix::setFromTriplets() function and class Triplet for more details.
|
||||
|
||||
|
||||
In some cases, however, slightly higher performance, and lower memory consumption can be reached by directly inserting the non-zeros into the destination matrix.
|
||||
A typical scenario of this approach is illustrated below:
|
||||
\code
|
||||
1: SparseMatrix<double> mat(rows,cols); // default is column major
|
||||
2: mat.reserve(VectorXi::Constant(cols,6));
|
||||
3: for each i,j such that v_ij != 0
|
||||
4: mat.insert(i,j) = v_ij; // alternative: mat.coeffRef(i,j) += v_ij;
|
||||
5: mat.makeCompressed(); // optional
|
||||
\endcode
|
||||
|
||||
- The key ingredient here is the line 2 where we reserve room for 6 non-zeros per column. In many cases, the number of non-zeros per column or row can easily be known in advance. If it varies significantly for each inner vector, then it is possible to specify a reserve size for each inner vector by providing a vector object with an operator[](int j) returning the reserve size of the \c j-th inner vector (e.g., via a VectorXi or std::vector<int>). If only a rough estimate of the number of nonzeros per inner-vector can be obtained, it is highly recommended to overestimate it rather than underestimating it. If this line is omitted, then the first insertion of a new element will reserve room for 2 elements per inner vector.
|
||||
- The line 4 performs a sorted insertion. In this example, the ideal case is when the \c j-th column is not full and contains non-zeros whose inner-indices are smaller than \c i. In this case, this operation boils down to trivial O(1) operation.
|
||||
- When calling insert(i,j), the element at position \c (i,j) must not already exist; otherwise use the coeffRef(i,j) method, which allows one to, e.g., accumulate values. This method first performs a binary search and finally calls insert(i,j) if the element does not already exist. It is more flexible than insert() but also more costly.
|
||||
- The line 5 suppresses the remaining empty space and transforms the matrix into a compressed column storage.
|
||||
|
||||
|
||||
|
||||
\section TutorialSparseFeatureSet Supported operators and functions
|
||||
|
||||
Because of their special storage format, sparse matrices cannot offer the same level of flexibility as dense matrices.
|
||||
In Eigen's sparse module we chose to expose only the subset of the dense matrix API which can be efficiently implemented.
|
||||
In the following \em sm denotes a sparse matrix, \em sv a sparse vector, \em dm a dense matrix, and \em dv a dense vector.
|
||||
|
||||
\subsection TutorialSparse_BasicOps Basic operations
|
||||
|
||||
%Sparse expressions support most of the unary and binary coefficient wise operations:
|
||||
\code
|
||||
sm1.real() sm1.imag() -sm1 0.5*sm1
|
||||
sm1+sm2 sm1-sm2 sm1.cwiseProduct(sm2)
|
||||
\endcode
|
||||
However, a strong restriction is that the storage orders must match. For instance, in the following example:
|
||||
\code
|
||||
sm4 = sm1 + sm2 + sm3;
|
||||
\endcode
|
||||
sm1, sm2, and sm3 must all be row-major or all column-major.
|
||||
On the other hand, there is no restriction on the target matrix sm4.
|
||||
For instance, this means that for computing \f$ A^T + A \f$, the matrix \f$ A^T \f$ must be evaluated into a temporary matrix of compatible storage order:
|
||||
\code
|
||||
SparseMatrix<double> A, B;
|
||||
B = SparseMatrix<double>(A.transpose()) + A;
|
||||
\endcode
|
||||
|
||||
Some binary coefficient-wise operators can also mix sparse and dense expressions:
|
||||
\code
|
||||
sm2 = sm1.cwiseProduct(dm1);
|
||||
dm1 += sm1;
|
||||
\endcode
|
||||
|
||||
However, it is not yet possible to add a sparse and a dense matrix as in <tt>dm2 = sm1 + dm1</tt>.
|
||||
Please write this as the equivalent <tt>dm2 = dm1; dm2 += sm1</tt> (we plan to lift this restriction
|
||||
in the next release of %Eigen).
|
||||
|
||||
%Sparse expressions also support transposition:
|
||||
\code
|
||||
sm1 = sm2.transpose();
|
||||
sm1 = sm2.adjoint();
|
||||
\endcode
|
||||
However, there is no transposeInPlace() method.
|
||||
|
||||
|
||||
\subsection TutorialSparse_Products Matrix products
|
||||
|
||||
%Eigen supports various kinds of sparse matrix products, which are summarized below:
|
||||
- \b sparse-dense:
|
||||
\code
|
||||
dv2 = sm1 * dv1;
|
||||
dm2 = dm1 * sm1.adjoint();
|
||||
dm2 = 2. * sm1 * dm1;
|
||||
\endcode
|
||||
- \b symmetric \b sparse-dense. The product of a sparse symmetric matrix with a dense matrix (or vector) can also be optimized by specifying the symmetry with selfadjointView():
|
||||
\code
|
||||
dm2 = A.selfadjointView<>() * dm1;      // if all coefficients of A are stored
|
||||
dm2 = A.selfadjointView<Upper>() * dm1; // if only the upper part of A is stored
|
||||
dm2 = A.selfadjointView<Lower>() * dm1; // if only the lower part of A is stored
|
||||
\endcode
|
||||
- \b sparse-sparse. For sparse-sparse products, two different algorithms are available. The default one is conservative and preserve the explicit zeros that might appear:
|
||||
\code
|
||||
sm3 = sm1 * sm2;
|
||||
sm3 = 4 * sm1.adjoint() * sm2;
|
||||
\endcode
|
||||
The second algorithm prunes the explicit zeros, or the values smaller than a given threshold, on the fly. It is enabled and controlled through the pruned() functions:
|
||||
\code
|
||||
sm3 = (sm1 * sm2).pruned(); // removes numerical zeros
|
||||
sm3 = (sm1 * sm2).pruned(ref); // removes elements much smaller than ref
|
||||
sm3 = (sm1 * sm2).pruned(ref,epsilon); // removes elements smaller than ref*epsilon
|
||||
\endcode
|
||||
|
||||
- \b permutations. Finally, permutations can be applied to sparse matrices too:
|
||||
\code
|
||||
PermutationMatrix<Dynamic,Dynamic> P = ...;
|
||||
sm2 = P * sm1;
|
||||
sm2 = sm1 * P.inverse();
|
||||
sm2 = sm1.transpose() * P;
|
||||
\endcode
|
||||
|
||||
|
||||
\subsection TutorialSparse_TriangularSelfadjoint Triangular and selfadjoint views
|
||||
|
||||
Just as with dense matrices, the triangularView() function can be used to address a triangular part of the matrix, and perform triangular solves with a dense right hand side:
|
||||
\code
|
||||
dm2 = sm1.triangularView<Lower>().solve(dm1);
|
||||
dv2 = sm1.transpose().triangularView<Upper>().solve(dv1);
|
||||
\endcode
|
||||
|
||||
The selfadjointView() function permits various operations:
|
||||
- optimized sparse-dense matrix products:
|
||||
\code
|
||||
dm2 = A.selfadjointView<>() * dm1;      // if all coefficients of A are stored
|
||||
dm2 = A.selfadjointView<Upper>() * dm1; // if only the upper part of A is stored
|
||||
dm2 = A.selfadjointView<Lower>() * dm1; // if only the lower part of A is stored
|
||||
\endcode
|
||||
- copy of triangular parts:
|
||||
\code
|
||||
sm2 = sm1.selfadjointView<Upper>(); // makes a full selfadjoint matrix from the upper triangular part
|
||||
sm2.selfadjointView<Lower>() = sm1.selfadjointView<Upper>(); // copies the upper triangular part to the lower triangular part
|
||||
\endcode
|
||||
- application of symmetric permutations:
|
||||
\code
|
||||
PermutationMatrix<Dynamic,Dynamic> P = ...;
|
||||
sm2 = A.selfadjointView<Upper>().twistedBy(P); // compute P S P' from the upper triangular part of A, and make it a full matrix
|
||||
sm2.selfadjointView<Lower>() = A.selfadjointView<Lower>().twistedBy(P); // compute P S P' from the lower triangular part of A, and then only compute the lower part
|
||||
\endcode
|
||||
|
||||
Please, refer to the \link SparseQuickRefPage Quick Reference \endlink guide for the list of supported operations. The list of linear solvers available is \link TopicSparseSystems here. \endlink
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,4 +0,0 @@
|
|||
/**
|
||||
\page TutorialSparse_example_details
|
||||
\include Tutorial_sparse_example_details.cpp
|
||||
*/
|
|
@ -1,114 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TopicUnalignedArrayAssert Explanation of the assertion on unaligned arrays
|
||||
|
||||
Hello! You are seeing this webpage because your program terminated on an assertion failure like this one:
|
||||
<pre>
|
||||
my_program: path/to/eigen/Eigen/src/Core/DenseStorage.h:44:
|
||||
Eigen::internal::matrix_array<T, Size, MatrixOptions, Align>::internal::matrix_array()
|
||||
[with T = double, int Size = 2, int MatrixOptions = 2, bool Align = true]:
|
||||
Assertion `(reinterpret_cast<size_t>(array) & 0xf) == 0 && "this assertion
|
||||
is explained here: http://eigen.tuxfamily.org/dox/UnalignedArrayAssert.html
|
||||
**** READ THIS WEB PAGE !!! ****"' failed.
|
||||
</pre>
|
||||
|
||||
There are 4 known causes for this issue. Please read on to understand them and learn how to fix them.
|
||||
|
||||
\eigenAutoToc
|
||||
|
||||
\section where Where in my own code is the cause of the problem?
|
||||
|
||||
First of all, you need to find out where in your own code this assertion was triggered from. At first glance, the error message doesn't look helpful, as it refers to a file inside Eigen! However, since your program crashed, if you can reproduce the crash, you can get a backtrace using any debugger. For example, if you're using GCC, you can use the GDB debugger as follows:
|
||||
\code
|
||||
$ gdb ./my_program # Start GDB on your program
|
||||
> run # Start running your program
|
||||
... # Now reproduce the crash!
|
||||
> bt # Obtain the backtrace
|
||||
\endcode
|
||||
Now that you know precisely where in your own code the problem is happening, read on to understand what you need to change.
|
||||
|
||||
\section c1 Cause 1: Structures having Eigen objects as members
|
||||
|
||||
If you have code like this,
|
||||
|
||||
\code
|
||||
class Foo
|
||||
{
|
||||
//...
|
||||
Eigen::Vector2d v;
|
||||
//...
|
||||
};
|
||||
//...
|
||||
Foo *foo = new Foo;
|
||||
\endcode
|
||||
|
||||
then you need to read this separate page: \ref TopicStructHavingEigenMembers "Structures Having Eigen Members".
|
||||
|
||||
Note that here, Eigen::Vector2d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
|
||||
|
||||
\section c2 Cause 2: STL Containers
|
||||
|
||||
If you use STL Containers such as std::vector, std::map, ..., with Eigen objects, or with classes containing Eigen objects, like this,
|
||||
|
||||
\code
|
||||
std::vector<Eigen::Matrix2f> my_vector;
|
||||
struct my_class { ... Eigen::Matrix2f m; ... };
|
||||
std::map<int, my_class> my_map;
|
||||
\endcode
|
||||
|
||||
then you need to read this separate page: \ref TopicStlContainers "Using STL Containers with Eigen".
|
||||
|
||||
Note that here, Eigen::Matrix2f is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member".
|
||||
|
||||
\section c3 Cause 3: Passing Eigen objects by value
|
||||
|
||||
If some function in your code is getting an Eigen object passed by value, like this,
|
||||
|
||||
\code
|
||||
void func(Eigen::Vector4d v);
|
||||
\endcode
|
||||
|
||||
then you need to read this separate page: \ref TopicPassingByValue "Passing Eigen objects by value to functions".
|
||||
|
||||
Note that here, Eigen::Vector4d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
|
||||
|
||||
\section c4 Cause 4: Compiler making a wrong assumption on stack alignment (for instance GCC on Windows)
|
||||
|
||||
This is a must-read for people using GCC on Windows (like MinGW or TDM-GCC). If you have this assertion failure in an innocent function declaring a local variable like this:
|
||||
|
||||
\code
|
||||
void foo()
|
||||
{
|
||||
Eigen::Quaternionf q;
|
||||
//...
|
||||
}
|
||||
\endcode
|
||||
|
||||
then you need to read this separate page: \ref TopicWrongStackAlignment "Compiler making a wrong assumption on stack alignment".
|
||||
|
||||
Note that here, Eigen::Quaternionf is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
|
||||
|
||||
\section explanation General explanation of this assertion
|
||||
|
||||
\ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen objects" must absolutely be created at 16-byte-aligned locations, otherwise SIMD instructions addressing them will crash.
|
||||
|
||||
Eigen normally takes care of these alignment issues for you, by setting an alignment attribute on them and by overloading their "operator new".
|
||||
|
||||
However there are a few corner cases where these alignment settings get overridden: they are the possible causes for this assertion.
|
||||
|
||||
\section getrid I don't care about vectorization, how do I get rid of that stuff?
|
||||
|
||||
Two possibilities:
|
||||
<ul>
|
||||
<li>Define EIGEN_DONT_ALIGN_STATICALLY. That disables all 128-bit static alignment code, while keeping 128-bit heap alignment. This has the effect of
|
||||
disabling vectorization for fixed-size objects (like Matrix4d) while keeping vectorization of dynamic-size objects
|
||||
(like MatrixXd). But do note that this breaks ABI compatibility with the default behavior of 128-bit static alignment.</li>
|
||||
<li>Or define both EIGEN_DONT_VECTORIZE and EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT. This keeps the
|
||||
128-bit alignment code and thus preserves ABI compatibility, but completely disables vectorization.</li>
|
||||
</ul>
|
||||
|
||||
For more information, see <a href="http://eigen.tuxfamily.org/index.php?title=FAQ#I_disabled_vectorization.2C_but_I.27m_still_getting_annoyed_about_alignment_issues.21">this FAQ</a>.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,168 +0,0 @@
|
|||
/*
|
||||
Copyright (c) 2011, Intel Corporation. All rights reserved.
|
||||
Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its contributors may
|
||||
be used to endorse or promote products derived from this software without
|
||||
specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
********************************************************************************
|
||||
* Content : Documentation on the use of Intel MKL through Eigen
|
||||
********************************************************************************
|
||||
*/
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \page TopicUsingIntelMKL Using Intel® Math Kernel Library from Eigen
|
||||
|
||||
\section TopicUsingIntelMKL_Intro Eigen and Intel® Math Kernel Library (Intel® MKL)
|
||||
|
||||
Since Eigen version 3.1 and later, users can benefit from built-in Intel MKL optimizations with an installed copy of Intel MKL 10.3 (or later).
|
||||
<a href="http://eigen.tuxfamily.org/Counter/redirect_to_mkl.php"> Intel MKL </a> provides highly optimized multi-threaded mathematical routines for x86-compatible architectures.
|
||||
Intel MKL is available on Linux, Mac and Windows for both Intel64 and IA32 architectures.
|
||||
|
||||
\warning Be aware that Intel® MKL is a proprietary software. It is the responsibility of the users to buy MKL licenses for their products. Moreover, the license of the user product has to allow linking to proprietary software that excludes any unmodified versions of the GPL.
|
||||
|
||||
Using Intel MKL through Eigen is easy:
|
||||
-# define the \c EIGEN_USE_MKL_ALL macro before including any Eigen's header
|
||||
-# link your program to MKL libraries (see the <a href="http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor/">MKL linking advisor</a>)
|
||||
-# on a 64bits system, you must use the LP64 interface (not the ILP64 one)
|
||||
|
||||
When doing so, a number of Eigen's algorithms are silently substituted with calls to Intel MKL routines.
|
||||
These substitutions apply only for \b Dynamic \b or \b large enough objects with one of the following four standard scalar types: \c float, \c double, \c complex<float>, and \c complex<double>.
|
||||
Operations on other scalar types or mixing reals and complexes will continue to use the built-in algorithms.
|
||||
|
||||
In addition you can coarsely select choose which parts will be substituted by defining one or multiple of the following macros:
|
||||
|
||||
<table class="manual">
|
||||
<tr><td>\c EIGEN_USE_BLAS </td><td>Enables the use of external BLAS level 2 and 3 routines (currently works with Intel MKL only)</td></tr>
|
||||
<tr class="alt"><td>\c EIGEN_USE_LAPACKE </td><td>Enables the use of external Lapack routines via the <a href="http://www.netlib.org/lapack/lapacke.html">Intel Lapacke</a> C interface to Lapack (currently works with Intel MKL only)</td></tr>
|
||||
<tr><td>\c EIGEN_USE_LAPACKE_STRICT </td><td>Same as \c EIGEN_USE_LAPACKE but algorithm of lower robustness are disabled. This currently concerns only JacobiSVD which otherwise would be replaced by \c gesvd that is less robust than Jacobi rotations.</td></tr>
|
||||
<tr class="alt"><td>\c EIGEN_USE_MKL_VML </td><td>Enables the use of Intel VML (vector operations)</td></tr>
|
||||
<tr><td>\c EIGEN_USE_MKL_ALL </td><td>Defines \c EIGEN_USE_BLAS, \c EIGEN_USE_LAPACKE, and \c EIGEN_USE_MKL_VML </td></tr>
|
||||
</table>
|
||||
|
||||
Finally, the PARDISO sparse solver shipped with Intel MKL can be used through the \ref PardisoLU, \ref PardisoLLT and \ref PardisoLDLT classes of the \ref PardisoSupport_Module.
|
||||
|
||||
|
||||
\section TopicUsingIntelMKL_SupportedFeatures List of supported features
|
||||
|
||||
The breadth of Eigen functionality covered by Intel MKL is listed in the table below.
|
||||
<table class="manual">
|
||||
<tr><th>Functional domain</th><th>Code example</th><th>MKL routines</th></tr>
|
||||
<tr><td>Matrix-matrix operations \n \c EIGEN_USE_BLAS </td><td>\code
|
||||
m1*m2.transpose();
|
||||
m1.selfadjointView<Lower>()*m2;
|
||||
m1*m2.triangularView<Upper>();
|
||||
m1.selfadjointView<Lower>().rankUpdate(m2,1.0);
|
||||
\endcode</td><td>\code
|
||||
?gemm
|
||||
?symm/?hemm
|
||||
?trmm
|
||||
dsyrk/ssyrk
|
||||
\endcode</td></tr>
|
||||
<tr class="alt"><td>Matrix-vector operations \n \c EIGEN_USE_BLAS </td><td>\code
|
||||
m1.adjoint()*b;
|
||||
m1.selfadjointView<Lower>()*b;
|
||||
m1.triangularView<Upper>()*b;
|
||||
\endcode</td><td>\code
|
||||
?gemv
|
||||
?symv/?hemv
|
||||
?trmv
|
||||
\endcode</td></tr>
|
||||
<tr><td>LU decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
|
||||
v1 = m1.lu().solve(v2);
|
||||
\endcode</td><td>\code
|
||||
?getrf
|
||||
\endcode</td></tr>
|
||||
<tr class="alt"><td>Cholesky decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
|
||||
v1 = m2.selfadjointView<Upper>().llt().solve(v2);
|
||||
\endcode</td><td>\code
|
||||
?potrf
|
||||
\endcode</td></tr>
|
||||
<tr><td>QR decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
|
||||
m1.householderQr();
|
||||
m1.colPivHouseholderQr();
|
||||
\endcode</td><td>\code
|
||||
?geqrf
|
||||
?geqp3
|
||||
\endcode</td></tr>
|
||||
<tr class="alt"><td>Singular value decomposition \n \c EIGEN_USE_LAPACKE </td><td>\code
|
||||
JacobiSVD<MatrixXd> svd;
|
||||
svd.compute(m1, ComputeThinV);
|
||||
\endcode</td><td>\code
|
||||
?gesvd
|
||||
\endcode</td></tr>
|
||||
<tr><td>Eigen-value decompositions \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
|
||||
EigenSolver<MatrixXd> es(m1);
|
||||
ComplexEigenSolver<MatrixXcd> ces(m1);
|
||||
SelfAdjointEigenSolver<MatrixXd> saes(m1+m1.transpose());
|
||||
GeneralizedSelfAdjointEigenSolver<MatrixXd>
|
||||
gsaes(m1+m1.transpose(),m2+m2.transpose());
|
||||
\endcode</td><td>\code
|
||||
?gees
|
||||
?gees
|
||||
?syev/?heev
|
||||
?syev/?heev,
|
||||
?potrf
|
||||
\endcode</td></tr>
|
||||
<tr class="alt"><td>Schur decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
|
||||
RealSchur<MatrixXd> schurR(m1);
|
||||
ComplexSchur<MatrixXcd> schurC(m1);
|
||||
\endcode</td><td>\code
|
||||
?gees
|
||||
\endcode</td></tr>
|
||||
<tr><td>Vector Math \n \c EIGEN_USE_MKL_VML </td><td>\code
|
||||
v2=v1.array().sin();
|
||||
v2=v1.array().asin();
|
||||
v2=v1.array().cos();
|
||||
v2=v1.array().acos();
|
||||
v2=v1.array().tan();
|
||||
v2=v1.array().exp();
|
||||
v2=v1.array().log();
|
||||
v2=v1.array().sqrt();
|
||||
v2=v1.array().square();
|
||||
v2=v1.array().pow(1.5);
|
||||
\endcode</td><td>\code
|
||||
v?Sin
|
||||
v?Asin
|
||||
v?Cos
|
||||
v?Acos
|
||||
v?Tan
|
||||
v?Exp
|
||||
v?Ln
|
||||
v?Sqrt
|
||||
v?Sqr
|
||||
v?Powx
|
||||
\endcode</td></tr>
|
||||
</table>
|
||||
In the examples, m1 and m2 are dense matrices and v1 and v2 are dense vectors.
|
||||
|
||||
|
||||
\section TopicUsingIntelMKL_Links Links
|
||||
- Intel MKL can be purchased and downloaded <a href="http://eigen.tuxfamily.org/Counter/redirect_to_mkl.php">here</a>.
|
||||
- Intel MKL is also bundled with <a href="http://software.intel.com/en-us/articles/intel-composer-xe/">Intel Composer XE</a>.
|
||||
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,56 +0,0 @@
|
|||
namespace Eigen {
|
||||
|
||||
/** \eigenManualPage TopicWrongStackAlignment Compiler making a wrong assumption on stack alignment
|
||||
|
||||
<h4>It appears that this was a GCC bug that has been fixed in GCC 4.5.
|
||||
If you hit this issue, please upgrade to GCC 4.5 and report to us, so we can update this page.</h4>
|
||||
|
||||
This is an issue that, so far, we met only with GCC on Windows: for instance, MinGW and TDM-GCC.
|
||||
|
||||
By default, in a function like this,
|
||||
|
||||
\code
|
||||
void foo()
|
||||
{
|
||||
Eigen::Quaternionf q;
|
||||
//...
|
||||
}
|
||||
\endcode
|
||||
|
||||
GCC assumes that the stack is already 16-byte-aligned so that the object \a q will be created at a 16-byte-aligned location. For this reason, it doesn't take any special care to explicitly align the object \a q, as Eigen requires.
|
||||
|
||||
The problem is that, in some particular cases, this assumption can be wrong on Windows, where the stack is only guaranteed to have 4-byte alignment. Indeed, even though GCC takes care of aligning the stack in the main function and does its best to keep it aligned, when a function is called from another thread or from a binary compiled with another compiler, the stack alignment can be corrupted. This results in the object 'q' being created at an unaligned location, making your program crash with the \ref TopicUnalignedArrayAssert "assertion on unaligned arrays". So far we found the three following solutions.
|
||||
|
||||
|
||||
\section sec_sol1 Local solution
|
||||
|
||||
A local solution is to mark such a function with this attribute:
|
||||
\code
|
||||
__attribute__((force_align_arg_pointer)) void foo()
|
||||
{
|
||||
Eigen::Quaternionf q;
|
||||
//...
|
||||
}
|
||||
\endcode
|
||||
Read <a href="http://gcc.gnu.org/onlinedocs/gcc-4.4.0/gcc/Function-Attributes.html#Function-Attributes">this GCC documentation</a> to understand what this does. Of course this should only be done on GCC on Windows, so for portability you'll have to encapsulate this in a macro which you leave empty on other platforms. The advantage of this solution is that you can finely select which function might have a corrupted stack alignment. Of course on the downside this has to be done for every such function, so you may prefer one of the following two global solutions.
|
||||
|
||||
|
||||
\section sec_sol2 Global solutions
|
||||
|
||||
A global solution is to edit your project so that when compiling with GCC on Windows, you pass this option to GCC:
|
||||
\code
|
||||
-mincoming-stack-boundary=2
|
||||
\endcode
|
||||
Explanation: this tells GCC that the stack is only required to be aligned to 2^2=4 bytes, so that GCC now knows that it really must take extra care to honor the 16 byte alignment of \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" when needed.
|
||||
|
||||
Another global solution is to pass this option to gcc:
|
||||
\code
|
||||
-mstackrealign
|
||||
\endcode
|
||||
which has the same effect than adding the \c force_align_arg_pointer attribute to all functions.
|
||||
|
||||
These global solutions are easy to use, but note that they may slowdown your program because they lead to extra prologue/epilogue instructions for every function.
|
||||
|
||||
*/
|
||||
|
||||
}
|
|
@ -1,240 +0,0 @@
|
|||
|
||||
// generate a table of contents in the side-nav based on the h1/h2 tags of the current page.
|
||||
function generate_autotoc() {
|
||||
var headers = $("h1, h2");
|
||||
if(headers.length > 1) {
|
||||
var toc = $("#side-nav").append('<div id="nav-toc" class="toc"><h3>Table of contents</h3></div>');
|
||||
toc = $("#nav-toc");
|
||||
var footerHeight = footer.height();
|
||||
toc = toc.append('<ul></ul>');
|
||||
toc = toc.find('ul');
|
||||
var indices = new Array();
|
||||
indices[0] = 0;
|
||||
indices[1] = 0;
|
||||
|
||||
var h1counts = $("h1").length;
|
||||
headers.each(function(i) {
|
||||
var current = $(this);
|
||||
var levelTag = current[0].tagName.charAt(1);
|
||||
if(h1counts==0)
|
||||
levelTag--;
|
||||
var cur_id = current.attr("id");
|
||||
|
||||
indices[levelTag-1]+=1;
|
||||
var prefix = indices[0];
|
||||
if (levelTag >1) {
|
||||
prefix+="."+indices[1];
|
||||
}
|
||||
|
||||
// Uncomment to add number prefixes
|
||||
// current.html(prefix + " " + current.html());
|
||||
for(var l = levelTag; l < 2; ++l){
|
||||
indices[l] = 0;
|
||||
}
|
||||
|
||||
if(cur_id == undefined) {
|
||||
current.attr('id', 'title' + i);
|
||||
current.addClass('anchor');
|
||||
toc.append("<li class='level" + levelTag + "'><a id='link" + i + "' href='#title" +
|
||||
i + "' title='" + current.prop("tagName") + "'>" + current.text() + "</a></li>");
|
||||
} else {
|
||||
toc.append("<li class='level" + levelTag + "'><a id='" + cur_id + "' href='#title" +
|
||||
i + "' title='" + current.prop("tagName") + "'>" + current.text() + "</a></li>");
|
||||
}
|
||||
});
|
||||
resizeHeight();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
var global_navtree_object;
|
||||
|
||||
// Overloaded to remove links to sections/subsections
|
||||
function getNode(o, po)
|
||||
{
|
||||
po.childrenVisited = true;
|
||||
var l = po.childrenData.length-1;
|
||||
for (var i in po.childrenData) {
|
||||
var nodeData = po.childrenData[i];
|
||||
if((!nodeData[1]) || (nodeData[1].indexOf('#')==-1)) // <- we added this line
|
||||
po.children[i] = newNode(o, po, nodeData[0], nodeData[1], nodeData[2], i==l);
|
||||
}
|
||||
}
|
||||
|
||||
// Overloaded to adjust the size of the navtree wrt the toc
|
||||
function resizeHeight()
|
||||
{
|
||||
var toc = $("#nav-toc");
|
||||
var tocHeight = toc.height(); // <- we added this line
|
||||
var headerHeight = header.height();
|
||||
var footerHeight = footer.height();
|
||||
var windowHeight = $(window).height() - headerHeight - footerHeight;
|
||||
content.css({height:windowHeight + "px"});
|
||||
navtree.css({height:(windowHeight-tocHeight) + "px"}); // <- we modified this line
|
||||
sidenav.css({height:(windowHeight) + "px",top: headerHeight+"px"});
|
||||
}
|
||||
|
||||
// Overloaded to save the root node into global_navtree_object
|
||||
function initNavTree(toroot,relpath)
|
||||
{
|
||||
var o = new Object();
|
||||
global_navtree_object = o; // <- we added this line
|
||||
o.toroot = toroot;
|
||||
o.node = new Object();
|
||||
o.node.li = document.getElementById("nav-tree-contents");
|
||||
o.node.childrenData = NAVTREE;
|
||||
o.node.children = new Array();
|
||||
o.node.childrenUL = document.createElement("ul");
|
||||
o.node.getChildrenUL = function() { return o.node.childrenUL; };
|
||||
o.node.li.appendChild(o.node.childrenUL);
|
||||
o.node.depth = 0;
|
||||
o.node.relpath = relpath;
|
||||
o.node.expanded = false;
|
||||
o.node.isLast = true;
|
||||
o.node.plus_img = document.createElement("img");
|
||||
o.node.plus_img.src = relpath+"ftv2pnode.png";
|
||||
o.node.plus_img.width = 16;
|
||||
o.node.plus_img.height = 22;
|
||||
|
||||
if (localStorageSupported()) {
|
||||
var navSync = $('#nav-sync');
|
||||
if (cachedLink()) {
|
||||
showSyncOff(navSync,relpath);
|
||||
navSync.removeClass('sync');
|
||||
} else {
|
||||
showSyncOn(navSync,relpath);
|
||||
}
|
||||
navSync.click(function(){ toggleSyncButton(relpath); });
|
||||
}
|
||||
|
||||
navTo(o,toroot,window.location.hash,relpath);
|
||||
|
||||
$(window).bind('hashchange', function(){
|
||||
if (window.location.hash && window.location.hash.length>1){
|
||||
var a;
|
||||
if ($(location).attr('hash')){
|
||||
var clslink=stripPath($(location).attr('pathname'))+':'+
|
||||
$(location).attr('hash').substring(1);
|
||||
a=$('.item a[class$="'+clslink+'"]');
|
||||
}
|
||||
if (a==null || !$(a).parent().parent().hasClass('selected')){
|
||||
$('.item').removeClass('selected');
|
||||
$('.item').removeAttr('id');
|
||||
}
|
||||
var link=stripPath2($(location).attr('pathname'));
|
||||
navTo(o,link,$(location).attr('hash'),relpath);
|
||||
} else if (!animationInProgress) {
|
||||
$('#doc-content').scrollTop(0);
|
||||
$('.item').removeClass('selected');
|
||||
$('.item').removeAttr('id');
|
||||
navTo(o,toroot,window.location.hash,relpath);
|
||||
}
|
||||
})
|
||||
|
||||
$(window).load(showRoot);
|
||||
}
|
||||
|
||||
// return false if the the node has no children at all, or has only section/subsection children
|
||||
function checkChildrenData(node) {
|
||||
if (!(typeof(node.childrenData)==='string')) {
|
||||
for (var i in node.childrenData) {
|
||||
var url = node.childrenData[i][1];
|
||||
if(url.indexOf("#")==-1)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
return (node.childrenData);
|
||||
}
|
||||
|
||||
// Modified to:
|
||||
// 1 - remove the root node
|
||||
// 2 - remove the section/subsection children
|
||||
function createIndent(o,domNode,node,level)
|
||||
{
|
||||
var level=-2; // <- we replaced level=-1 by level=-2
|
||||
var n = node;
|
||||
while (n.parentNode) { level++; n=n.parentNode; }
|
||||
var imgNode = document.createElement("img");
|
||||
imgNode.style.paddingLeft=(16*(level)).toString()+'px';
|
||||
imgNode.width = 16;
|
||||
imgNode.height = 22;
|
||||
imgNode.border = 0;
|
||||
if (checkChildrenData(node)) { // <- we modified this line to use checkChildrenData(node) instead of node.childrenData
|
||||
node.plus_img = imgNode;
|
||||
node.expandToggle = document.createElement("a");
|
||||
node.expandToggle.href = "javascript:void(0)";
|
||||
node.expandToggle.onclick = function() {
|
||||
if (node.expanded) {
|
||||
$(node.getChildrenUL()).slideUp("fast");
|
||||
node.plus_img.src = node.relpath+"ftv2pnode.png";
|
||||
node.expanded = false;
|
||||
} else {
|
||||
expandNode(o, node, false, false);
|
||||
}
|
||||
}
|
||||
node.expandToggle.appendChild(imgNode);
|
||||
domNode.appendChild(node.expandToggle);
|
||||
imgNode.src = node.relpath+"ftv2pnode.png";
|
||||
} else {
|
||||
imgNode.src = node.relpath+"ftv2node.png";
|
||||
domNode.appendChild(imgNode);
|
||||
}
|
||||
}
|
||||
|
||||
// Overloaded to automatically expand the selected node
|
||||
function selectAndHighlight(hash,n)
|
||||
{
|
||||
var a;
|
||||
if (hash) {
|
||||
var link=stripPath($(location).attr('pathname'))+':'+hash.substring(1);
|
||||
a=$('.item a[class$="'+link+'"]');
|
||||
}
|
||||
if (a && a.length) {
|
||||
a.parent().parent().addClass('selected');
|
||||
a.parent().parent().attr('id','selected');
|
||||
highlightAnchor();
|
||||
} else if (n) {
|
||||
$(n.itemDiv).addClass('selected');
|
||||
$(n.itemDiv).attr('id','selected');
|
||||
}
|
||||
if ($('#nav-tree-contents .item:first').hasClass('selected')) {
|
||||
$('#nav-sync').css('top','30px');
|
||||
} else {
|
||||
$('#nav-sync').css('top','5px');
|
||||
}
|
||||
expandNode(global_navtree_object, n, true, true); // <- we added this line
|
||||
showRoot();
|
||||
}
|
||||
|
||||
|
||||
$(document).ready(function() {
|
||||
|
||||
generate_autotoc();
|
||||
|
||||
(function (){ // wait until the first "selected" element has been created
|
||||
try {
|
||||
|
||||
// this line will triger an exception if there is no #selected element, i.e., before the tree structure is complete.
|
||||
document.getElementById("selected").className = "item selected";
|
||||
|
||||
// ok, the default tree has been created, we can keep going...
|
||||
|
||||
// expand the "Chapters" node
|
||||
if(window.location.href.indexOf('unsupported')==-1)
|
||||
expandNode(global_navtree_object, global_navtree_object.node.children[0].children[2], true, true);
|
||||
else
|
||||
expandNode(global_navtree_object, global_navtree_object.node.children[0].children[1], true, true);
|
||||
|
||||
// Hide the root node "Eigen"
|
||||
$(document.getElementsByClassName('index.html')[0]).parent().parent().css({display:"none"});
|
||||
|
||||
} catch (err) {
|
||||
setTimeout(arguments.callee, 10);
|
||||
}
|
||||
})();
|
||||
});
|
||||
|
||||
$(window).load(function() {
|
||||
resizeHeight();
|
||||
});
|
|
@ -1,211 +0,0 @@
|
|||
|
||||
/******** Eigen specific CSS code ************/
|
||||
|
||||
/**** Styles removing elements ****/
|
||||
|
||||
/* remove the "modules|classes" link for module pages (they are already in the TOC) */
|
||||
div.summary {
|
||||
display:none;
|
||||
}
|
||||
|
||||
/* remove */
|
||||
div.contents hr {
|
||||
display:none;
|
||||
}
|
||||
|
||||
/**** ****/
|
||||
|
||||
p, dl.warning, dl.attention, dl.note
|
||||
{
|
||||
max-width:60em;
|
||||
text-align:justify;
|
||||
}
|
||||
|
||||
li {
|
||||
max-width:55em;
|
||||
text-align:justify;
|
||||
}
|
||||
|
||||
img {
|
||||
border: 0;
|
||||
}
|
||||
|
||||
div.fragment {
|
||||
display:table; /* this allows the element to be larger than its parent */
|
||||
padding: 0pt;
|
||||
}
|
||||
pre.fragment {
|
||||
border: 1px solid #cccccc;
|
||||
|
||||
margin: 2px 0px 2px 0px;
|
||||
padding: 3px 5px 3px 5px;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* Common style for all Eigen's tables */
|
||||
|
||||
table.example, table.manual, table.manual-vl {
|
||||
max-width:100%;
|
||||
border-collapse: collapse;
|
||||
border-style: solid;
|
||||
border-width: 1px;
|
||||
border-color: #cccccc;
|
||||
font-size: 1em;
|
||||
|
||||
box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
|
||||
-moz-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
|
||||
-webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
|
||||
}
|
||||
|
||||
table.example th, table.manual th, table.manual-vl th {
|
||||
padding: 0.5em 0.5em 0.5em 0.5em;
|
||||
text-align: left;
|
||||
padding-right: 1em;
|
||||
color: #555555;
|
||||
background-color: #F4F4E5;
|
||||
|
||||
background-image: -webkit-gradient(linear,center top,center bottom,from(#FFFFFF), color-stop(0.3,#FFFFFF), color-stop(0.30,#FFFFFF), color-stop(0.98,#F4F4E5), to(#ECECDE));
|
||||
background-image: -moz-linear-gradient(center top, #FFFFFF 0%, #FFFFFF 30%, #F4F4E5 98%, #ECECDE);
|
||||
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFFFFF', endColorstr='#F4F4E5');
|
||||
}
|
||||
|
||||
table.example td, table.manual td, table.manual-vl td {
|
||||
vertical-align:top;
|
||||
border-width: 1px;
|
||||
border-color: #cccccc;
|
||||
}
|
||||
|
||||
/* header of headers */
|
||||
table th.meta {
|
||||
text-align:center;
|
||||
font-size: 1.2em;
|
||||
background-color:#FFFFFF;
|
||||
}
|
||||
|
||||
/* intermediate header */
|
||||
table th.inter {
|
||||
text-align:left;
|
||||
background-color:#FFFFFF;
|
||||
background-image:none;
|
||||
border-style:solid solid solid solid;
|
||||
border-width: 1px;
|
||||
border-color: #cccccc;
|
||||
}
|
||||
|
||||
/** class for exemple / output tables **/
|
||||
|
||||
table.example {
|
||||
}
|
||||
|
||||
table.example th {
|
||||
}
|
||||
|
||||
table.example td {
|
||||
padding: 0.5em 0.5em 0.5em 0.5em;
|
||||
vertical-align:top;
|
||||
}
|
||||
|
||||
/* standard class for the manual */
|
||||
|
||||
table.manual, table.manual-vl {
|
||||
padding: 0.2em 0em 0.5em 0em;
|
||||
}
|
||||
|
||||
table.manual th, table.manual-vl th {
|
||||
margin: 0em 0em 0.3em 0em;
|
||||
}
|
||||
|
||||
table.manual td, table.manual-vl td {
|
||||
padding: 0.3em 0.5em 0.3em 0.5em;
|
||||
vertical-align:top;
|
||||
border-width: 1px;
|
||||
}
|
||||
|
||||
table.manual td.alt, table.manual tr.alt, table.manual-vl td.alt, table.manual-vl tr.alt {
|
||||
background-color: #F4F4E5;
|
||||
}
|
||||
|
||||
table.manual-vl th, table.manual-vl td, table.manual-vl td.alt {
|
||||
border-color: #cccccc;
|
||||
border-width: 1px;
|
||||
border-style: none solid none solid;
|
||||
}
|
||||
|
||||
table.manual-vl th.inter {
|
||||
border-style: solid solid solid solid;
|
||||
}
|
||||
|
||||
h2 {
|
||||
margin-top:2em;
|
||||
border-style: none none solid none;
|
||||
border-width: 1px;
|
||||
border-color: #cccccc;
|
||||
}
|
||||
|
||||
/**** Table of content in the side-nav ****/
|
||||
|
||||
|
||||
div.toc {
|
||||
margin:0;
|
||||
padding: 0.3em 0 0 0;
|
||||
width:100%;
|
||||
float:none;
|
||||
position:absolute;
|
||||
bottom:0;
|
||||
border-radius:0px;
|
||||
border-style: solid none none none;
|
||||
}
|
||||
|
||||
div.toc h3 {
|
||||
margin-left: 0.5em;
|
||||
margin-bottom: 0.2em;
|
||||
}
|
||||
|
||||
div.toc ul {
|
||||
margin: 0.2em 0 0.4em 0.5em;
|
||||
}
|
||||
|
||||
/**** old Eigen's styles ****/
|
||||
|
||||
|
||||
table.tutorial_code td {
|
||||
border-color: transparent; /* required for Firefox */
|
||||
padding: 3pt 5pt 3pt 5pt;
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
|
||||
/* Whenever doxygen meets a '\n' or a '<BR/>', it will put
|
||||
* the text containing the characted into a <p class="starttd">.
|
||||
* This little hack togehter with table.tutorial_code td.note
|
||||
* aims at fixing this issue. */
|
||||
table.tutorial_code td.note p.starttd {
|
||||
margin: 0px;
|
||||
border: none;
|
||||
padding: 0px;
|
||||
}
|
||||
|
||||
div.eimainmenu {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
/* center version number on main page */
|
||||
h3.version {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
|
||||
td.width20em p.endtd {
|
||||
width: 20em;
|
||||
}
|
||||
|
||||
.bigwarning {
|
||||
font-size:2em;
|
||||
font-weight:bold;
|
||||
margin:1em;
|
||||
padding:1em;
|
||||
color:red;
|
||||
border:solid;
|
||||
}
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
<!-- start footer part -->
|
||||
<!--BEGIN GENERATE_TREEVIEW-->
|
||||
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
|
||||
<ul>
|
||||
$navpath
|
||||
<li class="footer">$generatedby
|
||||
<a href="http://www.doxygen.org/index.html">
|
||||
<img class="footer" src="$relpath$doxygen.png" alt="doxygen"/></a> $doxygenversion </li>
|
||||
</ul>
|
||||
</div>
|
||||
<!--END GENERATE_TREEVIEW-->
|
||||
<!--BEGIN !GENERATE_TREEVIEW-->
|
||||
<hr class="footer"/><address class="footer"><small>
|
||||
$generatedby  <a href="http://www.doxygen.org/index.html">
|
||||
<img class="footer" src="$relpath$doxygen.png" alt="doxygen"/>
|
||||
</a> $doxygenversion
|
||||
</small></address>
|
||||
<!--END !GENERATE_TREEVIEW-->
|
||||
|
||||
<!-- Piwik -->
|
||||
<script type="text/javascript">
|
||||
var pkBaseURL = (("https:" == document.location.protocol) ? "https://stats.sylphide-consulting.com/piwik/" : "http://stats.sylphide-consulting.com/piwik/");
|
||||
document.write(unescape("%3Cscript src='" + pkBaseURL + "piwik.js' type='text/javascript'%3E%3C/script%3E"));
|
||||
</script><script type="text/javascript">
|
||||
try {
|
||||
var piwikTracker = Piwik.getTracker(pkBaseURL + "piwik.php", 20);
|
||||
piwikTracker.trackPageView();
|
||||
piwikTracker.enableLinkTracking();
|
||||
} catch( err ) {}
|
||||
</script><noscript><p><img src="http://stats.sylphide-consulting.com/piwik/piwik.php?idsite=20" style="border:0" alt="" /></p></noscript>
|
||||
<!-- End Piwik Tracking Code -->
|
||||
|
||||
</body>
|
||||
</html>
|
||||
|
||||
|
|
@ -1,61 +0,0 @@
|
|||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
|
||||
<html xmlns="http://www.w3.org/1999/xhtml">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
|
||||
<meta name="generator" content="Doxygen $doxygenversion"/>
|
||||
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
|
||||
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
|
||||
<link href="$relpath$tabs.css" rel="stylesheet" type="text/css"/>
|
||||
<script type="text/javascript" src="$relpath$jquery.js"></script>
|
||||
<script type="text/javascript" src="$relpath$dynsections.js"></script>
|
||||
$treeview
|
||||
$search
|
||||
$mathjax
|
||||
<link href="$relpath$$stylesheet" rel="stylesheet" type="text/css" />
|
||||
<link href="$relpath$eigendoxy.css" rel="stylesheet" type="text/css">
|
||||
<!-- $extrastylesheet -->
|
||||
<script type="text/javascript" src="$relpath$eigen_navtree_hacks.js"></script>
|
||||
<!-- <script type="text/javascript"> -->
|
||||
<!-- </script> -->
|
||||
|
||||
</head>
|
||||
<body>
|
||||
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
|
||||
<!-- <a name="top"></a> -->
|
||||
|
||||
<!--BEGIN TITLEAREA-->
|
||||
<div id="titlearea">
|
||||
<table cellspacing="0" cellpadding="0">
|
||||
<tbody>
|
||||
<tr style="height: 56px;">
|
||||
<!--BEGIN PROJECT_LOGO-->
|
||||
<td id="projectlogo"><img alt="Logo" src="$relpath$$projectlogo"/></td>
|
||||
<!--END PROJECT_LOGO-->
|
||||
<!--BEGIN PROJECT_NAME-->
|
||||
<td style="padding-left: 0.5em;">
|
||||
<div id="projectname"><a href="http://eigen.tuxfamily.org">$projectname</a>
|
||||
<!--BEGIN PROJECT_NUMBER--> <span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER-->
|
||||
</div>
|
||||
<!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF-->
|
||||
</td>
|
||||
<!--END PROJECT_NAME-->
|
||||
<!--BEGIN !PROJECT_NAME-->
|
||||
<!--BEGIN PROJECT_BRIEF-->
|
||||
<td style="padding-left: 0.5em;">
|
||||
<div id="projectbrief">$projectbrief</div>
|
||||
</td>
|
||||
<!--END PROJECT_BRIEF-->
|
||||
<!--END !PROJECT_NAME-->
|
||||
<!--BEGIN DISABLE_INDEX-->
|
||||
<!--BEGIN SEARCHENGINE-->
|
||||
<td>$searchbox</td>
|
||||
<!--END SEARCHENGINE-->
|
||||
<!--END DISABLE_INDEX-->
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<!--END TITLEAREA-->
|
||||
<!-- end header part -->
|
||||
|
|
@ -1,178 +0,0 @@
|
|||
<?xml version="1.0"?>
|
||||
<doxygenlayout version="1.0">
|
||||
<!-- Navigation index tabs for HTML output -->
|
||||
<navindex>
|
||||
<tab type="user" url="index.html" title="Overview" />
|
||||
<tab type="user" url="@ref GettingStarted" title="Getting started" />
|
||||
<tab type="modules" visible="yes" title="Chapters" intro=""/>
|
||||
<tab type="mainpage" visible="yes" title=""/>
|
||||
<tab type="classlist" visible="yes" title="" intro=""/>
|
||||
<!-- <tab type="classmembers" visible="yes" title="" intro=""/> -->
|
||||
</navindex>
|
||||
|
||||
<!-- Layout definition for a class page -->
|
||||
<class>
|
||||
<briefdescription visible="no"/>
|
||||
<includes visible="$SHOW_INCLUDE_FILES"/>
|
||||
<detaileddescription title=""/>
|
||||
<inheritancegraph visible="$CLASS_GRAPH"/>
|
||||
<collaborationgraph visible="$COLLABORATION_GRAPH"/>
|
||||
<allmemberslink visible="yes"/>
|
||||
<memberdecl>
|
||||
<nestedclasses visible="yes" title=""/>
|
||||
<publictypes title=""/>
|
||||
<publicslots title=""/>
|
||||
<signals title=""/>
|
||||
<publicmethods title=""/>
|
||||
<publicstaticmethods title=""/>
|
||||
<publicattributes title=""/>
|
||||
<publicstaticattributes title=""/>
|
||||
<protectedtypes title=""/>
|
||||
<protectedslots title=""/>
|
||||
<protectedmethods title=""/>
|
||||
<protectedstaticmethods title=""/>
|
||||
<protectedattributes title=""/>
|
||||
<protectedstaticattributes title=""/>
|
||||
<packagetypes title=""/>
|
||||
<packagemethods title=""/>
|
||||
<packagestaticmethods title=""/>
|
||||
<packageattributes title=""/>
|
||||
<packagestaticattributes title=""/>
|
||||
<properties title=""/>
|
||||
<events title=""/>
|
||||
<privatetypes title=""/>
|
||||
<privateslots title=""/>
|
||||
<privatemethods title=""/>
|
||||
<privatestaticmethods title=""/>
|
||||
<privateattributes title=""/>
|
||||
<privatestaticattributes title=""/>
|
||||
<friends title=""/>
|
||||
<related title="" subtitle=""/>
|
||||
<membergroups visible="yes"/>
|
||||
</memberdecl>
|
||||
|
||||
<memberdef>
|
||||
<inlineclasses title=""/>
|
||||
<typedefs title=""/>
|
||||
<enums title=""/>
|
||||
<constructors title=""/>
|
||||
<functions title=""/>
|
||||
<related title=""/>
|
||||
<variables title=""/>
|
||||
<properties title=""/>
|
||||
<events title=""/>
|
||||
</memberdef>
|
||||
<usedfiles visible="$SHOW_USED_FILES"/>
|
||||
<authorsection visible="yes"/>
|
||||
</class>
|
||||
|
||||
<!-- Layout definition for a namespace page -->
|
||||
<namespace>
|
||||
<briefdescription visible="yes"/>
|
||||
<memberdecl>
|
||||
<nestednamespaces visible="yes" title=""/>
|
||||
<classes visible="yes" title=""/>
|
||||
<typedefs title=""/>
|
||||
<enums title=""/>
|
||||
<functions title=""/>
|
||||
<variables title=""/>
|
||||
<membergroups visible="yes"/>
|
||||
</memberdecl>
|
||||
<detaileddescription title=""/>
|
||||
<memberdef>
|
||||
<inlineclasses title=""/>
|
||||
<typedefs title=""/>
|
||||
<enums title=""/>
|
||||
<functions title=""/>
|
||||
<variables title=""/>
|
||||
</memberdef>
|
||||
<authorsection visible="yes"/>
|
||||
</namespace>
|
||||
|
||||
<!-- Layout definition for a file page -->
|
||||
<file>
|
||||
<briefdescription visible="yes"/>
|
||||
<includes visible="$SHOW_INCLUDE_FILES"/>
|
||||
<includegraph visible="$INCLUDE_GRAPH"/>
|
||||
<includedbygraph visible="$INCLUDED_BY_GRAPH"/>
|
||||
<sourcelink visible="yes"/>
|
||||
<memberdecl>
|
||||
<classes visible="yes" title=""/>
|
||||
<namespaces visible="yes" title=""/>
|
||||
<defines title=""/>
|
||||
<typedefs title=""/>
|
||||
<enums title=""/>
|
||||
<functions title=""/>
|
||||
<variables title=""/>
|
||||
<membergroups visible="yes"/>
|
||||
</memberdecl>
|
||||
<detaileddescription title=""/>
|
||||
<memberdef>
|
||||
<inlineclasses title=""/>
|
||||
<defines title=""/>
|
||||
<typedefs title=""/>
|
||||
<enums title=""/>
|
||||
<functions title=""/>
|
||||
<variables title=""/>
|
||||
</memberdef>
|
||||
<authorsection/>
|
||||
</file>
|
||||
|
||||
<!-- Layout definition for a group page -->
|
||||
<group>
|
||||
<briefdescription visible="no"/>
|
||||
<detaileddescription title=""/>
|
||||
<groupgraph visible="$GROUP_GRAPHS"/>
|
||||
<memberdecl>
|
||||
<nestedgroups visible="yes" title=""/>
|
||||
<dirs visible="yes" title=""/>
|
||||
<files visible="yes" title=""/>
|
||||
<namespaces visible="yes" title=""/>
|
||||
<classes visible="yes" title=""/>
|
||||
<defines title=""/>
|
||||
<typedefs title=""/>
|
||||
<enums title=""/>
|
||||
<enumvalues title=""/>
|
||||
<functions title=""/>
|
||||
<variables title=""/>
|
||||
<signals title=""/>
|
||||
<publicslots title=""/>
|
||||
<protectedslots title=""/>
|
||||
<privateslots title=""/>
|
||||
<events title=""/>
|
||||
<properties title=""/>
|
||||
<friends title=""/>
|
||||
<membergroups visible="yes"/>
|
||||
</memberdecl>
|
||||
|
||||
<memberdef>
|
||||
<pagedocs/>
|
||||
<inlineclasses title=""/>
|
||||
<defines title=""/>
|
||||
<typedefs title=""/>
|
||||
<enums title=""/>
|
||||
<enumvalues title=""/>
|
||||
<functions title=""/>
|
||||
<variables title=""/>
|
||||
<signals title=""/>
|
||||
<publicslots title=""/>
|
||||
<protectedslots title=""/>
|
||||
<privateslots title=""/>
|
||||
<events title=""/>
|
||||
<properties title=""/>
|
||||
<friends title=""/>
|
||||
</memberdef>
|
||||
<authorsection visible="yes"/>
|
||||
</group>
|
||||
|
||||
<!-- Layout definition for a directory page -->
|
||||
<directory>
|
||||
<briefdescription visible="yes"/>
|
||||
<directorygraph visible="yes"/>
|
||||
<memberdecl>
|
||||
<dirs visible="yes"/>
|
||||
<files visible="yes"/>
|
||||
</memberdecl>
|
||||
<detaileddescription title=""/>
|
||||
</directory>
|
||||
</doxygenlayout>
|
|
@ -1,59 +0,0 @@
|
|||
.tabs, .tabs2, .tabs3 {
|
||||
background-image: url('tab_b.png');
|
||||
width: 100%;
|
||||
z-index: 101;
|
||||
font-size: 13px;
|
||||
}
|
||||
|
||||
.tabs2 {
|
||||
font-size: 10px;
|
||||
}
|
||||
.tabs3 {
|
||||
font-size: 9px;
|
||||
}
|
||||
|
||||
.tablist {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
display: table;
|
||||
}
|
||||
|
||||
.tablist li {
|
||||
float: left;
|
||||
display: table-cell;
|
||||
background-image: url('tab_b.png');
|
||||
line-height: 36px;
|
||||
list-style: none;
|
||||
}
|
||||
|
||||
.tablist a {
|
||||
display: block;
|
||||
padding: 0 20px;
|
||||
font-weight: bold;
|
||||
background-image:url('tab_s.png');
|
||||
background-repeat:no-repeat;
|
||||
background-position:right;
|
||||
color: #283A5D;
|
||||
text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9);
|
||||
text-decoration: none;
|
||||
outline: none;
|
||||
}
|
||||
|
||||
.tabs3 .tablist a {
|
||||
padding: 0 10px;
|
||||
}
|
||||
|
||||
.tablist a:hover {
|
||||
background-image: url('tab_h.png');
|
||||
background-repeat:repeat-x;
|
||||
color: #fff;
|
||||
text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0);
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.tablist li.current a {
|
||||
background-image: url('tab_a.png');
|
||||
background-repeat:repeat-x;
|
||||
color: #fff;
|
||||
text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0);
|
||||
}
|
|
@ -1,2 +0,0 @@
|
|||
EXCLUDE copyright
|
||||
EXCLUDE license
|
|
@ -1,16 +0,0 @@
|
|||
file(GLOB examples_SRCS "*.cpp")
|
||||
|
||||
foreach(example_src ${examples_SRCS})
|
||||
get_filename_component(example ${example_src} NAME_WE)
|
||||
add_executable(${example} ${example_src})
|
||||
if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
|
||||
target_link_libraries(${example} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
|
||||
endif()
|
||||
add_custom_command(
|
||||
TARGET ${example}
|
||||
POST_BUILD
|
||||
COMMAND ${example}
|
||||
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${example}.out
|
||||
)
|
||||
add_dependencies(all_examples ${example})
|
||||
endforeach(example_src)
|
|
@ -1,15 +0,0 @@
|
|||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main(void)
|
||||
{
|
||||
int const N = 5;
|
||||
MatrixXi A(N,N);
|
||||
A.setRandom();
|
||||
cout << "A =\n" << A << '\n' << endl;
|
||||
cout << "A(1..3,:) =\n" << A.middleCols(1,3) << endl;
|
||||
return 0;
|
||||
}
|
|
@ -1,15 +0,0 @@
|
|||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main(void)
|
||||
{
|
||||
int const N = 5;
|
||||
MatrixXi A(N,N);
|
||||
A.setRandom();
|
||||
cout << "A =\n" << A << '\n' << endl;
|
||||
cout << "A(2..3,:) =\n" << A.middleRows(2,2) << endl;
|
||||
return 0;
|
||||
}
|
|
@ -1,15 +0,0 @@
|
|||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main(void)
|
||||
{
|
||||
int const N = 5;
|
||||
MatrixXi A(N,N);
|
||||
A.setRandom();
|
||||
cout << "A =\n" << A << '\n' << endl;
|
||||
cout << "A(:,1..3) =\n" << A.middleCols<3>(1) << endl;
|
||||
return 0;
|
||||
}
|
|
@ -1,15 +0,0 @@
|
|||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main(void)
|
||||
{
|
||||
int const N = 5;
|
||||
MatrixXi A(N,N);
|
||||
A.setRandom();
|
||||
cout << "A =\n" << A << '\n' << endl;
|
||||
cout << "A(1..3,:) =\n" << A.middleRows<3>(1) << endl;
|
||||
return 0;
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
#define EIGEN2_SUPPORT
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix3i m = Matrix3i::Random();
|
||||
cout << "Here is the matrix m:" << endl << m << endl;
|
||||
Matrix3i n = Matrix3i::Random();
|
||||
cout << "And here is the matrix n:" << endl << n << endl;
|
||||
cout << "The coefficient-wise product of m and n is:" << endl;
|
||||
cout << m.cwise() * n << endl;
|
||||
cout << "Taking the cube of the coefficients of m yields:" << endl;
|
||||
cout << m.cwise().pow(3) << endl;
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using Eigen::MatrixXd;
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXd m(2,2);
|
||||
m(0,0) = 3;
|
||||
m(1,0) = 2.5;
|
||||
m(0,1) = -1;
|
||||
m(1,1) = m(1,0) + m(0,1);
|
||||
std::cout << m << std::endl;
|
||||
}
|
|
@ -1,15 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXd m = MatrixXd::Random(3,3);
|
||||
m = (m + MatrixXd::Constant(3,3,1.2)) * 50;
|
||||
cout << "m =" << endl << m << endl;
|
||||
VectorXd v(3);
|
||||
v << 1, 2, 3;
|
||||
cout << "m * v =" << endl << m * v << endl;
|
||||
}
|
|
@ -1,15 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix3d m = Matrix3d::Random();
|
||||
m = (m + Matrix3d::Constant(1.2)) * 50;
|
||||
cout << "m =" << endl << m << endl;
|
||||
Vector3d v(1,2,3);
|
||||
|
||||
cout << "m * v =" << endl << m * v << endl;
|
||||
}
|
|
@ -1,22 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
template <typename Derived1, typename Derived2>
|
||||
void copyUpperTriangularPart(MatrixBase<Derived1>& dst, const MatrixBase<Derived2>& src)
|
||||
{
|
||||
/* Note the 'template' keywords in the following line! */
|
||||
dst.template triangularView<Upper>() = src.template triangularView<Upper>();
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXi m1 = MatrixXi::Ones(5,5);
|
||||
MatrixXi m2 = MatrixXi::Random(4,4);
|
||||
std::cout << "m2 before copy:" << std::endl;
|
||||
std::cout << m2 << std::endl << std::endl;
|
||||
copyUpperTriangularPart(m2, m1.topLeftCorner(4,4));
|
||||
std::cout << "m2 after copy:" << std::endl;
|
||||
std::cout << m2 << std::endl << std::endl;
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
void copyUpperTriangularPart(MatrixXf& dst, const MatrixXf& src)
|
||||
{
|
||||
dst.triangularView<Upper>() = src.triangularView<Upper>();
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXf m1 = MatrixXf::Ones(4,4);
|
||||
MatrixXf m2 = MatrixXf::Random(4,4);
|
||||
std::cout << "m2 before copy:" << std::endl;
|
||||
std::cout << m2 << std::endl << std::endl;
|
||||
copyUpperTriangularPart(m2, m1);
|
||||
std::cout << "m2 after copy:" << std::endl;
|
||||
std::cout << m2 << std::endl << std::endl;
|
||||
}
|
|
@ -1,23 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix2f A, b;
|
||||
LLT<Matrix2f> llt;
|
||||
A << 2, -1, -1, 3;
|
||||
b << 1, 2, 3, 1;
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
cout << "Here is the right hand side b:\n" << b << endl;
|
||||
cout << "Computing LLT decomposition..." << endl;
|
||||
llt.compute(A);
|
||||
cout << "The solution is:\n" << llt.solve(b) << endl;
|
||||
A(1,1)++;
|
||||
cout << "The matrix A is now:\n" << A << endl;
|
||||
cout << "Computing LLT decomposition..." << endl;
|
||||
llt.compute(A);
|
||||
cout << "The solution is now:\n" << llt.solve(b) << endl;
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXd A = MatrixXd::Random(100,100);
|
||||
MatrixXd b = MatrixXd::Random(100,50);
|
||||
MatrixXd x = A.fullPivLu().solve(b);
|
||||
double relative_error = (A*x - b).norm() / b.norm(); // norm() is L2 norm
|
||||
cout << "The relative error is:\n" << relative_error << endl;
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix3f A;
|
||||
Vector3f b;
|
||||
A << 1,2,3, 4,5,6, 7,8,10;
|
||||
b << 3, 3, 4;
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
cout << "Here is the vector b:\n" << b << endl;
|
||||
Vector3f x = A.colPivHouseholderQr().solve(b);
|
||||
cout << "The solution is:\n" << x << endl;
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix2f A, b;
|
||||
A << 2, -1, -1, 3;
|
||||
b << 1, 2, 3, 1;
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
cout << "Here is the right hand side b:\n" << b << endl;
|
||||
Matrix2f x = A.ldlt().solve(b);
|
||||
cout << "The solution is:\n" << x << endl;
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix3f A;
|
||||
A << 1, 2, 1,
|
||||
2, 1, 0,
|
||||
-1, 1, 2;
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
cout << "The determinant of A is " << A.determinant() << endl;
|
||||
cout << "The inverse of A is:\n" << A.inverse() << endl;
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix3f A;
|
||||
A << 1, 2, 5,
|
||||
2, 1, 4,
|
||||
3, 0, 3;
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
FullPivLU<Matrix3f> lu_decomp(A);
|
||||
cout << "The rank of A is " << lu_decomp.rank() << endl;
|
||||
cout << "Here is a matrix whose columns form a basis of the null-space of A:\n"
|
||||
<< lu_decomp.kernel() << endl;
|
||||
cout << "Here is a matrix whose columns form a basis of the column-space of A:\n"
|
||||
<< lu_decomp.image(A) << endl; // yes, have to pass the original A
|
||||
}
|
|
@ -1,15 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXf A = MatrixXf::Random(3, 2);
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
VectorXf b = VectorXf::Random(3);
|
||||
cout << "Here is the right hand side b:\n" << b << endl;
|
||||
cout << "The least-squares solution is:\n"
|
||||
<< A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b) << endl;
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix2f A;
|
||||
A << 1, 2, 2, 3;
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
SelfAdjointEigenSolver<Matrix2f> eigensolver(A);
|
||||
if (eigensolver.info() != Success) abort();
|
||||
cout << "The eigenvalues of A are:\n" << eigensolver.eigenvalues() << endl;
|
||||
cout << "Here's a matrix whose columns are eigenvectors of A \n"
|
||||
<< "corresponding to these eigenvalues:\n"
|
||||
<< eigensolver.eigenvectors() << endl;
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix2d A;
|
||||
A << 2, 1,
|
||||
2, 0.9999999999;
|
||||
FullPivLU<Matrix2d> lu(A);
|
||||
cout << "By default, the rank of A is found to be " << lu.rank() << endl;
|
||||
lu.setThreshold(1e-5);
|
||||
cout << "With threshold 1e-5, the rank of A is found to be " << lu.rank() << endl;
|
||||
}
|
|
@ -1,24 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
ArrayXXf m(2,2);
|
||||
|
||||
// assign some values coefficient by coefficient
|
||||
m(0,0) = 1.0; m(0,1) = 2.0;
|
||||
m(1,0) = 3.0; m(1,1) = m(0,1) + m(1,0);
|
||||
|
||||
// print values to standard output
|
||||
cout << m << endl << endl;
|
||||
|
||||
// using the comma-initializer is also allowed
|
||||
m << 1.0,2.0,
|
||||
3.0,4.0;
|
||||
|
||||
// print values to standard output
|
||||
cout << m << endl;
|
||||
}
|
|
@ -1,23 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
ArrayXXf a(3,3);
|
||||
ArrayXXf b(3,3);
|
||||
a << 1,2,3,
|
||||
4,5,6,
|
||||
7,8,9;
|
||||
b << 1,2,3,
|
||||
1,2,3,
|
||||
1,2,3;
|
||||
|
||||
// Adding two arrays
|
||||
cout << "a + b = " << endl << a + b << endl << endl;
|
||||
|
||||
// Subtracting a scalar from an array
|
||||
cout << "a - 2 = " << endl << a - 2 << endl;
|
||||
}
|
|
@ -1,19 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
ArrayXf a = ArrayXf::Random(5);
|
||||
a *= 2;
|
||||
cout << "a =" << endl
|
||||
<< a << endl;
|
||||
cout << "a.abs() =" << endl
|
||||
<< a.abs() << endl;
|
||||
cout << "a.abs().sqrt() =" << endl
|
||||
<< a.abs().sqrt() << endl;
|
||||
cout << "a.min(a.abs().sqrt()) =" << endl
|
||||
<< a.min(a.abs().sqrt()) << endl;
|
||||
}
|
|
@ -1,22 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXf m(2,2);
|
||||
MatrixXf n(2,2);
|
||||
MatrixXf result(2,2);
|
||||
|
||||
m << 1,2,
|
||||
3,4;
|
||||
n << 5,6,
|
||||
7,8;
|
||||
|
||||
result = (m.array() + 4).matrix() * m;
|
||||
cout << "-- Combination 1: --" << endl << result << endl << endl;
|
||||
result = (m.array() * n.array()).matrix() * m;
|
||||
cout << "-- Combination 2: --" << endl << result << endl << endl;
|
||||
}
|
|
@ -1,26 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXf m(2,2);
|
||||
MatrixXf n(2,2);
|
||||
MatrixXf result(2,2);
|
||||
|
||||
m << 1,2,
|
||||
3,4;
|
||||
n << 5,6,
|
||||
7,8;
|
||||
|
||||
result = m * n;
|
||||
cout << "-- Matrix m*n: --" << endl << result << endl << endl;
|
||||
result = m.array() * n.array();
|
||||
cout << "-- Array m*n: --" << endl << result << endl << endl;
|
||||
result = m.cwiseProduct(n);
|
||||
cout << "-- With cwiseProduct: --" << endl << result << endl << endl;
|
||||
result = m.array() + 4;
|
||||
cout << "-- Array m + 4: --" << endl << result << endl << endl;
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
ArrayXXf a(2,2);
|
||||
ArrayXXf b(2,2);
|
||||
a << 1,2,
|
||||
3,4;
|
||||
b << 5,6,
|
||||
7,8;
|
||||
cout << "a * b = " << endl << a * b << endl;
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Array22f m;
|
||||
m << 1,2,
|
||||
3,4;
|
||||
Array44f a = Array44f::Constant(0.6);
|
||||
cout << "Here is the array a:" << endl << a << endl << endl;
|
||||
a.block<2,2>(1,1) = m;
|
||||
cout << "Here is now a with m copied into its central 2x2 block:" << endl << a << endl << endl;
|
||||
a.block(0,0,2,3) = a.block(2,1,2,3);
|
||||
cout << "Here is now a with bottom-right 2x3 block copied into top-left 2x2 block:" << endl << a << endl << endl;
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
Eigen::MatrixXf m(3,3);
|
||||
m << 1,2,3,
|
||||
4,5,6,
|
||||
7,8,9;
|
||||
cout << "Here is the matrix m:" << endl << m << endl;
|
||||
cout << "2nd Row: " << m.row(1) << endl;
|
||||
m.col(2) += 3 * m.col(0);
|
||||
cout << "After adding 3 times the first column into the third column, the matrix m is:\n";
|
||||
cout << m << endl;
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
Eigen::Matrix4f m;
|
||||
m << 1, 2, 3, 4,
|
||||
5, 6, 7, 8,
|
||||
9, 10,11,12,
|
||||
13,14,15,16;
|
||||
cout << "m.leftCols(2) =" << endl << m.leftCols(2) << endl << endl;
|
||||
cout << "m.bottomRows<2>() =" << endl << m.bottomRows<2>() << endl << endl;
|
||||
m.topLeftCorner(1,3) = m.bottomRightCorner(3,1).transpose();
|
||||
cout << "After assignment, m = " << endl << m << endl;
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
Eigen::MatrixXf m(4,4);
|
||||
m << 1, 2, 3, 4,
|
||||
5, 6, 7, 8,
|
||||
9,10,11,12,
|
||||
13,14,15,16;
|
||||
cout << "Block in the middle" << endl;
|
||||
cout << m.block<2,2>(1,1) << endl << endl;
|
||||
for (int i = 1; i <= 3; ++i)
|
||||
{
|
||||
cout << "Block of size " << i << "x" << i << endl;
|
||||
cout << m.block(0,0,i,i) << endl << endl;
|
||||
}
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
Eigen::ArrayXf v(6);
|
||||
v << 1, 2, 3, 4, 5, 6;
|
||||
cout << "v.head(3) =" << endl << v.head(3) << endl << endl;
|
||||
cout << "v.tail<3>() = " << endl << v.tail<3>() << endl << endl;
|
||||
v.segment(1,4) *= 2;
|
||||
cout << "after 'v.segment(1,4) *= 2', v =" << endl << v << endl;
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
#include <Eigen/Core>
|
||||
#include <Eigen/LU>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix3f A;
|
||||
Vector3f b;
|
||||
A << 1,2,3, 4,5,6, 7,8,10;
|
||||
b << 3, 3, 4;
|
||||
cout << "Here is the matrix A:" << endl << A << endl;
|
||||
cout << "Here is the vector b:" << endl << b << endl;
|
||||
Vector3f x = A.lu().solve(b);
|
||||
cout << "The solution is:" << endl << x << endl;
|
||||
}
|
|
@ -1,24 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Eigen::MatrixXf m(2,4);
|
||||
Eigen::VectorXf v(2);
|
||||
|
||||
m << 1, 23, 6, 9,
|
||||
3, 11, 7, 2;
|
||||
|
||||
v << 2,
|
||||
3;
|
||||
|
||||
MatrixXf::Index index;
|
||||
// find nearest neighbour
|
||||
(m.colwise() - v).colwise().squaredNorm().minCoeff(&index);
|
||||
|
||||
cout << "Nearest neighbour is column " << index << ":" << endl;
|
||||
cout << m.col(index) << endl;
|
||||
}
|
|
@ -1,21 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
int main()
|
||||
{
|
||||
Eigen::MatrixXf mat(2,4);
|
||||
Eigen::VectorXf v(2);
|
||||
|
||||
mat << 1, 2, 6, 9,
|
||||
3, 1, 7, 2;
|
||||
|
||||
v << 0,
|
||||
1;
|
||||
|
||||
//add v to each column of m
|
||||
mat.colwise() += v;
|
||||
|
||||
std::cout << "Broadcasting result: " << std::endl;
|
||||
std::cout << mat << std::endl;
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
int main()
|
||||
{
|
||||
Eigen::MatrixXf mat(2,4);
|
||||
Eigen::VectorXf v(4);
|
||||
|
||||
mat << 1, 2, 6, 9,
|
||||
3, 1, 7, 2;
|
||||
|
||||
v << 0,1,2,3;
|
||||
|
||||
//add v to each row of m
|
||||
mat.rowwise() += v.transpose();
|
||||
|
||||
std::cout << "Broadcasting result: " << std::endl;
|
||||
std::cout << mat << std::endl;
|
||||
}
|
|
@ -1,13 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
int main()
|
||||
{
|
||||
Eigen::MatrixXf mat(2,4);
|
||||
mat << 1, 2, 6, 9,
|
||||
3, 1, 7, 2;
|
||||
|
||||
std::cout << "Column's maximum: " << std::endl
|
||||
<< mat.colwise().maxCoeff() << std::endl;
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
int main()
|
||||
{
|
||||
MatrixXf mat(2,4);
|
||||
mat << 1, 2, 6, 9,
|
||||
3, 1, 7, 2;
|
||||
|
||||
MatrixXf::Index maxIndex;
|
||||
float maxNorm = mat.colwise().sum().maxCoeff(&maxIndex);
|
||||
|
||||
std::cout << "Maximum sum at position " << maxIndex << std::endl;
|
||||
|
||||
std::cout << "The corresponding vector is: " << std::endl;
|
||||
std::cout << mat.col( maxIndex ) << std::endl;
|
||||
std::cout << "And its sum is is: " << maxNorm << std::endl;
|
||||
}
|
|
@ -1,21 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
ArrayXXf a(2,2);
|
||||
|
||||
a << 1,2,
|
||||
3,4;
|
||||
|
||||
cout << "(a > 0).all() = " << (a > 0).all() << endl;
|
||||
cout << "(a > 0).any() = " << (a > 0).any() << endl;
|
||||
cout << "(a > 0).count() = " << (a > 0).count() << endl;
|
||||
cout << endl;
|
||||
cout << "(a > 2).all() = " << (a > 2).all() << endl;
|
||||
cout << "(a > 2).any() = " << (a > 2).any() << endl;
|
||||
cout << "(a > 2).count() = " << (a > 2).count() << endl;
|
||||
}
|
|
@ -1,28 +0,0 @@
|
|||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
VectorXf v(2);
|
||||
MatrixXf m(2,2), n(2,2);
|
||||
|
||||
v << -1,
|
||||
2;
|
||||
|
||||
m << 1,-2,
|
||||
-3,4;
|
||||
|
||||
cout << "v.squaredNorm() = " << v.squaredNorm() << endl;
|
||||
cout << "v.norm() = " << v.norm() << endl;
|
||||
cout << "v.lpNorm<1>() = " << v.lpNorm<1>() << endl;
|
||||
cout << "v.lpNorm<Infinity>() = " << v.lpNorm<Infinity>() << endl;
|
||||
|
||||
cout << endl;
|
||||
cout << "m.squaredNorm() = " << m.squaredNorm() << endl;
|
||||
cout << "m.norm() = " << m.norm() << endl;
|
||||
cout << "m.lpNorm<1>() = " << m.lpNorm<1>() << endl;
|
||||
cout << "m.lpNorm<Infinity>() = " << m.lpNorm<Infinity>() << endl;
|
||||
}
|
|
@ -1,13 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
int main()
|
||||
{
|
||||
Eigen::MatrixXf mat(2,4);
|
||||
mat << 1, 2, 6, 9,
|
||||
3, 1, 7, 2;
|
||||
|
||||
std::cout << "Row's maximum: " << std::endl
|
||||
<< mat.rowwise().maxCoeff() << std::endl;
|
||||
}
|
|
@ -1,26 +0,0 @@
|
|||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Eigen::MatrixXf m(2,2);
|
||||
|
||||
m << 1, 2,
|
||||
3, 4;
|
||||
|
||||
//get location of maximum
|
||||
MatrixXf::Index maxRow, maxCol;
|
||||
float max = m.maxCoeff(&maxRow, &maxCol);
|
||||
|
||||
//get location of minimum
|
||||
MatrixXf::Index minRow, minCol;
|
||||
float min = m.minCoeff(&minRow, &minCol);
|
||||
|
||||
cout << "Max: " << max << ", at: " <<
|
||||
maxRow << "," << maxCol << endl;
|
||||
cout << "Min: " << min << ", at: " <<
|
||||
minRow << "," << minCol << endl;
|
||||
}
|
|
@ -1,22 +0,0 @@
|
|||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
for (int size=1; size<=4; ++size)
|
||||
{
|
||||
MatrixXi m(size,size+1); // a (size)x(size+1)-matrix of int's
|
||||
for (int j=0; j<m.cols(); ++j) // loop over columns
|
||||
for (int i=0; i<m.rows(); ++i) // loop over rows
|
||||
m(i,j) = i+j*m.rows(); // to access matrix coefficients,
|
||||
// use operator()(int,int)
|
||||
std::cout << m << "\n\n";
|
||||
}
|
||||
|
||||
VectorXf v(4); // a vector of 4 float's
|
||||
// to access vector coefficients, use either operator () or operator []
|
||||
v[0] = 1; v[1] = 2; v(2) = 3; v(3) = 4;
|
||||
std::cout << "\nv:\n" << v << std::endl;
|
||||
}
|
|
@ -1,15 +0,0 @@
|
|||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix3f m3;
|
||||
m3 << 1, 2, 3, 4, 5, 6, 7, 8, 9;
|
||||
Matrix4f m4 = Matrix4f::Identity();
|
||||
Vector4i v4(1, 2, 3, 4);
|
||||
|
||||
std::cout << "m3\n" << m3 << "\nm4:\n"
|
||||
<< m4 << "\nv4:\n" << v4 << std::endl;
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
template<typename Derived>
|
||||
Eigen::Block<Derived>
|
||||
topLeftCorner(MatrixBase<Derived>& m, int rows, int cols)
|
||||
{
|
||||
return Eigen::Block<Derived>(m.derived(), 0, 0, rows, cols);
|
||||
}
|
||||
|
||||
template<typename Derived>
|
||||
const Eigen::Block<const Derived>
|
||||
topLeftCorner(const MatrixBase<Derived>& m, int rows, int cols)
|
||||
{
|
||||
return Eigen::Block<const Derived>(m.derived(), 0, 0, rows, cols);
|
||||
}
|
||||
|
||||
int main(int, char**)
|
||||
{
|
||||
Matrix4d m = Matrix4d::Identity();
|
||||
cout << topLeftCorner(4*m, 2, 3) << endl; // calls the const version
|
||||
topLeftCorner(m, 2, 3) *= 5; // calls the non-const version
|
||||
cout << "Now the matrix m is:" << endl << m << endl;
|
||||
return 0;
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
// define a custom template binary functor
|
||||
template<typename Scalar> struct MakeComplexOp {
|
||||
EIGEN_EMPTY_STRUCT_CTOR(MakeComplexOp)
|
||||
typedef complex<Scalar> result_type;
|
||||
complex<Scalar> operator()(const Scalar& a, const Scalar& b) const { return complex<Scalar>(a,b); }
|
||||
};
|
||||
|
||||
int main(int, char**)
|
||||
{
|
||||
Matrix4d m1 = Matrix4d::Random(), m2 = Matrix4d::Random();
|
||||
cout << m1.binaryExpr(m2, MakeComplexOp<double>()) << endl;
|
||||
return 0;
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user