From e12223517ecadc5840db031f63d2b71cc3bb423f Mon Sep 17 00:00:00 2001
From: Gennady Pospelov <g.pospelov@fz-juelich.de>
Date: Thu, 28 Nov 2013 12:01:14 +0100
Subject: [PATCH] Update to Eigen 3.2

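This pulls in the Eigen 3.2 release of the bundled headers. Headline
additions visible in the diff below: the SparseLU, SparseQR, MetisSupport
and SPQRSupport modules, the RealQZ and GeneralizedEigenSolver
decompositions, and the new Ref<> class. Scalar helpers such as real(),
conj() and abs2() move from the internal namespace to numext, DenseStorage
gains a static assertion against oversized stack allocations, and the
obsolete Core/Product.h and SparseCore/SparseAssign.h headers are removed.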
---
 ThirdParty/eigen3/Eigen/Eigenvalues           |    2 +
 .../eigen3/Eigen/IterativeLinearSolvers       |    2 +-
 ThirdParty/eigen3/Eigen/MetisSupport          |   28 +
 ThirdParty/eigen3/Eigen/OrderingMethods       |   51 +-
 ThirdParty/eigen3/Eigen/SPQRSupport           |   29 +
 ThirdParty/eigen3/Eigen/Sparse                |   14 +-
 ThirdParty/eigen3/Eigen/SparseCholesky        |   21 +-
 ThirdParty/eigen3/Eigen/SparseCore            |    4 +-
 ThirdParty/eigen3/Eigen/SparseLU              |   49 +
 ThirdParty/eigen3/Eigen/SparseQR              |   33 +
 ThirdParty/eigen3/Eigen/src/Cholesky/LDLT.h   |   41 +-
 ThirdParty/eigen3/Eigen/src/Cholesky/LLT.h    |   20 +-
 .../eigen3/Eigen/src/Core/DenseCoeffsBase.h   |   12 +-
 .../eigen3/Eigen/src/Core/DenseStorage.h      |   99 +-
 ThirdParty/eigen3/Eigen/src/Core/Diagonal.h   |   41 +-
 .../eigen3/Eigen/src/Core/DiagonalMatrix.h    |   14 +-
 .../eigen3/Eigen/src/Core/DiagonalProduct.h   |   51 +-
 ThirdParty/eigen3/Eigen/src/Core/Dot.h        |   12 +-
 ThirdParty/eigen3/Eigen/src/Core/EigenBase.h  |    3 +-
 ThirdParty/eigen3/Eigen/src/Core/Functors.h   |   74 +-
 ThirdParty/eigen3/Eigen/src/Core/Fuzzy.h      |   22 +-
 .../eigen3/Eigen/src/Core/GeneralProduct.h    |  100 +-
 .../eigen3/Eigen/src/Core/GenericPacketMath.h |   50 +-
 .../eigen3/Eigen/src/Core/GlobalFunctions.h   |   49 +-
 ThirdParty/eigen3/Eigen/src/Core/IO.h         |    4 +-
 ThirdParty/eigen3/Eigen/src/Core/Map.h        |   30 +-
 ThirdParty/eigen3/Eigen/src/Core/MapBase.h    |   50 +-
 .../eigen3/Eigen/src/Core/MathFunctions.h     |  354 ++--
 ThirdParty/eigen3/Eigen/src/Core/Matrix.h     |    8 +-
 ThirdParty/eigen3/Eigen/src/Core/MatrixBase.h |   32 +-
 ThirdParty/eigen3/Eigen/src/Core/NoAlias.h    |    9 +
 ThirdParty/eigen3/Eigen/src/Core/NumTraits.h  |    3 +
 .../eigen3/Eigen/src/Core/PermutationMatrix.h |   37 +-
 .../eigen3/Eigen/src/Core/PlainObjectBase.h   |  160 +-
 ThirdParty/eigen3/Eigen/src/Core/Product.h    |   98 -
 .../eigen3/Eigen/src/Core/ProductBase.h       |   20 +-
 ThirdParty/eigen3/Eigen/src/Core/Random.h     |   14 +-
 ThirdParty/eigen3/Eigen/src/Core/Redux.h      |    6 +-
 ThirdParty/eigen3/Eigen/src/Core/Ref.h        |  255 +++
 ThirdParty/eigen3/Eigen/src/Core/Replicate.h  |   28 +-
 .../eigen3/Eigen/src/Core/ReturnByValue.h     |    2 +-
 ThirdParty/eigen3/Eigen/src/Core/Select.h     |   14 +-
 .../eigen3/Eigen/src/Core/SelfAdjointView.h   |   16 +-
 .../eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h |    5 +-
 ThirdParty/eigen3/Eigen/src/Core/StableNorm.h |  155 +-
 ThirdParty/eigen3/Eigen/src/Core/Swap.h       |   34 +-
 ThirdParty/eigen3/Eigen/src/Core/Transpose.h  |   35 +-
 .../eigen3/Eigen/src/Core/Transpositions.h    |   18 +-
 .../eigen3/Eigen/src/Core/TriangularMatrix.h  |   14 +-
 .../eigen3/Eigen/src/Core/VectorBlock.h       |  189 --
 .../eigen3/Eigen/src/Core/VectorwiseOp.h      |   47 +-
 ThirdParty/eigen3/Eigen/src/Core/Visitor.h    |   28 +-
 .../Eigen/src/Core/arch/AltiVec/PacketMath.h  |    3 +
 .../Eigen/src/Core/arch/NEON/PacketMath.h     |   50 +-
 .../Eigen/src/Core/arch/SSE/MathFunctions.h   |   80 +-
 .../Eigen/src/Core/arch/SSE/PacketMath.h      |   20 +-
 .../Core/products/GeneralBlockPanelKernel.h   |  284 +--
 .../src/Core/products/GeneralMatrixMatrix.h   |    7 +-
 .../products/GeneralMatrixMatrixTriangular.h  |  112 +-
 .../src/Core/products/GeneralMatrixVector.h   |   27 +-
 .../Core/products/GeneralMatrixVector_MKL.h   |    6 +-
 .../Core/products/SelfadjointMatrixMatrix.h   |   66 +-
 .../products/SelfadjointMatrixMatrix_MKL.h    |   10 +-
 .../Core/products/SelfadjointMatrixVector.h   |   27 +-
 .../products/SelfadjointMatrixVector_MKL.h    |    4 +-
 .../src/Core/products/SelfadjointProduct.h    |   20 +-
 .../Core/products/SelfadjointRank2Update.h    |   18 +-
 .../Core/products/TriangularMatrixMatrix.h    |   40 +-
 .../products/TriangularMatrixMatrix_MKL.h     |    4 +-
 .../Core/products/TriangularMatrixVector.h    |   32 +-
 .../products/TriangularMatrixVector_MKL.h     |   12 +-
 .../Core/products/TriangularSolverMatrix.h    |   18 +-
 .../products/TriangularSolverMatrix_MKL.h     |    4 +-
 .../Eigen/src/Core/util/ForwardDeclarations.h |    7 +-
 .../eigen3/Eigen/src/Core/util/Macros.h       |   32 +-
 .../eigen3/Eigen/src/Core/util/Memory.h       |   42 +-
 ThirdParty/eigen3/Eigen/src/Core/util/Meta.h  |   20 +-
 .../eigen3/Eigen/src/Core/util/StaticAssert.h |    3 +-
 .../eigen3/Eigen/src/Core/util/XprHelper.h    |   24 +-
 .../src/Eigen2Support/Geometry/Hyperplane.h   |    4 +-
 .../Eigen2Support/Geometry/ParametrizedLine.h |    4 +-
 .../src/Eigen2Support/Geometry/Quaternion.h   |    2 +-
 .../src/Eigen2Support/Geometry/Rotation2D.h   |    2 +-
 .../src/Eigen2Support/Geometry/RotationBase.h |    2 +-
 .../src/Eigen2Support/Geometry/Scaling.h      |    2 +-
 .../src/Eigen2Support/Geometry/Transform.h    |    2 +-
 .../src/Eigen2Support/Geometry/Translation.h  |    2 +-
 .../Eigen/src/Eigen2Support/LeastSquares.h    |    2 +-
 .../Eigen/src/Eigen2Support/MathFunctions.h   |   24 +-
 .../eigen3/Eigen/src/Eigen2Support/SVD.h      |   10 +-
 .../Eigen/src/Eigenvalues/EigenSolver.h       |   71 +-
 .../src/Eigenvalues/GeneralizedEigenSolver.h  |  341 +++
 .../src/Eigenvalues/HessenbergDecomposition.h |    6 +-
 .../src/Eigenvalues/MatrixBaseEigenvalues.h   |    3 +-
 .../eigen3/Eigen/src/Eigenvalues/RealQZ.h     |  624 ++++++
 .../eigen3/Eigen/src/Eigenvalues/RealSchur.h  |  117 +-
 .../Eigen/src/Eigenvalues/RealSchur_MKL.h     |    2 +-
 .../src/Eigenvalues/SelfAdjointEigenSolver.h  |   26 +-
 .../Eigenvalues/SelfAdjointEigenSolver_MKL.h  |    2 +-
 .../src/Eigenvalues/Tridiagonalization.h      |   12 +-
 .../eigen3/Eigen/src/Geometry/EulerAngles.h   |   60 +-
 .../eigen3/Eigen/src/Geometry/Homogeneous.h   |    2 +-
 .../eigen3/Eigen/src/Geometry/Hyperplane.h    |   11 +-
 .../eigen3/Eigen/src/Geometry/OrthoMethods.h  |   30 +-
 .../Eigen/src/Geometry/ParametrizedLine.h     |    8 +-
 .../eigen3/Eigen/src/Geometry/Quaternion.h    |   35 +-
 .../eigen3/Eigen/src/Geometry/Rotation2D.h    |   15 +-
 .../eigen3/Eigen/src/Geometry/Scaling.h       |    6 +-
 .../eigen3/Eigen/src/Geometry/Transform.h     |   18 +-
 .../eigen3/Eigen/src/Geometry/Umeyama.h       |   25 +-
 .../Eigen/src/Householder/Householder.h       |   13 +-
 .../src/Householder/HouseholderSequence.h     |   14 +-
 .../IterativeLinearSolvers/IncompleteLUT.h    |  215 +-
 .../IterativeSolverBase.h                     |    2 +-
 ThirdParty/eigen3/Eigen/src/Jacobi/Jacobi.h   |   99 +-
 ThirdParty/eigen3/Eigen/src/LU/Determinant.h  |    2 +-
 ThirdParty/eigen3/Eigen/src/LU/FullPivLU.h    |   14 +-
 ThirdParty/eigen3/Eigen/src/LU/Inverse.h      |    6 +-
 ThirdParty/eigen3/Eigen/src/LU/PartialPivLU.h |    9 +-
 .../Eigen/src/OrderingMethods/Eigen_Colamd.h  | 1850 +++++++++++++++++
 .../Eigen/src/OrderingMethods/Ordering.h      |  150 ++
 .../Eigen/src/PaStiXSupport/PaStiXSupport.h   |   23 +-
 .../Eigen/src/PardisoSupport/PardisoSupport.h |   25 +-
 .../Eigen/src/QR/FullPivHouseholderQR.h       |   39 +-
 .../eigen3/Eigen/src/QR/HouseholderQR.h       |   47 +-
 ThirdParty/eigen3/Eigen/src/SVD/JacobiSVD.h   |   84 +-
 .../Eigen/src/SVD/UpperBidiagonalization.h    |    4 +-
 .../src/SparseCholesky/SimplicialCholesky.h   |  214 +-
 .../SparseCholesky/SimplicialCholesky_impl.h  |  199 ++
 .../Eigen/src/SparseCore/SparseAssign.h       |    0
 .../eigen3/Eigen/src/SparseCore/SparseBlock.h |  432 ++--
 .../Eigen/src/SparseCore/SparseColEtree.h     |  204 ++
 .../src/SparseCore/SparseCwiseBinaryOp.h      |    6 +-
 .../Eigen/src/SparseCore/SparseDenseProduct.h |   15 +-
 .../src/SparseCore/SparseDiagonalProduct.h    |   22 +-
 .../eigen3/Eigen/src/SparseCore/SparseDot.h   |   19 +-
 .../Eigen/src/SparseCore/SparseMatrix.h       |  594 +++---
 .../Eigen/src/SparseCore/SparseMatrixBase.h   |   73 +-
 .../Eigen/src/SparseCore/SparseProduct.h      |    7 +-
 .../src/SparseCore/SparseSelfAdjointView.h    |   44 +-
 .../SparseSparseProductWithPruning.h          |   10 +-
 .../Eigen/src/SparseCore/SparseTranspose.h    |   12 +-
 .../src/SparseCore/SparseTriangularView.h     |   31 +-
 .../eigen3/Eigen/src/SparseCore/SparseUtil.h  |   17 +-
 .../Eigen/src/SparseCore/SparseVector.h       |  158 +-
 .../eigen3/Eigen/src/SparseCore/SparseView.h  |    5 +-
 .../Eigen/src/SuperLUSupport/SuperLUSupport.h |   23 +-
 .../Eigen/src/UmfPackSupport/UmfPackSupport.h |   37 +-
 .../eigen3/Eigen/src/misc/SparseSolve.h       |   17 +
 149 files changed, 6932 insertions(+), 2731 deletions(-)
 create mode 100644 ThirdParty/eigen3/Eigen/MetisSupport
 create mode 100644 ThirdParty/eigen3/Eigen/SPQRSupport
 create mode 100644 ThirdParty/eigen3/Eigen/SparseLU
 create mode 100644 ThirdParty/eigen3/Eigen/SparseQR
 delete mode 100644 ThirdParty/eigen3/Eigen/src/Core/Product.h
 create mode 100644 ThirdParty/eigen3/Eigen/src/Core/Ref.h
 create mode 100644 ThirdParty/eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
 create mode 100644 ThirdParty/eigen3/Eigen/src/Eigenvalues/RealQZ.h
 create mode 100644 ThirdParty/eigen3/Eigen/src/OrderingMethods/Eigen_Colamd.h
 create mode 100644 ThirdParty/eigen3/Eigen/src/OrderingMethods/Ordering.h
 create mode 100644 ThirdParty/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
 delete mode 100644 ThirdParty/eigen3/Eigen/src/SparseCore/SparseAssign.h
 create mode 100644 ThirdParty/eigen3/Eigen/src/SparseCore/SparseColEtree.h

diff --git a/ThirdParty/eigen3/Eigen/Eigenvalues b/ThirdParty/eigen3/Eigen/Eigenvalues
index af99ccd1fab..53c5a73a278 100644
--- a/ThirdParty/eigen3/Eigen/Eigenvalues
+++ b/ThirdParty/eigen3/Eigen/Eigenvalues
@@ -33,6 +33,8 @@
 #include "src/Eigenvalues/HessenbergDecomposition.h"
 #include "src/Eigenvalues/ComplexSchur.h"
 #include "src/Eigenvalues/ComplexEigenSolver.h"
+#include "src/Eigenvalues/RealQZ.h"
+#include "src/Eigenvalues/GeneralizedEigenSolver.h"
 #include "src/Eigenvalues/MatrixBaseEigenvalues.h"
 #ifdef EIGEN_USE_LAPACKE
 #include "src/Eigenvalues/RealSchur_MKL.h"
diff --git a/ThirdParty/eigen3/Eigen/IterativeLinearSolvers b/ThirdParty/eigen3/Eigen/IterativeLinearSolvers
index 315c2dd1ee7..0f4159dc19f 100644
--- a/ThirdParty/eigen3/Eigen/IterativeLinearSolvers
+++ b/ThirdParty/eigen3/Eigen/IterativeLinearSolvers
@@ -6,7 +6,7 @@
 
 #include "src/Core/util/DisableStupidWarnings.h"
 
-/** \ingroup Sparse_modules
+/** 
   * \defgroup IterativeLinearSolvers_Module IterativeLinearSolvers module
   *
  * This module currently provides iterative methods to solve problems of the form \c A \c x = \c b, where \c A is a square matrix, usually very large and sparse.
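For context, a solver from this module is typically used as follows (a sketch with hypothetical A and b; ConjugateGradient is one of the solvers the module provides):

    #include <Eigen/IterativeLinearSolvers>

    Eigen::VectorXd solveIteratively(const Eigen::SparseMatrix<double>& A,
                                     const Eigen::VectorXd& b)
    {
      // A must be self-adjoint positive definite for ConjugateGradient.
      Eigen::ConjugateGradient<Eigen::SparseMatrix<double> > cg(A);
      return cg.solve(b);
    }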
diff --git a/ThirdParty/eigen3/Eigen/MetisSupport b/ThirdParty/eigen3/Eigen/MetisSupport
new file mode 100644
index 00000000000..6a113f7a878
--- /dev/null
+++ b/ThirdParty/eigen3/Eigen/MetisSupport
@@ -0,0 +1,28 @@
+#ifndef EIGEN_METISSUPPORT_MODULE_H
+#define EIGEN_METISSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+extern "C" {
+#include <metis.h>
+}
+
+
+/** \ingroup Support_modules
+  * \defgroup MetisSupport_Module MetisSupport module
+  *
+  * \code
+  * #include <Eigen/MetisSupport>
+  * \endcode
+  * This module defines an interface to the METIS reordering package (http://glaros.dtc.umn.edu/gkhome/views/metis). 
+  * It can be used just like any other built-in ordering method, as explained \link OrderingMethods_Module here \endlink.
+  */
+
+
+#include "src/MetisSupport/MetisSupport.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_METISSUPPORT_MODULE_H
diff --git a/ThirdParty/eigen3/Eigen/OrderingMethods b/ThirdParty/eigen3/Eigen/OrderingMethods
index 1e2d87452e5..7c0f1fffff6 100644
--- a/ThirdParty/eigen3/Eigen/OrderingMethods
+++ b/ThirdParty/eigen3/Eigen/OrderingMethods
@@ -5,19 +5,62 @@
 
 #include "src/Core/util/DisableStupidWarnings.h"
 
-/** \ingroup Sparse_modules
+/** 
   * \defgroup OrderingMethods_Module OrderingMethods module
   *
-  * This module is currently for internal use only.
-  *
-  *
+  * This module is currently for internal use only.
+  * 
+  * It defines various built-in and external ordering methods for sparse matrices. 
+  * They are typically used to reduce the fill-in introduced during 
+  * a sparse matrix decomposition (LLT, LU, QR).
+  * More precisely, in a preprocessing step, a permutation matrix P is computed using 
+  * one of these ordering methods and applied to the columns of the matrix. 
+  * Using, for instance, the sparse Cholesky decomposition, the number of 
+  * nonzero elements in LLT(A*P) is expected to be much smaller than in LLT(A).
+  * 
+  * 
+  * Usage: 
   * \code
   * #include <Eigen/OrderingMethods>
   * \endcode
+  * 
+  * A typical use is as a template parameter of the sparse decomposition classes:
+  * 
+  * \code 
+  * SparseLU<MatrixType, COLAMDOrdering<int> > solver;
+  * \endcode 
+  * 
+  * \code 
+  * SparseQR<MatrixType, COLAMDOrdering<int> > solver;
+  * \endcode
+  * 
+  * It is also possible to call a particular ordering method directly, for your own purpose:
+  * \code 
+  * AMDOrdering<int> ordering;
+  * PermutationMatrix<Dynamic, Dynamic, int> perm;
+  * SparseMatrix<double> A; 
+  * //Fill the matrix ...
+  * 
+  * ordering(A, perm); // Call AMD
+  * \endcode
+  * 
+  * \note Some of these methods (like AMD or METIS) need the sparsity pattern 
+  * of the input matrix to be symmetric. When the matrix is structurally unsymmetric, 
+  * Eigen internally computes the pattern of \f$A^T*A\f$ before calling the method.
+  * If your matrix is already symmetric (at least in structure), you can avoid this
+  * step by calling the method with a SelfAdjointView type.
+  * 
+  * \code
+  * // Call the ordering on the pattern of the lower triangular part of A
+  * ordering(A.selfadjointView<Lower>(), perm);
+  * \endcode
   */
 
+#ifndef EIGEN_MPL2_ONLY
 #include "src/OrderingMethods/Amd.h"
+#endif
 
+#include "src/OrderingMethods/Ordering.h"
 #include "src/Core/util/ReenableStupidWarnings.h"
 
 #endif // EIGEN_ORDERINGMETHODS_MODULE_H
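Pulling the fragments from the documentation above into one compilable sketch (AMD is only available outside EIGEN_MPL2_ONLY builds, per the guard above; A is hypothetical):

    #include <Eigen/Sparse>
    #include <Eigen/OrderingMethods>

    void computePermutation(const Eigen::SparseMatrix<double>& A)
    {
      Eigen::AMDOrdering<int> ordering;
      Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
      // Passing a SelfAdjointView skips the internal A^T*A pattern computation.
      ordering(A.selfadjointView<Eigen::Lower>(), perm);
    }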
diff --git a/ThirdParty/eigen3/Eigen/SPQRSupport b/ThirdParty/eigen3/Eigen/SPQRSupport
new file mode 100644
index 00000000000..77016442ee7
--- /dev/null
+++ b/ThirdParty/eigen3/Eigen/SPQRSupport
@@ -0,0 +1,29 @@
+#ifndef EIGEN_SPQRSUPPORT_MODULE_H
+#define EIGEN_SPQRSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#include "SuiteSparseQR.hpp"
+
+/** \ingroup Support_modules
+  * \defgroup SPQRSupport_Module SuiteSparseQR module
+  * 
+  * This module provides an interface to the SPQR library, which is part of the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package.
+  *
+  * \code
+  * #include <Eigen/SPQRSupport>
+  * \endcode
+  *
+  * In order to use this module, the SPQR headers must be accessible from the include paths, and your binary must be linked to the SPQR library and its dependencies (CHOLMOD, AMD, COLAMD, ...).
+  * For a CMake-based project, you can use our FindSPQR.cmake and FindCholmod.cmake modules.
+  *
+  */
+
+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"
+#include "src/CholmodSupport/CholmodSupport.h"
+#include "src/SPQRSupport/SuiteSparseQRSupport.h"
+
+#endif
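A minimal sketch of the solver this module exposes (assumes the binary is linked against SPQR, CHOLMOD, AMD and COLAMD as noted above; A and b are hypothetical):

    #include <Eigen/SPQRSupport>

    Eigen::VectorXd leastSquares(const Eigen::SparseMatrix<double>& A,
                                 const Eigen::VectorXd& b)
    {
      // SPQR accepts rectangular A and returns the least-squares solution.
      Eigen::SPQR<Eigen::SparseMatrix<double> > spqr(A);
      return spqr.solve(b);
    }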
diff --git a/ThirdParty/eigen3/Eigen/Sparse b/ThirdParty/eigen3/Eigen/Sparse
index 2d1757172eb..7cc9c09133a 100644
--- a/ThirdParty/eigen3/Eigen/Sparse
+++ b/ThirdParty/eigen3/Eigen/Sparse
@@ -1,13 +1,15 @@
 #ifndef EIGEN_SPARSE_MODULE_H
 #define EIGEN_SPARSE_MODULE_H
 
-/** \defgroup Sparse_modules Sparse modules
+/** \defgroup Sparse_Module Sparse meta-module
   *
   * Meta-module including all related modules:
-  * - SparseCore
-  * - OrderingMethods
-  * - SparseCholesky
-  * - IterativeLinearSolvers
+  * - \ref SparseCore_Module
+  * - \ref OrderingMethods_Module
+  * - \ref SparseCholesky_Module
+  * - \ref SparseLU_Module
+  * - \ref SparseQR_Module
+  * - \ref IterativeLinearSolvers_Module
   *
   * \code
   * #include <Eigen/Sparse>
@@ -17,6 +19,8 @@
 #include "SparseCore"
 #include "OrderingMethods"
 #include "SparseCholesky"
+#include "SparseLU"
+#include "SparseQR"
 #include "IterativeLinearSolvers"
 
 #endif // EIGEN_SPARSE_MODULE_H
diff --git a/ThirdParty/eigen3/Eigen/SparseCholesky b/ThirdParty/eigen3/Eigen/SparseCholesky
index 5f82742f7d8..9f5056aa1a1 100644
--- a/ThirdParty/eigen3/Eigen/SparseCholesky
+++ b/ThirdParty/eigen3/Eigen/SparseCholesky
@@ -1,11 +1,21 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2013 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
 #ifndef EIGEN_SPARSECHOLESKY_MODULE_H
 #define EIGEN_SPARSECHOLESKY_MODULE_H
 
 #include "SparseCore"
+#include "OrderingMethods"
 
 #include "src/Core/util/DisableStupidWarnings.h"
 
-/** \ingroup Sparse_modules
+/** 
   * \defgroup SparseCholesky_Module SparseCholesky module
   *
   * This module currently provides two variants of the direct sparse Cholesky decomposition for selfadjoint (hermitian) matrices.
@@ -20,11 +30,18 @@
   * \endcode
   */
 
+#ifdef EIGEN_MPL2_ONLY
+#error The SparseCholesky module has nothing to offer in MPL2 only mode
+#endif
+
 #include "src/misc/Solve.h"
 #include "src/misc/SparseSolve.h"
-
 #include "src/SparseCholesky/SimplicialCholesky.h"
 
+#ifndef EIGEN_MPL2_ONLY
+#include "src/SparseCholesky/SimplicialCholesky_impl.h"
+#endif
+
 #include "src/Core/util/ReenableStupidWarnings.h"
 
 #endif // EIGEN_SPARSECHOLESKY_MODULE_H
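For reference, the two variants mentioned in the module documentation are used like this (a sketch; SimplicialLLT works the same way as the LDLT variant shown):

    #include <Eigen/SparseCholesky>

    Eigen::VectorXd solveSPD(const Eigen::SparseMatrix<double>& A,
                             const Eigen::VectorXd& b)
    {
      // A must be self-adjoint; the LDLT variant avoids square roots.
      Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > chol(A);
      return chol.solve(b);
    }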
diff --git a/ThirdParty/eigen3/Eigen/SparseCore b/ThirdParty/eigen3/Eigen/SparseCore
index 41d28c92824..9b5be5e15a9 100644
--- a/ThirdParty/eigen3/Eigen/SparseCore
+++ b/ThirdParty/eigen3/Eigen/SparseCore
@@ -11,7 +11,7 @@
 #include <cstring>
 #include <algorithm>
 
-/** \ingroup Sparse_modules
+/** 
   * \defgroup SparseCore_Module SparseCore module
   *
  * This module provides a sparse matrix representation, and basic associated matrix manipulations
@@ -40,14 +40,12 @@ struct Sparse {};
 #include "src/SparseCore/SparseMatrix.h"
 #include "src/SparseCore/MappedSparseMatrix.h"
 #include "src/SparseCore/SparseVector.h"
-#include "src/SparseCore/CoreIterators.h"
 #include "src/SparseCore/SparseBlock.h"
 #include "src/SparseCore/SparseTranspose.h"
 #include "src/SparseCore/SparseCwiseUnaryOp.h"
 #include "src/SparseCore/SparseCwiseBinaryOp.h"
 #include "src/SparseCore/SparseDot.h"
 #include "src/SparseCore/SparsePermutation.h"
-#include "src/SparseCore/SparseAssign.h"
 #include "src/SparseCore/SparseRedux.h"
 #include "src/SparseCore/SparseFuzzy.h"
 #include "src/SparseCore/ConservativeSparseSparseProduct.h"
diff --git a/ThirdParty/eigen3/Eigen/SparseLU b/ThirdParty/eigen3/Eigen/SparseLU
new file mode 100644
index 00000000000..8527a49bd86
--- /dev/null
+++ b/ThirdParty/eigen3/Eigen/SparseLU
@@ -0,0 +1,49 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSELU_MODULE_H
+#define EIGEN_SPARSELU_MODULE_H
+
+#include "SparseCore"
+
+/** 
+  * \defgroup SparseLU_Module SparseLU module
+  * This module defines a supernodal factorization of general sparse matrices.
+  * The code is fully optimized for supernode-panel updates with specialized kernels.
+  * Please see the documentation of the SparseLU class for more details.
+  */
+
+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"
+
+// Ordering interface
+#include "OrderingMethods"
+
+#include "src/SparseLU/SparseLU_gemm_kernel.h"
+
+#include "src/SparseLU/SparseLU_Structs.h"
+#include "src/SparseLU/SparseLU_SupernodalMatrix.h"
+#include "src/SparseLU/SparseLUImpl.h"
+#include "src/SparseCore/SparseColEtree.h"
+#include "src/SparseLU/SparseLU_Memory.h"
+#include "src/SparseLU/SparseLU_heap_relax_snode.h"
+#include "src/SparseLU/SparseLU_relax_snode.h"
+#include "src/SparseLU/SparseLU_pivotL.h"
+#include "src/SparseLU/SparseLU_panel_dfs.h"
+#include "src/SparseLU/SparseLU_kernel_bmod.h"
+#include "src/SparseLU/SparseLU_panel_bmod.h"
+#include "src/SparseLU/SparseLU_column_dfs.h"
+#include "src/SparseLU/SparseLU_column_bmod.h"
+#include "src/SparseLU/SparseLU_copy_to_ucol.h"
+#include "src/SparseLU/SparseLU_pruneL.h"
+#include "src/SparseLU/SparseLU_Utils.h"
+#include "src/SparseLU/SparseLU.h"
+
+#endif // EIGEN_SPARSELU_MODULE_H
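A hedged usage sketch of the new class (standard Eigen 3.2 API; the symbolic analyzePattern step can be reused across matrices with the same sparsity pattern; A and b are hypothetical):

    #include <Eigen/SparseLU>

    Eigen::VectorXd solveLU(const Eigen::SparseMatrix<double>& A,
                            const Eigen::VectorXd& b)
    {
      Eigen::SparseLU<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > lu;
      lu.analyzePattern(A);  // symbolic factorization
      lu.factorize(A);       // numeric factorization
      return lu.solve(b);
    }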
diff --git a/ThirdParty/eigen3/Eigen/SparseQR b/ThirdParty/eigen3/Eigen/SparseQR
new file mode 100644
index 00000000000..4ee42065eed
--- /dev/null
+++ b/ThirdParty/eigen3/Eigen/SparseQR
@@ -0,0 +1,33 @@
+#ifndef EIGEN_SPARSEQR_MODULE_H
+#define EIGEN_SPARSEQR_MODULE_H
+
+#include "SparseCore"
+#include "OrderingMethods"
+#include "src/Core/util/DisableStupidWarnings.h"
+
+/** \defgroup SparseQR_Module SparseQR module
+  * \brief Provides QR decomposition for sparse matrices
+  * 
+  * This module provides a simplicial version of the left-looking Sparse QR decomposition. 
+  * The columns of the input matrix should be reordered to limit the fill-in during the 
+  * decomposition. Built-in methods (COLAMD, AMD) or external methods (METIS) can be used to this end.
+  * See the \link OrderingMethods_Module OrderingMethods\endlink module for the list 
+  * of built-in and external ordering methods.
+  * 
+  * \code
+  * #include <Eigen/SparseQR>
+  * \endcode
+  * 
+  * 
+  */
+
+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"
+
+#include "OrderingMethods"
+#include "src/SparseCore/SparseColEtree.h"
+#include "src/SparseQR/SparseQR.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif
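And the matching sketch for this module (A is assumed to be in compressed mode; COLAMD is the ordering suggested in the OrderingMethods documentation above):

    #include <Eigen/SparseQR>

    Eigen::VectorXd solveQR(const Eigen::SparseMatrix<double>& A,
                            const Eigen::VectorXd& b)
    {
      // Works for rectangular A; returns the least-squares solution.
      Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr(A);
      return qr.solve(b);
    }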
diff --git a/ThirdParty/eigen3/Eigen/src/Cholesky/LDLT.h b/ThirdParty/eigen3/Eigen/src/Cholesky/LDLT.h
index 5edc9b4725e..d19cb3968dc 100644
--- a/ThirdParty/eigen3/Eigen/src/Cholesky/LDLT.h
+++ b/ThirdParty/eigen3/Eigen/src/Cholesky/LDLT.h
@@ -196,7 +196,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
     LDLT& compute(const MatrixType& matrix);
 
     template <typename Derived>
-    LDLT& rankUpdate(const MatrixBase<Derived>& w,RealScalar alpha=1);
+    LDLT& rankUpdate(const MatrixBase<Derived>& w, const RealScalar& alpha=1);
 
     /** \returns the internal LDLT decomposition matrix
       *
@@ -248,6 +248,7 @@ template<> struct ldlt_inplace<Lower>
   template<typename MatrixType, typename TranspositionType, typename Workspace>
   static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
   {
+    using std::abs;
     typedef typename MatrixType::Scalar Scalar;
     typedef typename MatrixType::RealScalar RealScalar;
     typedef typename MatrixType::Index Index;
@@ -258,7 +259,7 @@ template<> struct ldlt_inplace<Lower>
     {
       transpositions.setIdentity();
       if(sign)
-        *sign = real(mat.coeff(0,0))>0 ? 1:-1;
+        *sign = numext::real(mat.coeff(0,0))>0 ? 1:-1;
       return true;
     }
 
@@ -277,15 +278,13 @@ template<> struct ldlt_inplace<Lower>
         // are compared; if any diagonal is negligible compared
         // to the largest overall, the algorithm bails.
         cutoff = abs(NumTraits<Scalar>::epsilon() * biggest_in_corner);
-
-        if(sign)
-          *sign = real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 1 : -1;
       }
 
       // Finish early if the matrix is not full rank.
       if(biggest_in_corner < cutoff)
       {
         for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
+        if(sign) *sign = 0;
         break;
       }
 
@@ -301,11 +300,11 @@ template<> struct ldlt_inplace<Lower>
         for(int i=k+1;i<index_of_biggest_in_corner;++i)
         {
           Scalar tmp = mat.coeffRef(i,k);
-          mat.coeffRef(i,k) = conj(mat.coeffRef(index_of_biggest_in_corner,i));
-          mat.coeffRef(index_of_biggest_in_corner,i) = conj(tmp);
+          mat.coeffRef(i,k) = numext::conj(mat.coeffRef(index_of_biggest_in_corner,i));
+          mat.coeffRef(index_of_biggest_in_corner,i) = numext::conj(tmp);
         }
         if(NumTraits<Scalar>::IsComplex)
-          mat.coeffRef(index_of_biggest_in_corner,k) = conj(mat.coeff(index_of_biggest_in_corner,k));
+          mat.coeffRef(index_of_biggest_in_corner,k) = numext::conj(mat.coeff(index_of_biggest_in_corner,k));
       }
 
       // partition the matrix:
@@ -326,6 +325,16 @@ template<> struct ldlt_inplace<Lower>
       }
       if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
         A21 /= mat.coeffRef(k,k);
+      
+      if(sign)
+      {
+        // LDLT is not guaranteed to work for indefinite matrices, but let's try to get the sign right
+        int newSign = numext::real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0;
+        if(k == 0)
+          *sign = newSign;
+        else if(*sign != newSign)
+          *sign = 0;
+      }
     }
 
     return true;
@@ -339,9 +348,9 @@ template<> struct ldlt_inplace<Lower>
   // Here only rank-1 updates are implemented, to reduce the
   // requirement for intermediate storage and improve accuracy
   template<typename MatrixType, typename WDerived>
-  static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w, typename MatrixType::RealScalar sigma=1)
+  static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w, const typename MatrixType::RealScalar& sigma=1)
   {
-    using internal::isfinite;
+    using numext::isfinite;
     typedef typename MatrixType::Scalar Scalar;
     typedef typename MatrixType::RealScalar RealScalar;
     typedef typename MatrixType::Index Index;
@@ -359,9 +368,9 @@ template<> struct ldlt_inplace<Lower>
         break;
 
       // Update the diagonal terms
-      RealScalar dj = real(mat.coeff(j,j));
+      RealScalar dj = numext::real(mat.coeff(j,j));
       Scalar wj = w.coeff(j);
-      RealScalar swj2 = sigma*abs2(wj);
+      RealScalar swj2 = sigma*numext::abs2(wj);
       RealScalar gamma = dj*alpha + swj2;
 
       mat.coeffRef(j,j) += swj2/alpha;
@@ -372,13 +381,13 @@ template<> struct ldlt_inplace<Lower>
       Index rs = size-j-1;
       w.tail(rs) -= wj * mat.col(j).tail(rs);
       if(gamma != 0)
-        mat.col(j).tail(rs) += (sigma*conj(wj)/gamma)*w.tail(rs);
+        mat.col(j).tail(rs) += (sigma*numext::conj(wj)/gamma)*w.tail(rs);
     }
     return true;
   }
 
   template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
-  static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, typename MatrixType::RealScalar sigma=1)
+  static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, const typename MatrixType::RealScalar& sigma=1)
   {
     // Apply the permutation to the input w
     tmp = transpositions * w;
@@ -397,7 +406,7 @@ template<> struct ldlt_inplace<Upper>
   }
 
   template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
-  static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, typename MatrixType::RealScalar sigma=1)
+  static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, const typename MatrixType::RealScalar& sigma=1)
   {
     Transpose<MatrixType> matt(mat);
     return ldlt_inplace<Lower>::update(matt, transpositions, tmp, w.conjugate(), sigma);
@@ -449,7 +458,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
   */
 template<typename MatrixType, int _UpLo>
 template<typename Derived>
-LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w,typename NumTraits<typename MatrixType::Scalar>::Real sigma)
+LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename NumTraits<typename MatrixType::Scalar>::Real& sigma)
 {
   const Index size = w.rows();
   if (m_isInitialized)
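The hunks above change rankUpdate to take sigma by const reference and rework the sign bookkeeping; call sites are unaffected (sketch with hypothetical ldlt and w):

    #include <Eigen/Cholesky>

    // sigma = -1 downdates: updates the factorization to that of A - w*w^T.
    void downdate(Eigen::LDLT<Eigen::MatrixXd>& ldlt, const Eigen::VectorXd& w)
    {
      ldlt.rankUpdate(w, -1.0);
    }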
diff --git a/ThirdParty/eigen3/Eigen/src/Cholesky/LLT.h b/ThirdParty/eigen3/Eigen/src/Cholesky/LLT.h
index 41d14e532f1..2e6189f7dab 100644
--- a/ThirdParty/eigen3/Eigen/src/Cholesky/LLT.h
+++ b/ThirdParty/eigen3/Eigen/src/Cholesky/LLT.h
@@ -190,6 +190,7 @@ template<typename Scalar, int UpLo> struct llt_inplace;
 template<typename MatrixType, typename VectorType>
 static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma)
 {
+  using std::sqrt;
   typedef typename MatrixType::Scalar Scalar;
   typedef typename MatrixType::RealScalar RealScalar;
   typedef typename MatrixType::Index Index;
@@ -199,7 +200,7 @@ static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const V
   typedef Matrix<Scalar,Dynamic,1> TempVectorType;
   typedef typename TempVectorType::SegmentReturnType TempVecSegment;
 
-  int n = mat.cols();
+  Index n = mat.cols();
   eigen_assert(mat.rows()==n && vec.size()==n);
 
   TempVectorType temp;
@@ -211,12 +212,12 @@ static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const V
     // i.e., for sigma > 0
     temp = sqrt(sigma) * vec;
 
-    for(int i=0; i<n; ++i)
+    for(Index i=0; i<n; ++i)
     {
       JacobiRotation<Scalar> g;
       g.makeGivens(mat(i,i), -temp(i), &mat(i,i));
 
-      int rs = n-i-1;
+      Index rs = n-i-1;
       if(rs>0)
       {
         ColXprSegment x(mat.col(i).tail(rs));
@@ -229,12 +230,12 @@ static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const V
   {
     temp = vec;
     RealScalar beta = 1;
-    for(int j=0; j<n; ++j)
+    for(Index j=0; j<n; ++j)
     {
-      RealScalar Ljj = real(mat.coeff(j,j));
-      RealScalar dj = abs2(Ljj);
+      RealScalar Ljj = numext::real(mat.coeff(j,j));
+      RealScalar dj = numext::abs2(Ljj);
       Scalar wj = temp.coeff(j);
-      RealScalar swj2 = sigma*abs2(wj);
+      RealScalar swj2 = sigma*numext::abs2(wj);
       RealScalar gamma = dj*beta + swj2;
 
       RealScalar x = dj + swj2/beta;
@@ -250,7 +251,7 @@ static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const V
       {
         temp.tail(rs) -= (wj/Ljj) * mat.col(j).tail(rs);
         if(gamma != 0)
-          mat.col(j).tail(rs) = (nLjj/Ljj) * mat.col(j).tail(rs) + (nLjj * sigma*conj(wj)/gamma)*temp.tail(rs);
+          mat.col(j).tail(rs) = (nLjj/Ljj) * mat.col(j).tail(rs) + (nLjj * sigma*numext::conj(wj)/gamma)*temp.tail(rs);
       }
     }
   }
@@ -263,6 +264,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
   template<typename MatrixType>
   static typename MatrixType::Index unblocked(MatrixType& mat)
   {
+    using std::sqrt;
     typedef typename MatrixType::Index Index;
     
     eigen_assert(mat.rows()==mat.cols());
@@ -275,7 +277,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
       Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);
       Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k);
 
-      RealScalar x = real(mat.coeff(k,k));
+      RealScalar x = numext::real(mat.coeff(k,k));
       if (k>0) x -= A10.squaredNorm();
       if (x<=RealScalar(0))
         return k;
diff --git a/ThirdParty/eigen3/Eigen/src/Core/DenseCoeffsBase.h b/ThirdParty/eigen3/Eigen/src/Core/DenseCoeffsBase.h
index 72704c2d79f..3c890f21590 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/DenseCoeffsBase.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/DenseCoeffsBase.h
@@ -427,22 +427,22 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
 
     template<int StoreMode>
     EIGEN_STRONG_INLINE void writePacket
-    (Index row, Index col, const typename internal::packet_traits<Scalar>::type& x)
+    (Index row, Index col, const typename internal::packet_traits<Scalar>::type& val)
     {
       eigen_internal_assert(row >= 0 && row < rows()
                         && col >= 0 && col < cols());
-      derived().template writePacket<StoreMode>(row,col,x);
+      derived().template writePacket<StoreMode>(row,col,val);
     }
 
 
     /** \internal */
     template<int StoreMode>
     EIGEN_STRONG_INLINE void writePacketByOuterInner
-    (Index outer, Index inner, const typename internal::packet_traits<Scalar>::type& x)
+    (Index outer, Index inner, const typename internal::packet_traits<Scalar>::type& val)
     {
       writePacket<StoreMode>(rowIndexByOuterInner(outer, inner),
                             colIndexByOuterInner(outer, inner),
-                            x);
+                            val);
     }
 
     /** \internal
@@ -456,10 +456,10 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
       */
     template<int StoreMode>
     EIGEN_STRONG_INLINE void writePacket
-    (Index index, const typename internal::packet_traits<Scalar>::type& x)
+    (Index index, const typename internal::packet_traits<Scalar>::type& val)
     {
       eigen_internal_assert(index >= 0 && index < size());
-      derived().template writePacket<StoreMode>(index,x);
+      derived().template writePacket<StoreMode>(index,val);
     }
 
 #ifndef EIGEN_PARSED_BY_DOXYGEN
diff --git a/ThirdParty/eigen3/Eigen/src/Core/DenseStorage.h b/ThirdParty/eigen3/Eigen/src/Core/DenseStorage.h
index 4276efe1b0e..3e7f9c1b7a7 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/DenseStorage.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/DenseStorage.h
@@ -35,8 +35,16 @@ template <typename T, int Size, int MatrixOrArrayOptions,
 struct plain_array
 {
   T array[Size];
-  plain_array() {}
-  plain_array(constructor_without_unaligned_array_assert) {}
+
+  plain_array() 
+  { 
+    EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+  }
+
+  plain_array(constructor_without_unaligned_array_assert) 
+  { 
+    EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+  }
 };
 
 #if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
@@ -64,8 +72,17 @@ template <typename T, int Size, int MatrixOrArrayOptions>
 struct plain_array<T, Size, MatrixOrArrayOptions, 16>
 {
   EIGEN_USER_ALIGN16 T array[Size];
-  plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf) }
-  plain_array(constructor_without_unaligned_array_assert) {}
+
+  plain_array() 
+  { 
+    EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf);
+    EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+  }
+
+  plain_array(constructor_without_unaligned_array_assert) 
+  { 
+    EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+  }
 };
 
 template <typename T, int MatrixOrArrayOptions, int Alignment>
@@ -97,7 +114,7 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
 {
     internal::plain_array<T,Size,_Options> m_data;
   public:
-    inline explicit DenseStorage() {}
+    inline DenseStorage() {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(internal::constructor_without_unaligned_array_assert()) {}
     inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
@@ -114,7 +131,7 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
 template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>
 {
   public:
-    inline explicit DenseStorage() {}
+    inline DenseStorage() {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert) {}
     inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
     inline void swap(DenseStorage& ) {}
@@ -143,16 +160,16 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
     DenseIndex m_rows;
     DenseIndex m_cols;
   public:
-    inline explicit DenseStorage() : m_rows(0), m_cols(0) {}
+    inline DenseStorage() : m_rows(0), m_cols(0) {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
-    inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex cols) : m_rows(rows), m_cols(cols) {}
+    inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
     inline void swap(DenseStorage& other)
     { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
-    inline DenseIndex rows(void) const {return m_rows;}
-    inline DenseIndex cols(void) const {return m_cols;}
-    inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; }
-    inline void resize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; }
+    inline DenseIndex rows() const {return m_rows;}
+    inline DenseIndex cols() const {return m_cols;}
+    inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
+    inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
     inline const T *data() const { return m_data.array; }
     inline T *data() { return m_data.array; }
 };
@@ -163,15 +180,15 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
     internal::plain_array<T,Size,_Options> m_data;
     DenseIndex m_rows;
   public:
-    inline explicit DenseStorage() : m_rows(0) {}
+    inline DenseStorage() : m_rows(0) {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
-    inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex) : m_rows(rows) {}
+    inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
     inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
     inline DenseIndex rows(void) const {return m_rows;}
     inline DenseIndex cols(void) const {return _Cols;}
-    inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; }
-    inline void resize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; }
+    inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
+    inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
     inline const T *data() const { return m_data.array; }
     inline T *data() { return m_data.array; }
 };
@@ -182,15 +199,15 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
     internal::plain_array<T,Size,_Options> m_data;
     DenseIndex m_cols;
   public:
-    inline explicit DenseStorage() : m_cols(0) {}
+    inline DenseStorage() : m_cols(0) {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
-    inline DenseStorage(DenseIndex, DenseIndex, DenseIndex cols) : m_cols(cols) {}
+    inline DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
     inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
     inline DenseIndex rows(void) const {return _Rows;}
     inline DenseIndex cols(void) const {return m_cols;}
-    inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; }
-    inline void resize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; }
+    inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
+    inline void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
     inline const T *data() const { return m_data.array; }
     inline T *data() { return m_data.array; }
 };
@@ -202,24 +219,24 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
     DenseIndex m_rows;
     DenseIndex m_cols;
   public:
-    inline explicit DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
+    inline DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert)
        : m_data(0), m_rows(0), m_cols(0) {}
-    inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex cols)
-      : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows), m_cols(cols) 
+    inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+      : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows), m_cols(nbCols)
     { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
     inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
     inline void swap(DenseStorage& other)
     { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
     inline DenseIndex rows(void) const {return m_rows;}
     inline DenseIndex cols(void) const {return m_cols;}
-    inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex cols)
+    inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
     {
       m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
-      m_rows = rows;
-      m_cols = cols;
+      m_rows = nbRows;
+      m_cols = nbCols;
     }
-    void resize(DenseIndex size, DenseIndex rows, DenseIndex cols)
+    void resize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
     {
       if(size != m_rows*m_cols)
       {
@@ -230,8 +247,8 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
           m_data = 0;
         EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
       }
-      m_rows = rows;
-      m_cols = cols;
+      m_rows = nbRows;
+      m_cols = nbCols;
     }
     inline const T *data() const { return m_data; }
     inline T *data() { return m_data; }
@@ -243,20 +260,20 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
     T *m_data;
     DenseIndex m_cols;
   public:
-    inline explicit DenseStorage() : m_data(0), m_cols(0) {}
+    inline DenseStorage() : m_data(0), m_cols(0) {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
-    inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(cols)
+    inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
     { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
     inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
     inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
     static inline DenseIndex rows(void) {return _Rows;}
     inline DenseIndex cols(void) const {return m_cols;}
-    inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex cols)
+    inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
     {
       m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
-      m_cols = cols;
+      m_cols = nbCols;
     }
-    EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex cols)
+    EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex nbCols)
     {
       if(size != _Rows*m_cols)
       {
@@ -267,7 +284,7 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
           m_data = 0;
         EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
       }
-      m_cols = cols;
+      m_cols = nbCols;
     }
     inline const T *data() const { return m_data; }
     inline T *data() { return m_data; }
@@ -279,20 +296,20 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
     T *m_data;
     DenseIndex m_rows;
   public:
-    inline explicit DenseStorage() : m_data(0), m_rows(0) {}
+    inline DenseStorage() : m_data(0), m_rows(0) {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
-    inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows)
+    inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
     { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
     inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
     inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
     inline DenseIndex rows(void) const {return m_rows;}
     static inline DenseIndex cols(void) {return _Cols;}
-    inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex)
+    inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
     {
       m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
-      m_rows = rows;
+      m_rows = nbRows;
     }
-    EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex rows, DenseIndex)
+    EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex nbRows, DenseIndex)
     {
       if(size != m_rows*_Cols)
       {
@@ -303,7 +320,7 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
           m_data = 0;
         EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
       }
-      m_rows = rows;
+      m_rows = nbRows;
     }
     inline const T *data() const { return m_data; }
     inline T *data() { return m_data; }
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Diagonal.h b/ThirdParty/eigen3/Eigen/src/Core/Diagonal.h
index 16261968a0f..aab8007b3fc 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Diagonal.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Diagonal.h
@@ -41,12 +41,12 @@ struct traits<Diagonal<MatrixType,DiagIndex> >
   typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
   typedef typename MatrixType::StorageKind StorageKind;
   enum {
-    RowsAtCompileTime = (int(DiagIndex) == Dynamic || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic
-    : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
-                            MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
+    RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic
+                      : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
+                                              MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
     ColsAtCompileTime = 1,
     MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
-                         : DiagIndex == Dynamic ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime,
+                         : DiagIndex == DynamicIndex ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime,
                                                                               MatrixType::MaxColsAtCompileTime)
                          : (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
                                                  MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
@@ -61,20 +61,21 @@ struct traits<Diagonal<MatrixType,DiagIndex> >
 };
 }
 
-template<typename MatrixType, int DiagIndex> class Diagonal
-   : public internal::dense_xpr_base< Diagonal<MatrixType,DiagIndex> >::type
+template<typename MatrixType, int _DiagIndex> class Diagonal
+   : public internal::dense_xpr_base< Diagonal<MatrixType,_DiagIndex> >::type
 {
   public:
 
+    enum { DiagIndex = _DiagIndex };
     typedef typename internal::dense_xpr_base<Diagonal>::type Base;
     EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
 
-    inline Diagonal(MatrixType& matrix, Index index = DiagIndex) : m_matrix(matrix), m_index(index) {}
+    inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) {}
 
     EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
 
     inline Index rows() const
-    { return m_index.value()<0 ? (std::min)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min)(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
+    { return m_index.value()<0 ? (std::min<Index>)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min<Index>)(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
 
     inline Index cols() const { return 1; }
 
@@ -113,20 +114,20 @@ template<typename MatrixType, int DiagIndex> class Diagonal
       return m_matrix.coeff(row+rowOffset(), row+colOffset());
     }
 
-    inline Scalar& coeffRef(Index index)
+    inline Scalar& coeffRef(Index idx)
     {
       EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
-      return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset());
+      return m_matrix.const_cast_derived().coeffRef(idx+rowOffset(), idx+colOffset());
     }
 
-    inline const Scalar& coeffRef(Index index) const
+    inline const Scalar& coeffRef(Index idx) const
     {
-      return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset());
+      return m_matrix.const_cast_derived().coeffRef(idx+rowOffset(), idx+colOffset());
     }
 
-    inline CoeffReturnType coeff(Index index) const
+    inline CoeffReturnType coeff(Index idx) const
     {
-      return m_matrix.coeff(index+rowOffset(), index+colOffset());
+      return m_matrix.coeff(idx+rowOffset(), idx+colOffset());
     }
 
     const typename internal::remove_all<typename MatrixType::Nested>::type& 
@@ -142,7 +143,7 @@ template<typename MatrixType, int DiagIndex> class Diagonal
 
   protected:
     typename MatrixType::Nested m_matrix;
-    const internal::variable_if_dynamic<Index, DiagIndex> m_index;
+    const internal::variable_if_dynamicindex<Index, DiagIndex> m_index;
 
   private:
     // some compilers may fail to optimize std::max etc in case of compile-time constants...
@@ -171,7 +172,7 @@ MatrixBase<Derived>::diagonal()
 
 /** This is the const version of diagonal(). */
 template<typename Derived>
-inline const typename MatrixBase<Derived>::ConstDiagonalReturnType
+inline typename MatrixBase<Derived>::ConstDiagonalReturnType
 MatrixBase<Derived>::diagonal() const
 {
   return ConstDiagonalReturnType(derived());
@@ -189,18 +190,18 @@ MatrixBase<Derived>::diagonal() const
   *
   * \sa MatrixBase::diagonal(), class Diagonal */
 template<typename Derived>
-inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Dynamic>::Type
+inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<DynamicIndex>::Type
 MatrixBase<Derived>::diagonal(Index index)
 {
-  return typename DiagonalIndexReturnType<Dynamic>::Type(derived(), index);
+  return typename DiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
 }
 
 /** This is the const version of diagonal(Index). */
 template<typename Derived>
-inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Dynamic>::Type
+inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<DynamicIndex>::Type
 MatrixBase<Derived>::diagonal(Index index) const
 {
-  return typename ConstDiagonalIndexReturnType<Dynamic>::Type(derived(), index);
+  return typename ConstDiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
 }
 
 /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
diff --git a/ThirdParty/eigen3/Eigen/src/Core/DiagonalMatrix.h b/ThirdParty/eigen3/Eigen/src/Core/DiagonalMatrix.h
index 6e8b50fab4c..e6c220f4195 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/DiagonalMatrix.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/DiagonalMatrix.h
@@ -56,9 +56,14 @@ class DiagonalBase : public EigenBase<Derived>
     inline Index rows() const { return diagonal().size(); }
     inline Index cols() const { return diagonal().size(); }
 
+    /** \returns the diagonal matrix product of \c *this by the matrix \a matrix.
+      */
     template<typename MatrixDerived>
     const DiagonalProduct<MatrixDerived, Derived, OnTheLeft>
-    operator*(const MatrixBase<MatrixDerived> &matrix) const;
+    operator*(const MatrixBase<MatrixDerived> &matrix) const
+    {
+      return DiagonalProduct<MatrixDerived, Derived, OnTheLeft>(matrix.derived(), derived());
+    }
 
     inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const DiagonalVectorType> >
     inverse() const
@@ -250,7 +255,7 @@ class DiagonalWrapper
     #endif
 
     /** Constructor from expression of diagonal coefficients to wrap. */
-    inline DiagonalWrapper(DiagonalVectorType& diagonal) : m_diagonal(diagonal) {}
+    inline DiagonalWrapper(DiagonalVectorType& a_diagonal) : m_diagonal(a_diagonal) {}
 
     /** \returns a const reference to the wrapped expression of diagonal coefficients. */
     const DiagonalVectorType& diagonal() const { return m_diagonal; }
@@ -284,13 +289,14 @@ MatrixBase<Derived>::asDiagonal() const
   * \sa asDiagonal()
   */
 template<typename Derived>
-bool MatrixBase<Derived>::isDiagonal(RealScalar prec) const
+bool MatrixBase<Derived>::isDiagonal(const RealScalar& prec) const
 {
+  using std::abs;
   if(cols() != rows()) return false;
   RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);
   for(Index j = 0; j < cols(); ++j)
   {
-    RealScalar absOnDiagonal = internal::abs(coeff(j,j));
+    RealScalar absOnDiagonal = abs(coeff(j,j));
     if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;
   }
   for(Index j = 0; j < cols(); ++j)
diff --git a/ThirdParty/eigen3/Eigen/src/Core/DiagonalProduct.h b/ThirdParty/eigen3/Eigen/src/Core/DiagonalProduct.h
index 598c6b3e19a..c03a0c2e12b 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/DiagonalProduct.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/DiagonalProduct.h
@@ -26,14 +26,15 @@ struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
     MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
 
     _StorageOrder = MatrixType::Flags & RowMajorBit ? RowMajor : ColMajor,
-    _PacketOnDiag = !((int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
-                    ||(int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)),
+    _ScalarAccessOnDiag =  !((int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheLeft)
+                          ||(int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheRight)),
     _SameTypes = is_same<typename MatrixType::Scalar, typename DiagonalType::Scalar>::value,
     // FIXME currently we need same types, but in the future the next rule should be the one
-    //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::Flags)&PacketAccessBit))),
-    _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && ((!_PacketOnDiag) || (bool(int(DiagonalType::Flags)&PacketAccessBit))),
+    //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
+    _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
+    _LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0,
 
-    Flags = (HereditaryBits & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),
+    Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0) | AlignedBit,//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
     CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
   };
 };
@@ -54,13 +55,21 @@ class DiagonalProduct : internal::no_assignment_operator,
       eigen_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols()));
     }
 
-    inline Index rows() const { return m_matrix.rows(); }
-    inline Index cols() const { return m_matrix.cols(); }
+    EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); }
 
-    const Scalar coeff(Index row, Index col) const
+    EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
     {
       return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col);
     }
+    
+    EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const
+    {
+      enum {
+        StorageOrder = int(MatrixType::Flags) & RowMajorBit ? RowMajor : ColMajor
+      };
+      return coeff(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx);
+    }
 
     template<int LoadMode>
     EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
@@ -69,11 +78,19 @@ class DiagonalProduct : internal::no_assignment_operator,
         StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor
       };
       const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col;
-
       return packet_impl<LoadMode>(row,col,indexInDiagonalVector,typename internal::conditional<
         ((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
        ||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), internal::true_type, internal::false_type>::type());
     }
+    
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index idx) const
+    {
+      enum {
+        StorageOrder = int(MatrixType::Flags) & RowMajorBit ? RowMajor : ColMajor
+      };
+      return packet<LoadMode>(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx);
+    }
 
   protected:
     template<int LoadMode>
@@ -88,7 +105,7 @@ class DiagonalProduct : internal::no_assignment_operator,
     {
       enum {
         InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,
-        DiagonalVectorPacketLoadMode = (LoadMode == Aligned && ((InnerSize%16) == 0)) ? Aligned : Unaligned
+        DiagonalVectorPacketLoadMode = (LoadMode == Aligned && (((InnerSize%16) == 0) || (int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit)==AlignedBit) ? Aligned : Unaligned)
       };
       return internal::pmul(m_matrix.template packet<LoadMode>(row, col),
                      m_diagonal.diagonal().template packet<DiagonalVectorPacketLoadMode>(id));
@@ -103,19 +120,9 @@ class DiagonalProduct : internal::no_assignment_operator,
 template<typename Derived>
 template<typename DiagonalDerived>
 inline const DiagonalProduct<Derived, DiagonalDerived, OnTheRight>
-MatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &diagonal) const
-{
-  return DiagonalProduct<Derived, DiagonalDerived, OnTheRight>(derived(), diagonal.derived());
-}
-
-/** \returns the diagonal matrix product of \c *this by the matrix \a matrix.
-  */
-template<typename DiagonalDerived>
-template<typename MatrixDerived>
-inline const DiagonalProduct<MatrixDerived, DiagonalDerived, OnTheLeft>
-DiagonalBase<DiagonalDerived>::operator*(const MatrixBase<MatrixDerived> &matrix) const
+MatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &a_diagonal) const
 {
-  return DiagonalProduct<MatrixDerived, DiagonalDerived, OnTheLeft>(matrix.derived(), derived());
+  return DiagonalProduct<Derived, DiagonalDerived, OnTheRight>(derived(), a_diagonal.derived());
 }
 
 } // end namespace Eigen
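
The hunks above add linear (index-based) coefficient and packet access to DiagonalProduct. A usage sketch of the two expression forms involved (illustrative, not part of the patch):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector3d d(1.0, 2.0, 3.0);
  Eigen::Matrix3d m = Eigen::Matrix3d::Ones();

  // Both lines build a lazy DiagonalProduct expression; no dense diagonal matrix is formed.
  Eigen::Matrix3d left  = d.asDiagonal() * m;  // scales rows:    left(i,j)  = d(i) * m(i,j)
  Eigen::Matrix3d right = m * d.asDiagonal(); // scales columns: right(i,j) = m(i,j) * d(j)

  std::cout << left << "\n\n" << right << "\n";
}
```
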
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Dot.h b/ThirdParty/eigen3/Eigen/src/Core/Dot.h
index ae9274e36dd..9d7651f1fd4 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Dot.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Dot.h
@@ -112,7 +112,7 @@ MatrixBase<Derived>::eigen2_dot(const MatrixBase<OtherDerived>& other) const
 template<typename Derived>
 EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
 {
-  return internal::real((*this).cwiseAbs2().sum());
+  return numext::real((*this).cwiseAbs2().sum());
 }
 
 /** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
@@ -124,7 +124,8 @@ EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scala
 template<typename Derived>
 inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
 {
-  return internal::sqrt(squaredNorm());
+  using std::sqrt;
+  return sqrt(squaredNorm());
 }
 
 /** \returns an expression of the quotient of *this by its own norm.
@@ -165,6 +166,7 @@ struct lpNorm_selector
   typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
   static inline RealScalar run(const MatrixBase<Derived>& m)
   {
+    using std::pow;
     return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p);
   }
 };
@@ -223,11 +225,11 @@ MatrixBase<Derived>::lpNorm() const
 template<typename Derived>
 template<typename OtherDerived>
 bool MatrixBase<Derived>::isOrthogonal
-(const MatrixBase<OtherDerived>& other, RealScalar prec) const
+(const MatrixBase<OtherDerived>& other, const RealScalar& prec) const
 {
   typename internal::nested<Derived,2>::type nested(derived());
   typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
-  return internal::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
+  return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
 }
 
 /** \returns true if *this is approximately an unitary matrix,
@@ -242,7 +244,7 @@ bool MatrixBase<Derived>::isOrthogonal
   * Output: \verbinclude MatrixBase_isUnitary.out
   */
 template<typename Derived>
-bool MatrixBase<Derived>::isUnitary(RealScalar prec) const
+bool MatrixBase<Derived>::isUnitary(const RealScalar& prec) const
 {
   typename Derived::Nested nested(derived());
   for(Index i = 0; i < cols(); ++i)
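
A quick sketch of the norm-related entry points touched in this file (values in the comments are exact for this input; illustrative, not part of the patch):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector3d v(3.0, 0.0, 4.0);
  Eigen::Vector3d w(0.0, 1.0, 0.0);

  std::cout << v.squaredNorm() << "\n";              // 25
  std::cout << v.norm() << "\n";                     // 5, via the unqualified sqrt above
  std::cout << v.lpNorm<1>() << "\n";                // 7
  std::cout << v.lpNorm<Eigen::Infinity>() << "\n";  // 4
  std::cout << std::boolalpha
            << v.isOrthogonal(w) << "\n";            // true; prec now passed by const ref
}
```
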
diff --git a/ThirdParty/eigen3/Eigen/src/Core/EigenBase.h b/ThirdParty/eigen3/Eigen/src/Core/EigenBase.h
index 0bbd28bec21..2b8dd1b706f 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/EigenBase.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/EigenBase.h
@@ -139,7 +139,8 @@ MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
   return derived();
 }
 
-/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=() */
+/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
+  */
 template<typename Derived>
 template<typename OtherDerived>
 inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
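
As the amended comment states, applyOnTheRight is equivalent to operator*=; a small sketch (illustrative, not part of the patch):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d m, r;
  m << 1, 2,
       3, 4;
  r << 0, 1,   // permutation that swaps the two columns
       1, 0;

  Eigen::Matrix2d viaOp = m;
  viaOp *= r;
  m.applyOnTheRight(r);   // same effect as operator*=

  std::cout << std::boolalpha << (m == viaOp) << "\n";  // true
}
```
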
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Functors.h b/ThirdParty/eigen3/Eigen/src/Core/Functors.h
index bb7b84adb65..04fb217323d 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Functors.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Functors.h
@@ -154,6 +154,7 @@ template<typename Scalar> struct scalar_hypot_op {
   {
     using std::max;
     using std::min;
+    using std::sqrt;
     Scalar p = (max)(_x, _y);
     Scalar q = (min)(_x, _y);
     Scalar qp = q/p;
@@ -170,7 +171,7 @@ struct functor_traits<scalar_hypot_op<Scalar> > {
   */
 template<typename Scalar, typename OtherScalar> struct scalar_binary_pow_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_binary_pow_op)
-  inline Scalar operator() (const Scalar& a, const OtherScalar& b) const { return internal::pow(a, b); }
+  inline Scalar operator() (const Scalar& a, const OtherScalar& b) const { return numext::pow(a, b); }
 };
 template<typename Scalar, typename OtherScalar>
 struct functor_traits<scalar_binary_pow_op<Scalar,OtherScalar> > {
@@ -204,21 +205,28 @@ struct functor_traits<scalar_difference_op<Scalar> > {
   *
   * \sa class CwiseBinaryOp, Cwise::operator/()
   */
-template<typename Scalar> struct scalar_quotient_op {
+template<typename LhsScalar,typename RhsScalar> struct scalar_quotient_op {
+  enum {
+    // TODO vectorize mixed product
+    Vectorizable = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasDiv && packet_traits<RhsScalar>::HasDiv
+  };
+  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
   EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op)
-  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a / b; }
+  EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a / b; }
   template<typename Packet>
   EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
   { return internal::pdiv(a,b); }
 };
-template<typename Scalar>
-struct functor_traits<scalar_quotient_op<Scalar> > {
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_quotient_op<LhsScalar,RhsScalar> > {
   enum {
-    Cost = 2 * NumTraits<Scalar>::MulCost,
-    PacketAccess = packet_traits<Scalar>::HasDiv
+    Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost), // rough estimate!
+    PacketAccess = scalar_quotient_op<LhsScalar,RhsScalar>::Vectorizable
   };
 };
 
+
+
 /** \internal
   * \brief Template functor to compute the and of two booleans
   *
@@ -280,7 +288,7 @@ struct functor_traits<scalar_opposite_op<Scalar> >
 template<typename Scalar> struct scalar_abs_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op)
   typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs(a); }
+  EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { using std::abs; return abs(a); }
   template<typename Packet>
   EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
   { return internal::pabs(a); }
@@ -302,7 +310,7 @@ struct functor_traits<scalar_abs_op<Scalar> >
 template<typename Scalar> struct scalar_abs2_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op)
   typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs2(a); }
+  EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return numext::abs2(a); }
   template<typename Packet>
   EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
   { return internal::pmul(a,a); }
@@ -318,7 +326,7 @@ struct functor_traits<scalar_abs2_op<Scalar> >
   */
 template<typename Scalar> struct scalar_conjugate_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op)
-  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return internal::conj(a); }
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { using numext::conj; return conj(a); }
   template<typename Packet>
   EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); }
 };
@@ -355,7 +363,7 @@ template<typename Scalar>
 struct scalar_real_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op)
   typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::real(a); }
+  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::real(a); }
 };
 template<typename Scalar>
 struct functor_traits<scalar_real_op<Scalar> >
@@ -370,7 +378,7 @@ template<typename Scalar>
 struct scalar_imag_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op)
   typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::imag(a); }
+  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::imag(a); }
 };
 template<typename Scalar>
 struct functor_traits<scalar_imag_op<Scalar> >
@@ -385,7 +393,7 @@ template<typename Scalar>
 struct scalar_real_ref_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op)
   typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::real_ref(*const_cast<Scalar*>(&a)); }
+  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::real_ref(*const_cast<Scalar*>(&a)); }
 };
 template<typename Scalar>
 struct functor_traits<scalar_real_ref_op<Scalar> >
@@ -400,7 +408,7 @@ template<typename Scalar>
 struct scalar_imag_ref_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op)
   typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::imag_ref(*const_cast<Scalar*>(&a)); }
+  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::imag_ref(*const_cast<Scalar*>(&a)); }
 };
 template<typename Scalar>
 struct functor_traits<scalar_imag_ref_op<Scalar> >
@@ -414,7 +422,7 @@ struct functor_traits<scalar_imag_ref_op<Scalar> >
   */
 template<typename Scalar> struct scalar_exp_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_exp_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::exp(a); }
+  inline const Scalar operator() (const Scalar& a) const { using std::exp; return exp(a); }
   typedef typename packet_traits<Scalar>::type Packet;
   inline Packet packetOp(const Packet& a) const { return internal::pexp(a); }
 };
@@ -430,7 +438,7 @@ struct functor_traits<scalar_exp_op<Scalar> >
   */
 template<typename Scalar> struct scalar_log_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::log(a); }
+  inline const Scalar operator() (const Scalar& a) const { using std::log; return log(a); }
   typedef typename packet_traits<Scalar>::type Packet;
   inline Packet packetOp(const Packet& a) const { return internal::plog(a); }
 };
@@ -543,7 +551,7 @@ struct linspaced_op_impl<Scalar,false>
 {
   typedef typename packet_traits<Scalar>::type Packet;
 
-  linspaced_op_impl(Scalar low, Scalar step) :
+  linspaced_op_impl(const Scalar& low, const Scalar& step) :
   m_low(low), m_step(step),
   m_packetStep(pset1<Packet>(packet_traits<Scalar>::size*step)),
   m_base(padd(pset1<Packet>(low), pmul(pset1<Packet>(step),plset<Scalar>(-packet_traits<Scalar>::size)))) {}
@@ -552,7 +560,7 @@ struct linspaced_op_impl<Scalar,false>
   EIGEN_STRONG_INLINE const Scalar operator() (Index i) const 
   { 
     m_base = padd(m_base, pset1<Packet>(m_step));
-    return m_low+i*m_step; 
+    return m_low+Scalar(i)*m_step; 
   }
 
   template<typename Index>
@@ -572,7 +580,7 @@ struct linspaced_op_impl<Scalar,true>
 {
   typedef typename packet_traits<Scalar>::type Packet;
 
-  linspaced_op_impl(Scalar low, Scalar step) :
+  linspaced_op_impl(const Scalar& low, const Scalar& step) :
   m_low(low), m_step(step),
   m_lowPacket(pset1<Packet>(m_low)), m_stepPacket(pset1<Packet>(m_step)), m_interPacket(plset<Scalar>(0)) {}
 
@@ -601,7 +609,7 @@ template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_o
 template <typename Scalar, bool RandomAccess> struct linspaced_op
 {
   typedef typename packet_traits<Scalar>::type Packet;
-  linspaced_op(Scalar low, Scalar high, int num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}
+  linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}
 
   template<typename Index>
   EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }
@@ -640,12 +648,14 @@ template <typename Scalar, bool RandomAccess> struct linspaced_op
 template<typename Functor> struct functor_has_linear_access { enum { ret = 1 }; };
 template<typename Scalar> struct functor_has_linear_access<scalar_identity_op<Scalar> > { enum { ret = 0 }; };
 
-// in CwiseBinaryOp, we require the Lhs and Rhs to have the same scalar type, except for multiplication
-// where we only require them to have the same _real_ scalar type so one may multiply, say, float by complex<float>.
+// In Eigen, any binary op (Product, CwiseBinaryOp) requires the Lhs and Rhs to have the same scalar type, except for multiplication,
+// where the mixing of different types is handled by scalar_product_traits.
+// In particular, real * complex<real> is allowed.
 // FIXME move this to functor_traits adding a functor_default
-template<typename Functor> struct functor_allows_mixing_real_and_complex { enum { ret = 0 }; };
-template<typename LhsScalar,typename RhsScalar> struct functor_allows_mixing_real_and_complex<scalar_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
-template<typename LhsScalar,typename RhsScalar> struct functor_allows_mixing_real_and_complex<scalar_conj_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
+template<typename Functor> struct functor_is_product_like { enum { ret = 0 }; };
+template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
+template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_conj_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
+template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_quotient_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
 
 
 /** \internal
@@ -674,7 +684,7 @@ struct functor_traits<scalar_add_op<Scalar> >
   */
 template<typename Scalar> struct scalar_sqrt_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::sqrt(a); }
+  inline const Scalar operator() (const Scalar& a) const { using std::sqrt; return sqrt(a); }
   typedef typename packet_traits<Scalar>::type Packet;
   inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); }
 };
@@ -692,7 +702,7 @@ struct functor_traits<scalar_sqrt_op<Scalar> >
   */
 template<typename Scalar> struct scalar_cos_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op)
-  inline Scalar operator() (const Scalar& a) const { return internal::cos(a); }
+  inline Scalar operator() (const Scalar& a) const { using std::cos; return cos(a); }
   typedef typename packet_traits<Scalar>::type Packet;
   inline Packet packetOp(const Packet& a) const { return internal::pcos(a); }
 };
@@ -711,7 +721,7 @@ struct functor_traits<scalar_cos_op<Scalar> >
   */
 template<typename Scalar> struct scalar_sin_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::sin(a); }
+  inline const Scalar operator() (const Scalar& a) const { using std::sin; return sin(a); }
   typedef typename packet_traits<Scalar>::type Packet;
   inline Packet packetOp(const Packet& a) const { return internal::psin(a); }
 };
@@ -731,7 +741,7 @@ struct functor_traits<scalar_sin_op<Scalar> >
   */
 template<typename Scalar> struct scalar_tan_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::tan(a); }
+  inline const Scalar operator() (const Scalar& a) const { using std::tan; return tan(a); }
   typedef typename packet_traits<Scalar>::type Packet;
   inline Packet packetOp(const Packet& a) const { return internal::ptan(a); }
 };
@@ -750,7 +760,7 @@ struct functor_traits<scalar_tan_op<Scalar> >
   */
 template<typename Scalar> struct scalar_acos_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::acos(a); }
+  inline const Scalar operator() (const Scalar& a) const { using std::acos; return acos(a); }
   typedef typename packet_traits<Scalar>::type Packet;
   inline Packet packetOp(const Packet& a) const { return internal::pacos(a); }
 };
@@ -769,7 +779,7 @@ struct functor_traits<scalar_acos_op<Scalar> >
   */
 template<typename Scalar> struct scalar_asin_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op)
-  inline const Scalar operator() (const Scalar& a) const { return internal::asin(a); }
+  inline const Scalar operator() (const Scalar& a) const { using std::asin; return asin(a); }
   typedef typename packet_traits<Scalar>::type Packet;
   inline Packet packetOp(const Packet& a) const { return internal::pasin(a); }
 };
@@ -791,7 +801,7 @@ struct scalar_pow_op {
   // FIXME default copy constructors seems bugged with std::complex<>
   inline scalar_pow_op(const scalar_pow_op& other) : m_exponent(other.m_exponent) { }
   inline scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {}
-  inline Scalar operator() (const Scalar& a) const { return internal::pow(a, m_exponent); }
+  inline Scalar operator() (const Scalar& a) const { return numext::pow(a, m_exponent); }
   const Scalar m_exponent;
 };
 template<typename Scalar>
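
Many hunks in this file replace `internal::foo(a)` with `using std::foo; return foo(a);`. The point of the unqualified call is argument-dependent lookup: user-defined scalar types can provide their own overloads, while the builtins fall back to std::. A standalone sketch of the idiom (`mylib::Fixed` is a made-up type for illustration, not Eigen code):

```cpp
#include <cmath>
#include <iostream>

namespace mylib {
  struct Fixed {             // toy fixed-point scalar, purely illustrative
    long raw;
    explicit Fixed(long r) : raw(r) {}
  };
  Fixed sqrt(Fixed x) { return Fixed(static_cast<long>(std::sqrt(double(x.raw)))); }
}

template<typename Scalar>
Scalar generic_sqrt(const Scalar& a)
{
  using std::sqrt;  // fallback for float/double/long double
  return sqrt(a);   // ADL finds mylib::sqrt when Scalar is mylib::Fixed
}

int main()
{
  std::cout << generic_sqrt(2.25) << "\n";                   // 1.5 via std::sqrt
  std::cout << generic_sqrt(mylib::Fixed(16)).raw << "\n";   // 4 via mylib::sqrt
}
```
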
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Fuzzy.h b/ThirdParty/eigen3/Eigen/src/Core/Fuzzy.h
index d74edcfdb9d..fe63bd2984d 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Fuzzy.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Fuzzy.h
@@ -19,7 +19,7 @@ namespace internal
 template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
 struct isApprox_selector
 {
-  static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec)
+  static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec)
   {
     using std::min;
     typename internal::nested<Derived,2>::type nested(x);
@@ -31,7 +31,7 @@ struct isApprox_selector
 template<typename Derived, typename OtherDerived>
 struct isApprox_selector<Derived, OtherDerived, true>
 {
-  static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar)
+  static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar&)
   {
     return x.matrix() == y.matrix();
   }
@@ -40,16 +40,16 @@ struct isApprox_selector<Derived, OtherDerived, true>
 template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
 struct isMuchSmallerThan_object_selector
 {
-  static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec)
+  static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec)
   {
-    return x.cwiseAbs2().sum() <= abs2(prec) * y.cwiseAbs2().sum();
+    return x.cwiseAbs2().sum() <= numext::abs2(prec) * y.cwiseAbs2().sum();
   }
 };
 
 template<typename Derived, typename OtherDerived>
 struct isMuchSmallerThan_object_selector<Derived, OtherDerived, true>
 {
-  static bool run(const Derived& x, const OtherDerived&, typename Derived::RealScalar)
+  static bool run(const Derived& x, const OtherDerived&, const typename Derived::RealScalar&)
   {
     return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
   }
@@ -58,16 +58,16 @@ struct isMuchSmallerThan_object_selector<Derived, OtherDerived, true>
 template<typename Derived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
 struct isMuchSmallerThan_scalar_selector
 {
-  static bool run(const Derived& x, const typename Derived::RealScalar& y, typename Derived::RealScalar prec)
+  static bool run(const Derived& x, const typename Derived::RealScalar& y, const typename Derived::RealScalar& prec)
   {
-    return x.cwiseAbs2().sum() <= abs2(prec * y);
+    return x.cwiseAbs2().sum() <= numext::abs2(prec * y);
   }
 };
 
 template<typename Derived>
 struct isMuchSmallerThan_scalar_selector<Derived, true>
 {
-  static bool run(const Derived& x, const typename Derived::RealScalar&, typename Derived::RealScalar)
+  static bool run(const Derived& x, const typename Derived::RealScalar&, const typename Derived::RealScalar&)
   {
     return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
   }
@@ -97,7 +97,7 @@ template<typename Derived>
 template<typename OtherDerived>
 bool DenseBase<Derived>::isApprox(
   const DenseBase<OtherDerived>& other,
-  RealScalar prec
+  const RealScalar& prec
 ) const
 {
   return internal::isApprox_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
@@ -119,7 +119,7 @@ bool DenseBase<Derived>::isApprox(
 template<typename Derived>
 bool DenseBase<Derived>::isMuchSmallerThan(
   const typename NumTraits<Scalar>::Real& other,
-  RealScalar prec
+  const RealScalar& prec
 ) const
 {
   return internal::isMuchSmallerThan_scalar_selector<Derived>::run(derived(), other, prec);
@@ -139,7 +139,7 @@ template<typename Derived>
 template<typename OtherDerived>
 bool DenseBase<Derived>::isMuchSmallerThan(
   const DenseBase<OtherDerived>& other,
-  RealScalar prec
+  const RealScalar& prec
 ) const
 {
   return internal::isMuchSmallerThan_object_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
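
Usage sketch of the fuzzy comparisons whose precision argument now travels by const reference (illustrative, not part of the patch):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector2d a(1.0, 2.0);
  Eigen::Vector2d b = a + Eigen::Vector2d::Constant(1e-13);

  std::cout << std::boolalpha
            << a.isApprox(b) << "\n"          // true at the default precision (1e-12)
            << a.isApprox(b, 1e-15) << "\n"   // false at a tighter precision
            << Eigen::Vector2d::Constant(1e-15).isMuchSmallerThan(a) << "\n";  // true
}
```
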
diff --git a/ThirdParty/eigen3/Eigen/src/Core/GeneralProduct.h b/ThirdParty/eigen3/Eigen/src/Core/GeneralProduct.h
index bfc2a67b12f..2a59d94645e 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/GeneralProduct.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/GeneralProduct.h
@@ -222,7 +222,29 @@ class GeneralProduct<Lhs, Rhs, InnerProduct>
 ***********************************************************************/
 
 namespace internal {
-template<int StorageOrder> struct outer_product_selector;
+
+// Column major
+template<typename ProductType, typename Dest, typename Func>
+EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest& dest, const Func& func, const false_type&)
+{
+  typedef typename Dest::Index Index;
+  // FIXME make sure lhs is sequentially stored
+  // FIXME not very good if rhs is real and lhs complex while alpha is real too
+  const Index cols = dest.cols();
+  for (Index j=0; j<cols; ++j)
+    func(dest.col(j), prod.rhs().coeff(j) * prod.lhs());
+}
+
+// Row major
+template<typename ProductType, typename Dest, typename Func>
+EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest& dest, const Func& func, const true_type&) {
+  typedef typename Dest::Index Index;
+  // FIXME make sure rhs is sequentially stored
+  // FIXME not very good if lhs is real and rhs complex while alpha is real too
+  const Index rows = dest.rows();
+  for (Index i=0; i<rows; ++i)
+    func(dest.row(i), prod.lhs().coeff(i) * prod.rhs());
+}
 
 template<typename Lhs, typename Rhs>
 struct traits<GeneralProduct<Lhs,Rhs,OuterProduct> >
@@ -235,6 +257,8 @@ template<typename Lhs, typename Rhs>
 class GeneralProduct<Lhs, Rhs, OuterProduct>
   : public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
 {
+    template<typename T> struct IsRowMajor : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
+    
   public:
     EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
 
@@ -243,41 +267,39 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>
       EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
         YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
     }
-
-    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
-    {
-      internal::outer_product_selector<(int(Dest::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(*this, dest, alpha);
+    
+    struct set  { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived()  = src; } };
+    struct add  { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } };
+    struct sub  { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() -= src; } };
+    struct adds {
+      Scalar m_scale;
+      adds(const Scalar& s) : m_scale(s) {}
+      template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const {
+        dst.const_cast_derived() += m_scale * src;
+      }
+    };
+    
+    template<typename Dest>
+    inline void evalTo(Dest& dest) const {
+      internal::outer_product_selector_run(*this, dest, set(), IsRowMajor<Dest>());
+    }
+    
+    template<typename Dest>
+    inline void addTo(Dest& dest) const {
+      internal::outer_product_selector_run(*this, dest, add(), IsRowMajor<Dest>());
     }
-};
-
-namespace internal {
 
-template<> struct outer_product_selector<ColMajor> {
-  template<typename ProductType, typename Dest>
-  static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
-    typedef typename Dest::Index Index;
-    // FIXME make sure lhs is sequentially stored
-    // FIXME not very good if rhs is real and lhs complex while alpha is real too
-    const Index cols = dest.cols();
-    for (Index j=0; j<cols; ++j)
-      dest.col(j) += (alpha * prod.rhs().coeff(j)) * prod.lhs();
-  }
-};
+    template<typename Dest>
+    inline void subTo(Dest& dest) const {
+      internal::outer_product_selector_run(*this, dest, sub(), IsRowMajor<Dest>());
+    }
 
-template<> struct outer_product_selector<RowMajor> {
-  template<typename ProductType, typename Dest>
-  static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
-    typedef typename Dest::Index Index;
-    // FIXME make sure rhs is sequentially stored
-    // FIXME not very good if lhs is real and rhs complex while alpha is real too
-    const Index rows = dest.rows();
-    for (Index i=0; i<rows; ++i)
-      dest.row(i) += (alpha * prod.lhs().coeff(i)) * prod.rhs();
-  }
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
+    {
+      internal::outer_product_selector_run(*this, dest, adds(alpha), IsRowMajor<Dest>());
+    }
 };
 
-} // end namespace internal
-
 /***********************************************************************
 *  Implementation of General Matrix Vector Product
 ***********************************************************************/
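
Before the matrix-vector hunks: the outer-product refactor above folds the two storage-order specializations into one loop whose assignment step is injected as a tiny functor (set/add/sub/adds). The same dispatch pattern in miniature (names are illustrative, not Eigen's):

```cpp
#include <iostream>

// One loop, the assignment operation supplied as a functor,
// as in outer_product_selector_run above.
struct set { void operator()(double& d, double s) const { d  = s; } };
struct add { void operator()(double& d, double s) const { d += s; } };

template<typename Func>
void apply(double* dst, int n, double value, const Func& func)
{
  for (int i = 0; i < n; ++i)
    func(dst[i], value);
}

int main()
{
  double buf[3] = { 1.0, 1.0, 1.0 };
  apply(buf, 3, 2.0, set());   // overwrite:  {2, 2, 2}
  apply(buf, 3, 0.5, add());   // accumulate: {2.5, 2.5, 2.5}
  std::cout << buf[0] << " " << buf[1] << " " << buf[2] << "\n";
}
```
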
@@ -311,7 +333,7 @@ class GeneralProduct<Lhs, Rhs, GemvProduct>
     typedef typename Lhs::Scalar LhsScalar;
     typedef typename Rhs::Scalar RhsScalar;
 
-    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    GeneralProduct(const Lhs& a_lhs, const Rhs& a_rhs) : Base(a_lhs,a_rhs)
     {
 //       EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::Scalar, typename Rhs::Scalar>::value),
 //         YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
@@ -320,7 +342,7 @@ class GeneralProduct<Lhs, Rhs, GemvProduct>
     enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight };
     typedef typename internal::conditional<int(Side)==OnTheRight,_LhsNested,_RhsNested>::type MatrixType;
 
-    template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+    template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
     {
       eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols());
       internal::gemv_selector<Side,(int(MatrixType::Flags)&RowMajorBit) ? RowMajor : ColMajor,
@@ -335,7 +357,7 @@ template<int StorageOrder, bool BlasCompatible>
 struct gemv_selector<OnTheLeft,StorageOrder,BlasCompatible>
 {
   template<typename ProductType, typename Dest>
-  static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+  static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
   {
     Transpose<Dest> destT(dest);
     enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor };
@@ -384,7 +406,7 @@ struct gemv_static_vector_if<Scalar,Size,MaxSize,true>
 template<> struct gemv_selector<OnTheRight,ColMajor,true>
 {
   template<typename ProductType, typename Dest>
-  static inline void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+  static inline void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
   {
     typedef typename ProductType::Index Index;
     typedef typename ProductType::LhsScalar   LhsScalar;
@@ -413,7 +435,7 @@ template<> struct gemv_selector<OnTheRight,ColMajor,true>
 
     gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
 
-    bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0));
+    bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
     bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
     
     RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
@@ -457,7 +479,7 @@ template<> struct gemv_selector<OnTheRight,ColMajor,true>
 template<> struct gemv_selector<OnTheRight,RowMajor,true>
 {
   template<typename ProductType, typename Dest>
-  static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+  static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
   {
     typedef typename ProductType::LhsScalar LhsScalar;
     typedef typename ProductType::RhsScalar RhsScalar;
@@ -508,7 +530,7 @@ template<> struct gemv_selector<OnTheRight,RowMajor,true>
 template<> struct gemv_selector<OnTheRight,ColMajor,false>
 {
   template<typename ProductType, typename Dest>
-  static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+  static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
   {
     typedef typename Dest::Index Index;
     // TODO make sure dest is sequentially stored in memory, otherwise use a temp
@@ -521,7 +543,7 @@ template<> struct gemv_selector<OnTheRight,ColMajor,false>
 template<> struct gemv_selector<OnTheRight,RowMajor,false>
 {
   template<typename ProductType, typename Dest>
-  static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+  static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
   {
     typedef typename Dest::Index Index;
     // TODO make sure rhs is sequentially stored in memory, otherwise use a temp
diff --git a/ThirdParty/eigen3/Eigen/src/Core/GenericPacketMath.h b/ThirdParty/eigen3/Eigen/src/Core/GenericPacketMath.h
index 858fb243ec8..5f783ebeee8 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/GenericPacketMath.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/GenericPacketMath.h
@@ -106,7 +106,7 @@ pnegate(const Packet& a) { return -a; }
 
 /** \internal \returns conj(a) (coeff-wise) */
 template<typename Packet> inline Packet
-pconj(const Packet& a) { return conj(a); }
+pconj(const Packet& a) { return numext::conj(a); }
 
 /** \internal \returns a * b (coeff-wise) */
 template<typename Packet> inline Packet
@@ -130,7 +130,7 @@ pmax(const Packet& a,
 
 /** \internal \returns the absolute value of \a a */
 template<typename Packet> inline Packet
-pabs(const Packet& a) { return abs(a); }
+pabs(const Packet& a) { using std::abs; return abs(a); }
 
 /** \internal \returns the bitwise and of \a a and \a b */
 template<typename Packet> inline Packet
@@ -156,7 +156,11 @@ pload(const typename unpacket_traits<Packet>::type* from) { return *from; }
 template<typename Packet> inline Packet
 ploadu(const typename unpacket_traits<Packet>::type* from) { return *from; }
 
-/** \internal \returns a packet with elements of \a *from duplicated, e.g.: (from[0],from[0],from[1],from[1]) */
+/** \internal \returns a packet with elements of \a *from duplicated.
+  * For instance, for a packet of 8 elements, 4 scalars will be read from \a *from and
+  * duplicated to form: {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]}
+  * Currently, this function is only used for scalar * complex products.
+ */
 template<typename Packet> inline Packet
 ploaddup(const typename unpacket_traits<Packet>::type* from) { return *from; }
 
@@ -215,7 +219,12 @@ template<typename Packet> inline Packet preverse(const Packet& a)
 
 /** \internal \returns \a a with real and imaginary part flipped (for complex type only) */
 template<typename Packet> inline Packet pcplxflip(const Packet& a)
-{ return Packet(imag(a),real(a)); }
+{
+  // FIXME: uncomment the following in case we drop the internal imag and real functions.
+//   using std::imag;
+//   using std::real;
+  return Packet(imag(a),real(a));
+}
 
 /**************************
 * Special math functions
@@ -223,35 +232,35 @@ template<typename Packet> inline Packet pcplxflip(const Packet& a)
 
 /** \internal \returns the sine of \a a (coeff-wise) */
 template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet psin(const Packet& a) { return sin(a); }
+Packet psin(const Packet& a) { using std::sin; return sin(a); }
 
 /** \internal \returns the cosine of \a a (coeff-wise) */
 template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet pcos(const Packet& a) { return cos(a); }
+Packet pcos(const Packet& a) { using std::cos; return cos(a); }
 
 /** \internal \returns the tan of \a a (coeff-wise) */
 template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet ptan(const Packet& a) { return tan(a); }
+Packet ptan(const Packet& a) { using std::tan; return tan(a); }
 
 /** \internal \returns the arc sine of \a a (coeff-wise) */
 template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet pasin(const Packet& a) { return asin(a); }
+Packet pasin(const Packet& a) { using std::asin; return asin(a); }
 
 /** \internal \returns the arc cosine of \a a (coeff-wise) */
 template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet pacos(const Packet& a) { return acos(a); }
+Packet pacos(const Packet& a) { using std::acos; return acos(a); }
 
 /** \internal \returns the exp of \a a (coeff-wise) */
 template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet pexp(const Packet& a) { return exp(a); }
+Packet pexp(const Packet& a) { using std::exp; return exp(a); }
 
 /** \internal \returns the log of \a a (coeff-wise) */
 template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet plog(const Packet& a) { return log(a); }
+Packet plog(const Packet& a) { using std::log; return log(a); }
 
 /** \internal \returns the square-root of \a a (coeff-wise) */
 template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet psqrt(const Packet& a) { return sqrt(a); }
+Packet psqrt(const Packet& a) { using std::sqrt; return sqrt(a); }
 
 /***************************************************************************
 * The following functions might not have to be overwritten for vectorized types
@@ -302,8 +311,21 @@ struct palign_impl
   static inline void run(PacketType&, const PacketType&) {}
 };
 
-/** \internal update \a first using the concatenation of the \a Offset last elements
-  * of \a first and packet_size minus \a Offset first elements of \a second */
+/** \internal update \a first using the concatenation of the packet_size minus \a Offset last elements
+  * of \a first and \a Offset first elements of \a second.
+  * 
+  * This function is currently only used to optimize matrix-vector products on unaligned matrices.
+  * It takes 2 packets that represent a contiguous memory array, and returns a packet starting
+  * at the position \a Offset. For instance, for packets of 4 elements, we have:
+  *  Input:
+  *  - first = {f0,f1,f2,f3}
+  *  - second = {s0,s1,s2,s3}
+  * Output: 
+  *   - if Offset==0 then {f0,f1,f2,f3}
+  *   - if Offset==1 then {f1,f2,f3,s0}
+  *   - if Offset==2 then {f2,f3,s0,s1}
+  *   - if Offset==3 then {f3,s0,s1,s2}
+  */
 template<int Offset,typename PacketType>
 inline void palign(PacketType& first, const PacketType& second)
 {
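
A concrete reading of the palign contract documented above, with 4-element packets modeled as plain arrays (illustrative only; real packets are SIMD registers):

```cpp
#include <iostream>

int main()
{
  const int N = 4, Offset = 1;
  double first[N]  = { 10, 11, 12, 13 };  // f0..f3
  double second[N] = { 20, 21, 22, 23 };  // s0..s3

  // Take the N-Offset last elements of first, then the Offset first elements of second.
  double out[N];
  for (int i = 0; i < N; ++i)
    out[i] = (i + Offset < N) ? first[i + Offset] : second[i + Offset - N];

  for (int i = 0; i < N; ++i)
    std::cout << out[i] << " ";   // 11 12 13 20, i.e. {f1,f2,f3,s0}
  std::cout << "\n";
}
```
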
diff --git a/ThirdParty/eigen3/Eigen/src/Core/GlobalFunctions.h b/ThirdParty/eigen3/Eigen/src/Core/GlobalFunctions.h
index e63726c4735..2acf9772334 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/GlobalFunctions.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/GlobalFunctions.h
@@ -1,7 +1,7 @@
 // This file is part of Eigen, a lightweight C++ template library
 // for linear algebra.
 //
-// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
 // Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
 //
 // This Source Code Form is subject to the terms of the Mozilla
@@ -11,7 +11,7 @@
 #ifndef EIGEN_GLOBAL_FUNCTIONS_H
 #define EIGEN_GLOBAL_FUNCTIONS_H
 
-#define EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(NAME,FUNCTOR) \
+#define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME,FUNCTOR) \
   template<typename Derived> \
   inline const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> \
   NAME(const Eigen::ArrayBase<Derived>& x) { \
@@ -35,20 +35,21 @@
   };
 
 
-namespace std
+namespace Eigen
 {
-  EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(real,scalar_real_op)
-  EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(imag,scalar_imag_op)
-  EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sin,scalar_sin_op)
-  EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(cos,scalar_cos_op)
-  EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(asin,scalar_asin_op)
-  EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(acos,scalar_acos_op)
-  EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(tan,scalar_tan_op)
-  EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(exp,scalar_exp_op)
-  EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(log,scalar_log_op)
-  EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(abs,scalar_abs_op)
-  EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sqrt,scalar_sqrt_op)
-
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(real,scalar_real_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(imag,scalar_imag_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(conj,scalar_conjugate_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sin,scalar_sin_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cos,scalar_cos_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asin,scalar_asin_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acos,scalar_acos_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tan,scalar_tan_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp,scalar_exp_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log,scalar_log_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs,scalar_abs_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sqrt,scalar_sqrt_op)
+  
   template<typename Derived>
   inline const Eigen::CwiseUnaryOp<Eigen::internal::scalar_pow_op<typename Derived::Scalar>, const Derived>
   pow(const Eigen::ArrayBase<Derived>& x, const typename Derived::Scalar& exponent) {
@@ -64,16 +65,13 @@ namespace std
       exponents.derived()
     );
   }
-}
-
-namespace Eigen
-{
+  
   /**
   * \brief Component-wise division of a scalar by array elements.
   **/
   template <typename Derived>
   inline const Eigen::CwiseUnaryOp<Eigen::internal::scalar_inverse_mult_op<typename Derived::Scalar>, const Derived>
-    operator/(typename Derived::Scalar s, const Eigen::ArrayBase<Derived>& a)
+    operator/(const typename Derived::Scalar& s, const Eigen::ArrayBase<Derived>& a)
   {
     return Eigen::CwiseUnaryOp<Eigen::internal::scalar_inverse_mult_op<typename Derived::Scalar>, const Derived>(
       a.derived(),
@@ -85,19 +83,10 @@ namespace Eigen
   {
     EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real,scalar_real_op)
     EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag,scalar_imag_op)
-    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(sin,scalar_sin_op)
-    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(cos,scalar_cos_op)
-    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(asin,scalar_asin_op)
-    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(acos,scalar_acos_op)
-    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(tan,scalar_tan_op)
-    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(exp,scalar_exp_op)
-    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(log,scalar_log_op)
-    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs,scalar_abs_op)
     EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2,scalar_abs2_op)
-    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(sqrt,scalar_sqrt_op)
   }
 }
 
-// TODO: cleanly disable those functions that are not supported on Array (internal::real_ref, internal::random, internal::isApprox...)
+// TODO: cleanly disable those functions that are not supported on Array (numext::real_ref, internal::random, internal::isApprox...)
 
 #endif // EIGEN_GLOBAL_FUNCTIONS_H
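
These coefficient-wise free functions now live in namespace Eigen rather than being injected into namespace std (which the standard forbids). Usage sketch (illustrative, not part of the patch):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(3, 0.0, 1.0);

  // Qualified calls; unqualified sin(a) / pow(a, 2.0) also work via ADL on Eigen arguments.
  Eigen::ArrayXd s = Eigen::sin(a);
  Eigen::ArrayXd p = Eigen::pow(a, 2.0);

  std::cout << s.transpose() << "\n" << p.transpose() << "\n";
}
```
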
diff --git a/ThirdParty/eigen3/Eigen/src/Core/IO.h b/ThirdParty/eigen3/Eigen/src/Core/IO.h
index cc8e18a0076..c8d5f6379b9 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/IO.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/IO.h
@@ -55,9 +55,8 @@ struct IOFormat
     const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="",
     const std::string& _matPrefix="", const std::string& _matSuffix="")
   : matPrefix(_matPrefix), matSuffix(_matSuffix), rowPrefix(_rowPrefix), rowSuffix(_rowSuffix), rowSeparator(_rowSeparator),
-    coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags)
+    rowSpacer(""), coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags)
   {
-    rowSpacer = "";
     int i = int(matSuffix.length())-1;
     while (i>=0 && matSuffix[i]!='\n')
     {
@@ -129,6 +128,7 @@ struct significant_decimals_default_impl
   static inline int run()
   {
     using std::ceil;
+    using std::log;
     return cast<RealScalar,int>(ceil(-log(NumTraits<RealScalar>::epsilon())/log(RealScalar(10))));
   }
 };
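
The IOFormat change only moves the rowSpacer initialization into the member-initializer list (same empty-string starting value). Usage sketch of the constructor touched above (illustrative, not part of the patch):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d m;
  m << 1.5,  2.0,
       3.25, 4.0;

  // precision, flags, coeffSeparator, rowSeparator, rowPrefix, rowSuffix, matPrefix, matSuffix
  Eigen::IOFormat fmt(Eigen::StreamPrecision, 0, ", ", ";\n", "[", "]", "[", "]");
  std::cout << m.format(fmt) << "\n";   // [[1.5, 2];
                                        //  [3.25, 4]]
}
```
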
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Map.h b/ThirdParty/eigen3/Eigen/src/Core/Map.h
index 15a19226e29..f804c89d63e 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Map.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Map.h
@@ -133,36 +133,36 @@ template<typename PlainObjectType, int MapOptions, typename StrideType> class Ma
 
     /** Constructor in the fixed-size case.
       *
-      * \param data pointer to the array to map
-      * \param stride optional Stride object, passing the strides.
+      * \param dataPtr pointer to the array to map
+      * \param a_stride optional Stride object, passing the strides.
       */
-    inline Map(PointerArgType data, const StrideType& stride = StrideType())
-      : Base(cast_to_pointer_type(data)), m_stride(stride)
+    inline Map(PointerArgType dataPtr, const StrideType& a_stride = StrideType())
+      : Base(cast_to_pointer_type(dataPtr)), m_stride(a_stride)
     {
       PlainObjectType::Base::_check_template_params();
     }
 
     /** Constructor in the dynamic-size vector case.
       *
-      * \param data pointer to the array to map
-      * \param size the size of the vector expression
-      * \param stride optional Stride object, passing the strides.
+      * \param dataPtr pointer to the array to map
+      * \param a_size the size of the vector expression
+      * \param a_stride optional Stride object, passing the strides.
       */
-    inline Map(PointerArgType data, Index size, const StrideType& stride = StrideType())
-      : Base(cast_to_pointer_type(data), size), m_stride(stride)
+    inline Map(PointerArgType dataPtr, Index a_size, const StrideType& a_stride = StrideType())
+      : Base(cast_to_pointer_type(dataPtr), a_size), m_stride(a_stride)
     {
       PlainObjectType::Base::_check_template_params();
     }
 
     /** Constructor in the dynamic-size matrix case.
       *
-      * \param data pointer to the array to map
-      * \param rows the number of rows of the matrix expression
-      * \param cols the number of columns of the matrix expression
-      * \param stride optional Stride object, passing the strides.
+      * \param dataPtr pointer to the array to map
+      * \param nbRows the number of rows of the matrix expression
+      * \param nbCols the number of columns of the matrix expression
+      * \param a_stride optional Stride object, passing the strides.
       */
-    inline Map(PointerArgType data, Index rows, Index cols, const StrideType& stride = StrideType())
-      : Base(cast_to_pointer_type(data), rows, cols), m_stride(stride)
+    inline Map(PointerArgType dataPtr, Index nbRows, Index nbCols, const StrideType& a_stride = StrideType())
+      : Base(cast_to_pointer_type(dataPtr), nbRows, nbCols), m_stride(a_stride)
     {
       PlainObjectType::Base::_check_template_params();
     }
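
The three Map constructors whose parameters are renamed above, in use (illustrative, not part of the patch):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main()
{
  double data[6] = { 1, 2, 3, 4, 5, 6 };

  Eigen::Map<Eigen::Vector3d> fixed(data);      // fixed-size: pointer only
  Eigen::Map<Eigen::VectorXd> vec(data, 6);     // dynamic vector: pointer + size
  Eigen::Map<Eigen::MatrixXd> mat(data, 2, 3);  // dynamic matrix: pointer + rows + cols

  std::cout << fixed.transpose() << "\n"
            << vec.transpose()   << "\n"
            << mat               << "\n";        // column-major view of the same buffer
}
```
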
diff --git a/ThirdParty/eigen3/Eigen/src/Core/MapBase.h b/ThirdParty/eigen3/Eigen/src/Core/MapBase.h
index a388d61ea92..6876de588c0 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/MapBase.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/MapBase.h
@@ -87,9 +87,9 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
       */
     inline const Scalar* data() const { return m_data; }
 
-    inline const Scalar& coeff(Index row, Index col) const
+    inline const Scalar& coeff(Index rowId, Index colId) const
     {
-      return m_data[col * colStride() + row * rowStride()];
+      return m_data[colId * colStride() + rowId * rowStride()];
     }
 
     inline const Scalar& coeff(Index index) const
@@ -98,9 +98,9 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
       return m_data[index * innerStride()];
     }
 
-    inline const Scalar& coeffRef(Index row, Index col) const
+    inline const Scalar& coeffRef(Index rowId, Index colId) const
     {
-      return this->m_data[col * colStride() + row * rowStride()];
+      return this->m_data[colId * colStride() + rowId * rowStride()];
     }
 
     inline const Scalar& coeffRef(Index index) const
@@ -110,10 +110,10 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
     }
 
     template<int LoadMode>
-    inline PacketScalar packet(Index row, Index col) const
+    inline PacketScalar packet(Index rowId, Index colId) const
     {
       return internal::ploadt<PacketScalar, LoadMode>
-               (m_data + (col * colStride() + row * rowStride()));
+               (m_data + (colId * colStride() + rowId * rowStride()));
     }
 
     template<int LoadMode>
@@ -123,29 +123,29 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
       return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
     }
 
-    inline MapBase(PointerType data) : m_data(data), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
+    inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
     {
       EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
       checkSanity();
     }
 
-    inline MapBase(PointerType data, Index size)
-            : m_data(data),
-              m_rows(RowsAtCompileTime == Dynamic ? size : Index(RowsAtCompileTime)),
-              m_cols(ColsAtCompileTime == Dynamic ? size : Index(ColsAtCompileTime))
+    inline MapBase(PointerType dataPtr, Index vecSize)
+            : m_data(dataPtr),
+              m_rows(RowsAtCompileTime == Dynamic ? vecSize : Index(RowsAtCompileTime)),
+              m_cols(ColsAtCompileTime == Dynamic ? vecSize : Index(ColsAtCompileTime))
     {
       EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-      eigen_assert(size >= 0);
-      eigen_assert(data == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == size);
+      eigen_assert(vecSize >= 0);
+      eigen_assert(dataPtr == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == vecSize);
       checkSanity();
     }
 
-    inline MapBase(PointerType data, Index rows, Index cols)
-            : m_data(data), m_rows(rows), m_cols(cols)
+    inline MapBase(PointerType dataPtr, Index nbRows, Index nbCols)
+            : m_data(dataPtr), m_rows(nbRows), m_cols(nbCols)
     {
-      eigen_assert( (data == 0)
-              || (   rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
-                  && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)));
+      eigen_assert( (dataPtr == 0)
+              || (   nbRows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == nbRows)
+                  && nbCols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == nbCols)));
       checkSanity();
     }
 
@@ -210,23 +210,23 @@ template<typename Derived> class MapBase<Derived, WriteAccessors>
     }
 
     template<int StoreMode>
-    inline void writePacket(Index row, Index col, const PacketScalar& x)
+    inline void writePacket(Index row, Index col, const PacketScalar& val)
     {
       internal::pstoret<Scalar, PacketScalar, StoreMode>
-               (this->m_data + (col * colStride() + row * rowStride()), x);
+               (this->m_data + (col * colStride() + row * rowStride()), val);
     }
 
     template<int StoreMode>
-    inline void writePacket(Index index, const PacketScalar& x)
+    inline void writePacket(Index index, const PacketScalar& val)
     {
       EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
       internal::pstoret<Scalar, PacketScalar, StoreMode>
-                (this->m_data + index * innerStride(), x);
+                (this->m_data + index * innerStride(), val);
     }
 
-    explicit inline MapBase(PointerType data) : Base(data) {}
-    inline MapBase(PointerType data, Index size) : Base(data, size) {}
-    inline MapBase(PointerType data, Index rows, Index cols) : Base(data, rows, cols) {}
+    explicit inline MapBase(PointerType dataPtr) : Base(dataPtr) {}
+    inline MapBase(PointerType dataPtr, Index vecSize) : Base(dataPtr, vecSize) {}
+    inline MapBase(PointerType dataPtr, Index nbRows, Index nbCols) : Base(dataPtr, nbRows, nbCols) {}
 
     Derived& operator=(const MapBase& other)
     {
diff --git a/ThirdParty/eigen3/Eigen/src/Core/MathFunctions.h b/ThirdParty/eigen3/Eigen/src/Core/MathFunctions.h
index 05e913f2fec..2bfc5ebd98a 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/MathFunctions.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/MathFunctions.h
@@ -51,16 +51,15 @@ struct global_math_functions_filtering_base
   typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type;
 };
 
-#define EIGEN_MATHFUNC_IMPL(func, scalar) func##_impl<typename global_math_functions_filtering_base<scalar>::type>
-#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename func##_retval<typename global_math_functions_filtering_base<scalar>::type>::type
-
+#define EIGEN_MATHFUNC_IMPL(func, scalar) Eigen::internal::func##_impl<typename Eigen::internal::global_math_functions_filtering_base<scalar>::type>
+#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename Eigen::internal::func##_retval<typename Eigen::internal::global_math_functions_filtering_base<scalar>::type>::type
 
 /****************************************************************************
 * Implementation of real                                                 *
 ****************************************************************************/
 
-template<typename Scalar>
-struct real_impl
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
+struct real_default_impl
 {
   typedef typename NumTraits<Scalar>::Real RealScalar;
   static inline RealScalar run(const Scalar& x)
@@ -69,34 +68,32 @@ struct real_impl
   }
 };
 
-template<typename RealScalar>
-struct real_impl<std::complex<RealScalar> >
+template<typename Scalar>
+struct real_default_impl<Scalar,true>
 {
-  static inline RealScalar run(const std::complex<RealScalar>& x)
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar& x)
   {
     using std::real;
     return real(x);
   }
 };
 
+template<typename Scalar> struct real_impl : real_default_impl<Scalar> {};
+
 template<typename Scalar>
 struct real_retval
 {
   typedef typename NumTraits<Scalar>::Real type;
 };
 
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x);
-}
 
 /****************************************************************************
 * Implementation of imag                                                 *
 ****************************************************************************/
 
-template<typename Scalar>
-struct imag_impl
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
+struct imag_default_impl
 {
   typedef typename NumTraits<Scalar>::Real RealScalar;
   static inline RealScalar run(const Scalar&)
@@ -105,28 +102,25 @@ struct imag_impl
   }
 };
 
-template<typename RealScalar>
-struct imag_impl<std::complex<RealScalar> >
+template<typename Scalar>
+struct imag_default_impl<Scalar,true>
 {
-  static inline RealScalar run(const std::complex<RealScalar>& x)
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar& x)
   {
     using std::imag;
     return imag(x);
   }
 };
 
+template<typename Scalar> struct imag_impl : imag_default_impl<Scalar> {};
+
 template<typename Scalar>
 struct imag_retval
 {
   typedef typename NumTraits<Scalar>::Real type;
 };
 
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x);
-}
-
 /****************************************************************************
 * Implementation of real_ref                                             *
 ****************************************************************************/
@@ -151,18 +145,6 @@ struct real_ref_retval
   typedef typename NumTraits<Scalar>::Real & type;
 };
 
-template<typename Scalar>
-inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x)
-{
-  return real_ref_impl<Scalar>::run(x);
-}
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x);
-}
-
 /****************************************************************************
 * Implementation of imag_ref                                             *
 ****************************************************************************/
@@ -203,23 +185,11 @@ struct imag_ref_retval
   typedef typename NumTraits<Scalar>::Real & type;
 };
 
-template<typename Scalar>
-inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x)
-{
-  return imag_ref_impl<Scalar>::run(x);
-}
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x);
-}
-
 /****************************************************************************
 * Implementation of conj                                                 *
 ****************************************************************************/
 
-template<typename Scalar>
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
 struct conj_impl
 {
   static inline Scalar run(const Scalar& x)
@@ -228,10 +198,10 @@ struct conj_impl
   }
 };
 
-template<typename RealScalar>
-struct conj_impl<std::complex<RealScalar> >
+template<typename Scalar>
+struct conj_impl<Scalar,true>
 {
-  static inline std::complex<RealScalar> run(const std::complex<RealScalar>& x)
+  static inline Scalar run(const Scalar& x)
   {
     using std::conj;
     return conj(x);
@@ -244,39 +214,6 @@ struct conj_retval
   typedef Scalar type;
 };
 
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x);
-}
-
-/****************************************************************************
-* Implementation of abs                                                  *
-****************************************************************************/
-
-template<typename Scalar>
-struct abs_impl
-{
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  static inline RealScalar run(const Scalar& x)
-  {
-    using std::abs;
-    return abs(x);
-  }
-};
-
-template<typename Scalar>
-struct abs_retval
-{
-  typedef typename NumTraits<Scalar>::Real type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(abs, Scalar) abs(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(abs, Scalar)::run(x);
-}
-
 /****************************************************************************
 * Implementation of abs2                                                 *
 ****************************************************************************/
@@ -306,12 +243,6 @@ struct abs2_retval
   typedef typename NumTraits<Scalar>::Real type;
 };
 
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x);
-}
-
 /****************************************************************************
 * Implementation of norm1                                                *
 ****************************************************************************/
@@ -322,6 +253,7 @@ struct norm1_default_impl
   typedef typename NumTraits<Scalar>::Real RealScalar;
   static inline RealScalar run(const Scalar& x)
   {
+    using std::abs;
     return abs(real(x)) + abs(imag(x));
   }
 };
@@ -331,6 +263,7 @@ struct norm1_default_impl<Scalar, false>
 {
   static inline Scalar run(const Scalar& x)
   {
+    using std::abs;
     return abs(x);
   }
 };
@@ -344,12 +277,6 @@ struct norm1_retval
   typedef typename NumTraits<Scalar>::Real type;
 };
 
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x);
-}
-
 /****************************************************************************
 * Implementation of hypot                                                *
 ****************************************************************************/
@@ -362,9 +289,12 @@ struct hypot_impl
   {
     using std::max;
     using std::min;
+    using std::abs;
+    using std::sqrt;
     RealScalar _x = abs(x);
     RealScalar _y = abs(y);
     RealScalar p = (max)(_x, _y);
+    if(p==RealScalar(0)) return 0;
     RealScalar q = (min)(_x, _y);
     RealScalar qp = q/p;
     return p * sqrt(RealScalar(1) + qp*qp);
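
Note on the hunk above: the new p == 0 guard returns early when both arguments are zero, so the division q/p below can no longer produce NaN; the rest is the classic overflow-safe formulation that factors out the larger magnitude before squaring. A minimal standalone sketch of the same technique (safe_hypot is an illustrative name, not Eigen's API):

    #include <algorithm>
    #include <cmath>

    // Overflow-safe hypot: divide by the larger magnitude so the squared
    // term (q/p)^2 stays in [0, 1] and cannot overflow or underflow early.
    double safe_hypot(double x, double y)
    {
      double ax = std::abs(x), ay = std::abs(y);
      double p = std::max(ax, ay);
      if (p == 0.0) return 0.0;          // the new guard: avoids 0/0
      double qp = std::min(ax, ay) / p;  // in [0, 1]
      return p * std::sqrt(1.0 + qp * qp);
    }
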
@@ -377,12 +307,6 @@ struct hypot_retval
   typedef typename NumTraits<Scalar>::Real type;
 };
 
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y)
-{
-  return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y);
-}
-
 /****************************************************************************
 * Implementation of cast                                                 *
 ****************************************************************************/
@@ -405,97 +329,29 @@ inline NewType cast(const OldType& x)
 }
 
 /****************************************************************************
-* Implementation of sqrt                                                 *
+* Implementation of atanh2                                                *
 ****************************************************************************/
 
 template<typename Scalar, bool IsInteger>
-struct sqrt_default_impl
-{
-  static inline Scalar run(const Scalar& x)
-  {
-    using std::sqrt;
-    return sqrt(x);
-  }
-};
-
-template<typename Scalar>
-struct sqrt_default_impl<Scalar, true>
-{
-  static inline Scalar run(const Scalar&)
-  {
-#ifdef EIGEN2_SUPPORT
-    eigen_assert(!NumTraits<Scalar>::IsInteger);
-#else
-    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
-#endif
-    return Scalar(0);
-  }
-};
-
-template<typename Scalar>
-struct sqrt_impl : sqrt_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
-
-template<typename Scalar>
-struct sqrt_retval
-{
-  typedef Scalar type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(sqrt, Scalar) sqrt(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(sqrt, Scalar)::run(x);
-}
-
-/****************************************************************************
-* Implementation of standard unary real functions (exp, log, sin, cos, ...  *
-****************************************************************************/
-
-// This macro instanciate all the necessary template mechanism which is common to all unary real functions.
-#define EIGEN_MATHFUNC_STANDARD_REAL_UNARY(NAME) \
-  template<typename Scalar, bool IsInteger> struct NAME##_default_impl {            \
-    static inline Scalar run(const Scalar& x) { using std::NAME; return NAME(x); }  \
-  };                                                                                \
-  template<typename Scalar> struct NAME##_default_impl<Scalar, true> {              \
-    static inline Scalar run(const Scalar&) {                                       \
-      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)                                       \
-      return Scalar(0);                                                             \
-    }                                                                               \
-  };                                                                                \
-  template<typename Scalar> struct NAME##_impl                                      \
-    : NAME##_default_impl<Scalar, NumTraits<Scalar>::IsInteger>                     \
-  {};                                                                               \
-  template<typename Scalar> struct NAME##_retval { typedef Scalar type; };          \
-  template<typename Scalar>                                                         \
-  inline EIGEN_MATHFUNC_RETVAL(NAME, Scalar) NAME(const Scalar& x) {                \
-    return EIGEN_MATHFUNC_IMPL(NAME, Scalar)::run(x);                               \
-  }
-
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(exp)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(log)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(sin)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(cos)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(tan)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(asin)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(acos)
-
-/****************************************************************************
-* Implementation of atan2                                                *
-****************************************************************************/
-
-template<typename Scalar, bool IsInteger>
-struct atan2_default_impl
+struct atanh2_default_impl
 {
   typedef Scalar retval;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
   static inline Scalar run(const Scalar& x, const Scalar& y)
   {
-    using std::atan2;
-    return atan2(x, y);
+    using std::abs;
+    using std::log;
+    using std::sqrt;
+    Scalar z = x / y;
+    if (y == Scalar(0) || abs(z) > sqrt(NumTraits<RealScalar>::epsilon()))
+      return RealScalar(0.5) * log((y + x) / (y - x));
+    else
+      return z + z*z*z / RealScalar(3);
   }
 };
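
Note: atanh2(x, y) evaluates atanh(x/y) = 0.5 * log((y+x)/(y-x)). For |x/y| below sqrt(epsilon) the logarithmic form cancels catastrophically, so the implementation above falls back to the two-term Taylor series z + z^3/3. A hedged double-precision illustration of that branch (my_atanh2 is a made-up name, not Eigen code):

    #include <cmath>
    #include <limits>

    double my_atanh2(double x, double y)
    {
      double z = x / y;
      // Exact logarithmic form when z is large enough to be safe:
      if (y == 0.0 || std::abs(z) > std::sqrt(std::numeric_limits<double>::epsilon()))
        return 0.5 * std::log((y + x) / (y - x));
      // Truncated Taylor series of atanh(z) for tiny z:
      return z + z * z * z / 3.0;
    }
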
 
 template<typename Scalar>
-struct atan2_default_impl<Scalar, true>
+struct atanh2_default_impl<Scalar, true>
 {
   static inline Scalar run(const Scalar&, const Scalar&)
   {
@@ -505,20 +361,14 @@ struct atan2_default_impl<Scalar, true>
 };
 
 template<typename Scalar>
-struct atan2_impl : atan2_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
+struct atanh2_impl : atanh2_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
 
 template<typename Scalar>
-struct atan2_retval
+struct atanh2_retval
 {
   typedef Scalar type;
 };
 
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(atan2, Scalar) atan2(const Scalar& x, const Scalar& y)
-{
-  return EIGEN_MATHFUNC_IMPL(atan2, Scalar)::run(x, y);
-}
-
 /****************************************************************************
 * Implementation of pow                                                  *
 ****************************************************************************/
@@ -562,12 +412,6 @@ struct pow_retval
   typedef Scalar type;
 };
 
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y)
-{
-  return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y);
-}
-
 /****************************************************************************
 * Implementation of random                                               *
 ****************************************************************************/
@@ -666,11 +510,10 @@ struct random_default_impl<Scalar, false, true>
 #else
     enum { rand_bits = floor_log2<(unsigned int)(RAND_MAX)+1>::value,
            scalar_bits = sizeof(Scalar) * CHAR_BIT,
-           shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits))
+           shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits)),
+           offset = NumTraits<Scalar>::IsSigned ? (1 << (EIGEN_PLAIN_ENUM_MIN(rand_bits,scalar_bits)-1)) : 0
     };
-    Scalar x = Scalar(std::rand() >> shift);
-    Scalar offset = NumTraits<Scalar>::IsSigned ? Scalar(1 << (rand_bits-1)) : Scalar(0);
-    return x - offset;
+    return Scalar((std::rand() >> shift) - offset);
 #endif
   }
 };
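
Note: the rewrite folds the signed-type recentering offset into the enum, so shift and offset are both compile-time constants; shift drops surplus random bits when std::rand() yields more bits than the scalar can hold, and offset maps the unsigned output range onto a signed one. A sketch of the arithmetic under the simplifying assumption rand_bits >= scalar_bits (random_short is illustrative, not Eigen code):

    #include <climits>
    #include <cstdlib>

    // For Scalar = short with RAND_MAX = 2^31 - 1: rand_bits = 31,
    // scalar_bits = 16, shift = 15, offset = 1 << 15. Then rand() >> 15
    // is uniform on [0, 2^16) and subtracting the offset recenters it
    // onto [-2^15, 2^15).
    short random_short()
    {
      const int rand_bits   = 31;  // bits delivered by std::rand() here
      const int scalar_bits = sizeof(short) * CHAR_BIT;
      const int shift       = rand_bits - scalar_bits;
      const int offset      = 1 << (scalar_bits - 1);
      return static_cast<short>((std::rand() >> shift) - offset);
    }
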
@@ -702,6 +545,97 @@ inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
   return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
 }
 
+} // end namespace internal
+
+/****************************************************************************
+* Generic math function                                                    *
+****************************************************************************/
+
+namespace numext {
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x)
+{
+  return internal::real_ref_impl<Scalar>::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x)
+{
+  return internal::imag_ref_impl<Scalar>::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(atanh2, Scalar) atanh2(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(atanh2, Scalar)::run(x, y);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y);
+}
+
+// std::isfinite is non-standard, so let's define our own version,
+// even though it is not very efficient.
+template<typename T> bool (isfinite)(const T& x)
+{
+  return x<NumTraits<T>::highest() && x>NumTraits<T>::lowest();
+}
+
+} // end namespace numext
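
Note: this hunk regroups the scalar helpers that used to be free functions in internal into the new numext namespace, which is how user code should reach them from Eigen 3.2 on. A short usage sketch:

    #include <Eigen/Core>
    #include <complex>
    #include <iostream>

    int main()
    {
      std::complex<double> z(3.0, 4.0);
      std::cout << Eigen::numext::real(z) << "\n";  // 3
      std::cout << Eigen::numext::imag(z) << "\n";  // 4
      std::cout << Eigen::numext::abs2(z) << "\n";  // 25, i.e. |z|^2
      std::cout << Eigen::numext::conj(z) << "\n";  // (3,-4)
      return 0;
    }
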
+
+namespace internal {
+
 /****************************************************************************
 * Implementation of fuzzy comparisons                                       *
 ****************************************************************************/
@@ -718,11 +652,13 @@ struct scalar_fuzzy_default_impl<Scalar, false, false>
   template<typename OtherScalar>
   static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)
   {
+    using std::abs;
     return abs(x) <= abs(y) * prec;
   }
   static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
   {
     using std::min;
+    using std::abs;
     return abs(x - y) <= (min)(abs(x), abs(y)) * prec;
   }
   static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec)
@@ -757,12 +693,12 @@ struct scalar_fuzzy_default_impl<Scalar, true, false>
   template<typename OtherScalar>
   static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)
   {
-    return abs2(x) <= abs2(y) * prec * prec;
+    return numext::abs2(x) <= numext::abs2(y) * prec * prec;
   }
   static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
   {
     using std::min;
-    return abs2(x - y) <= (min)(abs2(x), abs2(y)) * prec * prec;
+    return numext::abs2(x - y) <= (min)(numext::abs2(x), numext::abs2(y)) * prec * prec;
   }
 };
 
@@ -824,17 +760,7 @@ template<> struct scalar_fuzzy_impl<bool>
   
 };
 
-/****************************************************************************
-* Special functions                                                          *
-****************************************************************************/
-
-// std::isfinite is non standard, so let's define our own version,
-// even though it is not very efficient.
-template<typename T> bool (isfinite)(const T& x)
-{
-  return x<NumTraits<T>::highest() && x>NumTraits<T>::lowest();
-}
-
+  
 } // end namespace internal
 
 } // end namespace Eigen
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Matrix.h b/ThirdParty/eigen3/Eigen/src/Core/Matrix.h
index 99160b591b0..0ba5d90cc60 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Matrix.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Matrix.h
@@ -200,16 +200,16 @@ class Matrix
       *
       * \sa resize(Index,Index)
       */
-    EIGEN_STRONG_INLINE explicit Matrix() : Base()
+    EIGEN_STRONG_INLINE Matrix() : Base()
     {
       Base::_check_template_params();
-      EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
     }
 
     // FIXME is it still needed
     Matrix(internal::constructor_without_unaligned_array_assert)
       : Base(internal::constructor_without_unaligned_array_assert())
-    { Base::_check_template_params(); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED }
+    { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED }
 
     /** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors
       *
@@ -224,7 +224,7 @@ class Matrix
       EIGEN_STATIC_ASSERT_VECTOR_ONLY(Matrix)
       eigen_assert(dim >= 0);
       eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim);
-      EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
     }
 
     #ifndef EIGEN_PARSED_BY_DOXYGEN
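
Note: the macro rename reflects that Eigen 3.2 supports two coefficient-poisoning modes, zero filling and NaN filling. Defining the NaN variant before the first Eigen include makes reads of uninitialized coefficients visible immediately; a small sketch:

    // Define one of the two debug options before any Eigen header:
    //   EIGEN_INITIALIZE_MATRICES_BY_ZERO or EIGEN_INITIALIZE_MATRICES_BY_NAN
    #define EIGEN_INITIALIZE_MATRICES_BY_NAN
    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd m(2, 2);  // coefficients start as quiet NaN, not garbage
      std::cout << m << "\n";   // prints nan in every entry
      return 0;
    }
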
diff --git a/ThirdParty/eigen3/Eigen/src/Core/MatrixBase.h b/ThirdParty/eigen3/Eigen/src/Core/MatrixBase.h
index 36ea2cee81a..9193b6abb3b 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/MatrixBase.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/MatrixBase.h
@@ -162,6 +162,9 @@ template<typename Derived> class MatrixBase
 #ifndef EIGEN_PARSED_BY_DOXYGEN
     template<typename ProductDerived, typename Lhs, typename Rhs>
     Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);
+
+    template<typename MatrixPower, typename Lhs, typename Rhs>
+    Derived& lazyAssign(const MatrixPowerProduct<MatrixPower, Lhs,Rhs>& other);
 #endif // not EIGEN_PARSED_BY_DOXYGEN
 
     template<typename OtherDerived>
@@ -212,8 +215,8 @@ template<typename Derived> class MatrixBase
 
     typedef Diagonal<Derived> DiagonalReturnType;
     DiagonalReturnType diagonal();
-    typedef const Diagonal<const Derived> ConstDiagonalReturnType;
-    const ConstDiagonalReturnType diagonal() const;
+    typedef typename internal::add_const<Diagonal<const Derived> >::type ConstDiagonalReturnType;
+    ConstDiagonalReturnType diagonal() const;
 
     template<int Index> struct DiagonalIndexReturnType { typedef Diagonal<Derived,Index> Type; };
     template<int Index> struct ConstDiagonalIndexReturnType { typedef const Diagonal<const Derived,Index> Type; };
@@ -224,11 +227,11 @@ template<typename Derived> class MatrixBase
     // Note: The "MatrixBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
     // On the other hand they confuse MSVC8...
     #if (defined _MSC_VER) && (_MSC_VER >= 1500) // 2008 or later
-    typename MatrixBase::template DiagonalIndexReturnType<Dynamic>::Type diagonal(Index index);
-    typename MatrixBase::template ConstDiagonalIndexReturnType<Dynamic>::Type diagonal(Index index) const;
+    typename MatrixBase::template DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
+    typename MatrixBase::template ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
     #else
-    typename DiagonalIndexReturnType<Dynamic>::Type diagonal(Index index);
-    typename ConstDiagonalIndexReturnType<Dynamic>::Type diagonal(Index index) const;
+    typename DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
+    typename ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
     #endif
 
     #ifdef EIGEN2_SUPPORT
@@ -255,7 +258,7 @@ template<typename Derived> class MatrixBase
     template<unsigned int UpLo> typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;
 
     const SparseView<Derived> sparseView(const Scalar& m_reference = Scalar(0),
-                                         typename NumTraits<Scalar>::Real m_epsilon = NumTraits<Scalar>::dummy_precision()) const;
+                                         const typename NumTraits<Scalar>::Real& m_epsilon = NumTraits<Scalar>::dummy_precision()) const;
     static const IdentityReturnType Identity();
     static const IdentityReturnType Identity(Index rows, Index cols);
     static const BasisReturnType Unit(Index size, Index i);
@@ -271,16 +274,16 @@ template<typename Derived> class MatrixBase
     Derived& setIdentity();
     Derived& setIdentity(Index rows, Index cols);
 
-    bool isIdentity(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
-    bool isDiagonal(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isIdentity(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isDiagonal(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
 
-    bool isUpperTriangular(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
-    bool isLowerTriangular(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isUpperTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isLowerTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
 
     template<typename OtherDerived>
     bool isOrthogonal(const MatrixBase<OtherDerived>& other,
-                      RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
-    bool isUnitary(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+                      const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isUnitary(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
 
     /** \returns true if each coefficients of \c *this and \a other are all exactly equal.
       * \warning When using floating point scalar values you probably should rather use a
@@ -314,7 +317,7 @@ template<typename Derived> class MatrixBase
     MatrixBase<Derived>& matrix() { return *this; }
     const MatrixBase<Derived>& matrix() const { return *this; }
 
-    /** \returns an \link ArrayBase Array \endlink expression of this matrix
+    /** \returns an \link Eigen::ArrayBase Array \endlink expression of this matrix
       * \sa ArrayBase::matrix() */
     ArrayWrapper<Derived> array() { return derived(); }
     const ArrayWrapper<const Derived> array() const { return derived(); }
@@ -454,6 +457,7 @@ template<typename Derived> class MatrixBase
     const MatrixFunctionReturnValue<Derived> sin() const;
     const MatrixSquareRootReturnValue<Derived> sqrt() const;
     const MatrixLogarithmReturnValue<Derived> log() const;
+    const MatrixPowerReturnValue<Derived> pow(const RealScalar& p) const;
 
 #ifdef EIGEN2_SUPPORT
     template<typename ProductDerived, typename Lhs, typename Rhs>
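
Note: the new pow() declaration, like sin(), sqrt() and log() just above it, is only a hook; the implementation lives in the unsupported MatrixFunctions module, whose header must be included for the call to compile. A hedged usage sketch:

    #include <unsupported/Eigen/MatrixFunctions>
    #include <iostream>

    int main()
    {
      Eigen::Matrix2d A;
      A << 4, 0,
           0, 9;
      // Fractional matrix power: S * S == A, i.e. a matrix square root.
      Eigen::Matrix2d S = A.pow(0.5);
      std::cout << S << "\n";  // [2 0; 0 3]
      return 0;
    }
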
diff --git a/ThirdParty/eigen3/Eigen/src/Core/NoAlias.h b/ThirdParty/eigen3/Eigen/src/Core/NoAlias.h
index ecb3fa2850e..768bfb18ca4 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/NoAlias.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/NoAlias.h
@@ -80,8 +80,17 @@ class NoAlias
     template<typename Lhs, typename Rhs, int NestingFlags>
     EIGEN_STRONG_INLINE ExpressionType& operator-=(const CoeffBasedProduct<Lhs,Rhs,NestingFlags>& other)
     { return m_expression.derived() -= CoeffBasedProduct<Lhs,Rhs,NestByRefBit>(other.lhs(), other.rhs()); }
+    
+    template<typename OtherDerived>
+    ExpressionType& operator=(const ReturnByValue<OtherDerived>& func)
+    { return m_expression = func; }
 #endif
 
+    ExpressionType& expression() const
+    {
+      return m_expression;
+    }
+
   protected:
     ExpressionType& m_expression;
 };
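
For context: noalias() is the caller's promise that the destination does not overlap the operands, letting Eigen write a product result directly instead of through a temporary; the added operator= merely extends that path to ReturnByValue expressions. Typical use:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 4);
      Eigen::MatrixXd C(4, 4);
      C.noalias() = A * B;  // no temporary: C is known not to alias A or B
      return 0;
    }
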
diff --git a/ThirdParty/eigen3/Eigen/src/Core/NumTraits.h b/ThirdParty/eigen3/Eigen/src/Core/NumTraits.h
index c94ef026b42..bac9e50b857 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/NumTraits.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/NumTraits.h
@@ -140,6 +140,9 @@ struct NumTraits<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
     AddCost  = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::AddCost,
     MulCost  = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::MulCost
   };
+  
+  static inline RealScalar epsilon() { return NumTraits<RealScalar>::epsilon(); }
+  static inline RealScalar dummy_precision() { return NumTraits<RealScalar>::dummy_precision(); }
 };
 
 } // end namespace Eigen
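
Note: the added forwards give Array-valued "scalars" a usable epsilon() and dummy_precision(), which the fuzzy comparisons need; both simply delegate to the traits of the underlying real scalar:

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      typedef Eigen::Array<double, 2, 2> Array22d;
      // Both forwards resolve to NumTraits<double>:
      std::cout << Eigen::NumTraits<Array22d>::epsilon() << "\n";
      std::cout << Eigen::NumTraits<Array22d>::dummy_precision() << "\n";
      return 0;
    }
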
diff --git a/ThirdParty/eigen3/Eigen/src/Core/PermutationMatrix.h b/ThirdParty/eigen3/Eigen/src/Core/PermutationMatrix.h
index 60a05c8618d..4fc5dd3189a 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/PermutationMatrix.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/PermutationMatrix.h
@@ -139,9 +139,9 @@ class PermutationBase : public EigenBase<Derived>
 
     /** Resizes to given size.
       */
-    inline void resize(Index size)
+    inline void resize(Index newSize)
     {
-      indices().resize(size);
+      indices().resize(newSize);
     }
 
     /** Sets *this to be the identity permutation matrix */
@@ -153,9 +153,9 @@ class PermutationBase : public EigenBase<Derived>
 
     /** Sets *this to be the identity permutation matrix of given size.
       */
-    void setIdentity(Index size)
+    void setIdentity(Index newSize)
     {
-      resize(size);
+      resize(newSize);
       setIdentity();
     }
 
@@ -317,7 +317,7 @@ class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompile
       * array's size.
       */
     template<typename Other>
-    explicit inline PermutationMatrix(const MatrixBase<Other>& indices) : m_indices(indices)
+    explicit inline PermutationMatrix(const MatrixBase<Other>& a_indices) : m_indices(a_indices)
     {}
 
     /** Convert the Transpositions \a tr to a permutation matrix */
@@ -406,12 +406,12 @@ class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,
     typedef typename IndicesType::Scalar Index;
     #endif
 
-    inline Map(const Index* indices)
-      : m_indices(indices)
+    inline Map(const Index* indicesPtr)
+      : m_indices(indicesPtr)
     {}
 
-    inline Map(const Index* indices, Index size)
-      : m_indices(indices,size)
+    inline Map(const Index* indicesPtr, Index size)
+      : m_indices(indicesPtr,size)
     {}
 
     /** Copies the other permutation into *this */
@@ -490,8 +490,8 @@ class PermutationWrapper : public PermutationBase<PermutationWrapper<_IndicesTyp
     typedef typename Traits::IndicesType IndicesType;
     #endif
 
-    inline PermutationWrapper(const IndicesType& indices)
-      : m_indices(indices)
+    inline PermutationWrapper(const IndicesType& a_indices)
+      : m_indices(a_indices)
     {}
 
     /** const version of indices(). */
@@ -541,24 +541,25 @@ struct permut_matrix_product_retval
  : public ReturnByValue<permut_matrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
 {
     typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+    typedef typename MatrixType::Index Index;
 
     permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix)
       : m_permutation(perm), m_matrix(matrix)
     {}
 
-    inline int rows() const { return m_matrix.rows(); }
-    inline int cols() const { return m_matrix.cols(); }
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
 
     template<typename Dest> inline void evalTo(Dest& dst) const
     {
-      const int n = Side==OnTheLeft ? rows() : cols();
+      const Index n = Side==OnTheLeft ? rows() : cols();
 
       if(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix))
       {
         // apply the permutation inplace
         Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(m_permutation.size());
         mask.fill(false);
-        int r = 0;
+        Index r = 0;
         while(r < m_permutation.size())
         {
           // search for the next seed
@@ -566,10 +567,10 @@ struct permut_matrix_product_retval
           if(r>=m_permutation.size())
             break;
           // we got one, let's follow it until we are back to the seed
-          int k0 = r++;
-          int kPrev = k0;
+          Index k0 = r++;
+          Index kPrev = k0;
           mask.coeffRef(k0) = true;
-          for(int k=m_permutation.indices().coeff(k0); k!=k0; k=m_permutation.indices().coeff(k))
+          for(Index k=m_permutation.indices().coeff(k0); k!=k0; k=m_permutation.indices().coeff(k))
           {
                   Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>(dst, k)
             .swap(Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>
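
Note: the in-place branch applies the permutation by following its cycles: find an unvisited seed, swap rows (or columns) along the index chain until the cycle closes, and record progress in the mask. The substance of this hunk is only that the counters are now Index rather than int, so large matrices do not overflow. A standalone sketch of the cycle-following idea on a plain array (permute_in_place is illustrative, not Eigen's code):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Moves data[i] to position perm[i], in place, one cycle at a time.
    void permute_in_place(std::vector<double>& data, const std::vector<int>& perm)
    {
      std::vector<bool> done(data.size(), false);
      for (std::size_t seed = 0; seed < data.size(); ++seed)
      {
        if (done[seed]) continue;          // search for the next seed
        done[seed] = true;
        for (int k = perm[seed]; k != static_cast<int>(seed); k = perm[k])
        {
          std::swap(data[seed], data[k]);  // rotate the cycle one step
          done[k] = true;
        }
      }
    }
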
diff --git a/ThirdParty/eigen3/Eigen/src/Core/PlainObjectBase.h b/ThirdParty/eigen3/Eigen/src/Core/PlainObjectBase.h
index cbe9e3b020c..af0a479c764 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/PlainObjectBase.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/PlainObjectBase.h
@@ -11,28 +11,41 @@
 #ifndef EIGEN_DENSESTORAGEBASE_H
 #define EIGEN_DENSESTORAGEBASE_H
 
-#ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
-# define EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=Scalar(0);
+#if defined(EIGEN_INITIALIZE_MATRICES_BY_ZERO)
+# define EIGEN_INITIALIZE_COEFFS
+# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=Scalar(0);
+#elif defined(EIGEN_INITIALIZE_MATRICES_BY_NAN)
+# define EIGEN_INITIALIZE_COEFFS
+# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=std::numeric_limits<Scalar>::quiet_NaN();
 #else
-# define EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+# undef EIGEN_INITIALIZE_COEFFS
+# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
 #endif
 
 namespace Eigen {
 
 namespace internal {
 
-template<typename Index>
-EIGEN_ALWAYS_INLINE void check_rows_cols_for_overflow(Index rows, Index cols)
-{
-  // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242
-  // we assume Index is signed
-  Index max_index = (size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed
-  bool error = (rows < 0  || cols < 0)  ? true
-             : (rows == 0 || cols == 0) ? false
-                                        : (rows > max_index / cols);
-  if (error)
-    throw_std_bad_alloc();
-}
+template<int MaxSizeAtCompileTime> struct check_rows_cols_for_overflow {
+  template<typename Index>
+  static EIGEN_ALWAYS_INLINE void run(Index, Index)
+  {
+  }
+};
+
+template<> struct check_rows_cols_for_overflow<Dynamic> {
+  template<typename Index>
+  static EIGEN_ALWAYS_INLINE void run(Index rows, Index cols)
+  {
+    // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242
+    // we assume Index is signed
+    Index max_index = (size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed
+    bool error = (rows == 0 || cols == 0) ? false
+               : (rows > max_index / cols);
+    if (error)
+      throw_std_bad_alloc();
+  }
+};
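
Note: making the check a template specialized on MaxSizeAtCompileTime lets fixed-size matrices compile the test away entirely; only Dynamic storage pays for it. The negative-size rejection did not disappear: it moved into the eigen_assert in resize() below. The Dynamic test itself amounts to (sketch, assuming a signed ptrdiff_t-like Index):

    #include <cstddef>
    #include <new>

    typedef std::ptrdiff_t Index;  // assumed signed, as in the comment above

    void check_overflow_dynamic(Index rows, Index cols)
    {
      // Largest value representable in the signed Index, e.g. 2^63 - 1.
      Index max_index = (std::size_t(1) << (8 * sizeof(Index) - 1)) - 1;
      bool error = (rows == 0 || cols == 0) ? false : (rows > max_index / cols);
      if (error)
        throw std::bad_alloc();
    }
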
 
 template <typename Derived, typename OtherDerived = Derived, bool IsVector = bool(Derived::IsVectorAtCompileTime)> struct conservative_resize_like_impl;
 
@@ -119,12 +132,12 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
     EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); }
     EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); }
 
-    EIGEN_STRONG_INLINE const Scalar& coeff(Index row, Index col) const
+    EIGEN_STRONG_INLINE const Scalar& coeff(Index rowId, Index colId) const
     {
       if(Flags & RowMajorBit)
-        return m_storage.data()[col + row * m_storage.cols()];
+        return m_storage.data()[colId + rowId * m_storage.cols()];
       else // column-major
-        return m_storage.data()[row + col * m_storage.rows()];
+        return m_storage.data()[rowId + colId * m_storage.rows()];
     }
 
     EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
@@ -132,12 +145,12 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
       return m_storage.data()[index];
     }
 
-    EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
+    EIGEN_STRONG_INLINE Scalar& coeffRef(Index rowId, Index colId)
     {
       if(Flags & RowMajorBit)
-        return m_storage.data()[col + row * m_storage.cols()];
+        return m_storage.data()[colId + rowId * m_storage.cols()];
       else // column-major
-        return m_storage.data()[row + col * m_storage.rows()];
+        return m_storage.data()[rowId + colId * m_storage.rows()];
     }
 
     EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
@@ -145,12 +158,12 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
       return m_storage.data()[index];
     }
 
-    EIGEN_STRONG_INLINE const Scalar& coeffRef(Index row, Index col) const
+    EIGEN_STRONG_INLINE const Scalar& coeffRef(Index rowId, Index colId) const
     {
       if(Flags & RowMajorBit)
-        return m_storage.data()[col + row * m_storage.cols()];
+        return m_storage.data()[colId + rowId * m_storage.cols()];
       else // column-major
-        return m_storage.data()[row + col * m_storage.rows()];
+        return m_storage.data()[rowId + colId * m_storage.rows()];
     }
 
     EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const
@@ -160,12 +173,12 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
 
     /** \internal */
     template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+    EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
     {
       return internal::ploadt<PacketScalar, LoadMode>
                (m_storage.data() + (Flags & RowMajorBit
-                                   ? col + row * m_storage.cols()
-                                   : row + col * m_storage.rows()));
+                                   ? colId + rowId * m_storage.cols()
+                                   : rowId + colId * m_storage.rows()));
     }
 
     /** \internal */
@@ -177,19 +190,19 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
 
     /** \internal */
     template<int StoreMode>
-    EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketScalar& x)
+    EIGEN_STRONG_INLINE void writePacket(Index rowId, Index colId, const PacketScalar& val)
     {
       internal::pstoret<Scalar, PacketScalar, StoreMode>
               (m_storage.data() + (Flags & RowMajorBit
-                                   ? col + row * m_storage.cols()
-                                   : row + col * m_storage.rows()), x);
+                                   ? colId + rowId * m_storage.cols()
+                                   : rowId + colId * m_storage.rows()), val);
     }
 
     /** \internal */
     template<int StoreMode>
-    EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& x)
+    EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& val)
     {
-      internal::pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, x);
+      internal::pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, val);
     }
 
     /** \returns a const pointer to the data array of this matrix */
@@ -216,17 +229,22 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
       *
       * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t)
       */
-    EIGEN_STRONG_INLINE void resize(Index rows, Index cols)
-    {
-      #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
-        internal::check_rows_cols_for_overflow(rows, cols);
-        Index size = rows*cols;
+    EIGEN_STRONG_INLINE void resize(Index nbRows, Index nbCols)
+    {
+      eigen_assert(   EIGEN_IMPLIES(RowsAtCompileTime!=Dynamic,nbRows==RowsAtCompileTime)
+                   && EIGEN_IMPLIES(ColsAtCompileTime!=Dynamic,nbCols==ColsAtCompileTime)
+                   && EIGEN_IMPLIES(RowsAtCompileTime==Dynamic && MaxRowsAtCompileTime!=Dynamic,nbRows<=MaxRowsAtCompileTime)
+                   && EIGEN_IMPLIES(ColsAtCompileTime==Dynamic && MaxColsAtCompileTime!=Dynamic,nbCols<=MaxColsAtCompileTime)
+                   && nbRows>=0 && nbCols>=0 && "Invalid sizes when resizing a matrix or array.");
+      internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(nbRows, nbCols);
+      #ifdef EIGEN_INITIALIZE_COEFFS
+        Index size = nbRows*nbCols;
         bool size_changed = size != this->size();
-        m_storage.resize(size, rows, cols);
-        if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+        m_storage.resize(size, nbRows, nbCols);
+        if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
       #else
-        internal::check_rows_cols_for_overflow(rows, cols);
-        m_storage.resize(rows*cols, rows, cols);
+        internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(nbRows, nbCols);
+        m_storage.resize(nbRows*nbCols, nbRows, nbCols);
       #endif
     }
 
@@ -244,16 +262,16 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
     inline void resize(Index size)
     {
       EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase)
-      eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size);
-      #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
+      eigen_assert(((SizeAtCompileTime == Dynamic && (MaxSizeAtCompileTime==Dynamic || size<=MaxSizeAtCompileTime)) || SizeAtCompileTime == size) && size>=0);
+      #ifdef EIGEN_INITIALIZE_COEFFS
         bool size_changed = size != this->size();
       #endif
       if(RowsAtCompileTime == 1)
         m_storage.resize(size, 1, size);
       else
         m_storage.resize(size, size, 1);
-      #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
-        if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+      #ifdef EIGEN_INITIALIZE_COEFFS
+        if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
       #endif
     }
 
@@ -265,9 +283,9 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
       *
       * \sa resize(Index,Index)
       */
-    inline void resize(NoChange_t, Index cols)
+    inline void resize(NoChange_t, Index nbCols)
     {
-      resize(rows(), cols);
+      resize(rows(), nbCols);
     }
 
     /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special value \c NoChange
@@ -278,9 +296,9 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
       *
       * \sa resize(Index,Index)
       */
-    inline void resize(Index rows, NoChange_t)
+    inline void resize(Index nbRows, NoChange_t)
     {
-      resize(rows, cols());
+      resize(nbRows, cols());
     }
 
     /** Resizes \c *this to have the same dimensions as \a other.
@@ -294,7 +312,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
     EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other)
     {
       const OtherDerived& other = _other.derived();
-      internal::check_rows_cols_for_overflow(other.rows(), other.cols());
+      internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(other.rows(), other.cols());
       const Index othersize = other.rows()*other.cols();
       if(RowsAtCompileTime == 1)
       {
@@ -318,9 +336,9 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
       * Matrices are resized relative to the top-left element. In case values need to be 
       * appended to the matrix they will be uninitialized.
       */
-    EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols)
+    EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, Index nbCols)
     {
-      internal::conservative_resize_like_impl<Derived>::run(*this, rows, cols);
+      internal::conservative_resize_like_impl<Derived>::run(*this, nbRows, nbCols);
     }
 
     /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
@@ -330,10 +348,10 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
       *
       * In case the matrix is growing, new rows will be uninitialized.
       */
-    EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t)
+    EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, NoChange_t)
     {
       // Note: see the comment in conservativeResize(Index,Index)
-      conservativeResize(rows, cols());
+      conservativeResize(nbRows, cols());
     }
 
     /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
@@ -343,10 +361,10 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
       *
       * In case the matrix is growing, new columns will be uninitialized.
       */
-    EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols)
+    EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index nbCols)
     {
       // Note: see the comment in conservativeResize(Index,Index)
-      conservativeResize(rows(), cols);
+      conservativeResize(rows(), nbCols);
     }
 
     /** Resizes the vector to \a size while retaining old values.
@@ -400,10 +418,10 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
       return Base::operator=(func);
     }
 
-    EIGEN_STRONG_INLINE explicit PlainObjectBase() : m_storage()
+    EIGEN_STRONG_INLINE PlainObjectBase() : m_storage()
     {
 //       _check_template_params();
-//       EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+//       EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
     }
 
 #ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -412,15 +430,15 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
     PlainObjectBase(internal::constructor_without_unaligned_array_assert)
       : m_storage(internal::constructor_without_unaligned_array_assert())
     {
-//       _check_template_params(); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+//       _check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
     }
 #endif
 
-    EIGEN_STRONG_INLINE PlainObjectBase(Index size, Index rows, Index cols)
-      : m_storage(size, rows, cols)
+    EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols)
+      : m_storage(a_size, nbRows, nbCols)
     {
 //       _check_template_params();
-//       EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+//       EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
     }
 
     /** \copydoc MatrixBase::operator=(const EigenBase<OtherDerived>&)
@@ -439,7 +457,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
       : m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
     {
       _check_template_params();
-      internal::check_rows_cols_for_overflow(other.derived().rows(), other.derived().cols());
+      internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(other.derived().rows(), other.derived().cols());
       Base::operator=(other.derived());
     }
 
@@ -601,23 +619,19 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
     }
 
     template<typename T0, typename T1>
-    EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
+    EIGEN_STRONG_INLINE void _init2(Index nbRows, Index nbCols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
     {
       EIGEN_STATIC_ASSERT(bool(NumTraits<T0>::IsInteger) &&
                           bool(NumTraits<T1>::IsInteger),
                           FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
-      eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
-             && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
-      internal::check_rows_cols_for_overflow(rows, cols);      
-      m_storage.resize(rows*cols,rows,cols);
-      EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+      resize(nbRows,nbCols);
     }
     template<typename T0, typename T1>
-    EIGEN_STRONG_INLINE void _init2(const Scalar& x, const Scalar& y, typename internal::enable_if<Base::SizeAtCompileTime==2,T0>::type* = 0)
+    EIGEN_STRONG_INLINE void _init2(const Scalar& val0, const Scalar& val1, typename internal::enable_if<Base::SizeAtCompileTime==2,T0>::type* = 0)
     {
       EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
-      m_storage.data()[0] = x;
-      m_storage.data()[1] = y;
+      m_storage.data()[0] = val0;
+      m_storage.data()[1] = val1;
     }
 
     template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
@@ -666,7 +680,7 @@ struct internal::conservative_resize_like_impl
     if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows
          (!Derived::IsRowMajor && _this.rows() == rows) )  // column-major and we change only the number of columns
     {
-      internal::check_rows_cols_for_overflow(rows, cols);
+      internal::check_rows_cols_for_overflow<Derived::MaxSizeAtCompileTime>::run(rows, cols);
       _this.derived().m_storage.conservativeResize(rows*cols,rows,cols);
     }
     else
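
As the doc comments in this hunk note, conservativeResize() preserves the overlapping top-left block and leaves any appended coefficients uninitialized, so callers should fill them explicitly:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd m(2, 2);
      m << 1, 2,
           3, 4;
      m.conservativeResize(3, 3);  // old 2x2 block preserved
      m.row(2).setZero();          // the appended row and column are
      m.col(2).setZero();          // uninitialized; set them before use
      std::cout << m << "\n";
      return 0;
    }
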
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Product.h b/ThirdParty/eigen3/Eigen/src/Core/Product.h
deleted file mode 100644
index 30aa8943b4c..00000000000
--- a/ThirdParty/eigen3/Eigen/src/Core/Product.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this
-// file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_PRODUCT_H
-#define EIGEN_PRODUCT_H
-
-template<typename Lhs, typename Rhs> class Product;
-template<typename Lhs, typename Rhs, typename StorageKind> class ProductImpl;
-
-/** \class Product
-  * \ingroup Core_Module
-  *
-  * \brief Expression of the product of two arbitrary matrices or vectors
-  *
-  * \param Lhs the type of the left-hand side expression
-  * \param Rhs the type of the right-hand side expression
-  *
-  * This class represents an expression of the product of two arbitrary matrices.
-  *
-  */
-
-namespace internal {
-template<typename Lhs, typename Rhs>
-struct traits<Product<Lhs, Rhs> >
-{
-  typedef MatrixXpr XprKind;
-  typedef typename remove_all<Lhs>::type LhsCleaned;
-  typedef typename remove_all<Rhs>::type RhsCleaned;
-  typedef typename scalar_product_traits<typename traits<LhsCleaned>::Scalar, typename traits<RhsCleaned>::Scalar>::ReturnType Scalar;
-  typedef typename promote_storage_type<typename traits<LhsCleaned>::StorageKind,
-                                        typename traits<RhsCleaned>::StorageKind>::ret StorageKind;
-  typedef typename promote_index_type<typename traits<LhsCleaned>::Index,
-                                      typename traits<RhsCleaned>::Index>::type Index;
-  enum {
-    RowsAtCompileTime = LhsCleaned::RowsAtCompileTime,
-    ColsAtCompileTime = RhsCleaned::ColsAtCompileTime,
-    MaxRowsAtCompileTime = LhsCleaned::MaxRowsAtCompileTime,
-    MaxColsAtCompileTime = RhsCleaned::MaxColsAtCompileTime,
-    Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0), // TODO should be no storage order
-    CoeffReadCost = 0 // TODO CoeffReadCost should not be part of the expression traits
-  };
-};
-} // end namespace internal
-
-
-template<typename Lhs, typename Rhs>
-class Product : public ProductImpl<Lhs,Rhs,typename internal::promote_storage_type<typename internal::traits<Lhs>::StorageKind,
-                                                                            typename internal::traits<Rhs>::StorageKind>::ret>
-{
-  public:
-    
-    typedef typename ProductImpl<
-        Lhs, Rhs,
-        typename internal::promote_storage_type<typename Lhs::StorageKind,
-                                                typename Rhs::StorageKind>::ret>::Base Base;
-    EIGEN_GENERIC_PUBLIC_INTERFACE(Product)
-
-    typedef typename Lhs::Nested LhsNested;
-    typedef typename Rhs::Nested RhsNested;
-    typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;
-    typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;
-
-    Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs)
-    {
-      eigen_assert(lhs.cols() == rhs.rows()
-        && "invalid matrix product"
-        && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
-    }
-
-    inline Index rows() const { return m_lhs.rows(); }
-    inline Index cols() const { return m_rhs.cols(); }
-
-    const LhsNestedCleaned& lhs() const { return m_lhs; }
-    const RhsNestedCleaned& rhs() const { return m_rhs; }
-
-  protected:
-
-    const LhsNested m_lhs;
-    const RhsNested m_rhs;
-};
-
-template<typename Lhs, typename Rhs>
-class ProductImpl<Lhs,Rhs,Dense> : public internal::dense_xpr_base<Product<Lhs,Rhs> >::type
-{
-    typedef Product<Lhs, Rhs> Derived;
-  public:
-
-    typedef typename internal::dense_xpr_base<Product<Lhs, Rhs> >::type Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
-};
-
-#endif // EIGEN_PRODUCT_H
diff --git a/ThirdParty/eigen3/Eigen/src/Core/ProductBase.h b/ThirdParty/eigen3/Eigen/src/Core/ProductBase.h
index ec12e5c9f6b..a494b5f8703 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/ProductBase.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/ProductBase.h
@@ -87,10 +87,10 @@ class ProductBase : public MatrixBase<Derived>
 
     typedef typename Base::PlainObject PlainObject;
 
-    ProductBase(const Lhs& lhs, const Rhs& rhs)
-      : m_lhs(lhs), m_rhs(rhs)
+    ProductBase(const Lhs& a_lhs, const Rhs& a_rhs)
+      : m_lhs(a_lhs), m_rhs(a_rhs)
     {
-      eigen_assert(lhs.cols() == rhs.rows()
+      eigen_assert(a_lhs.cols() == a_rhs.rows()
         && "invalid matrix product"
         && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
     }
@@ -108,7 +108,7 @@ class ProductBase : public MatrixBase<Derived>
     inline void subTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(-1)); }
 
     template<typename Dest>
-    inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { derived().scaleAndAddTo(dst,alpha); }
+    inline void scaleAndAddTo(Dest& dst, const Scalar& alpha) const { derived().scaleAndAddTo(dst,alpha); }
 
     const _LhsNested& lhs() const { return m_lhs; }
     const _RhsNested& rhs() const { return m_rhs; }
@@ -195,25 +195,25 @@ class ScaledProduct;
 // Also note that here we accept any compatible scalar types
 template<typename Derived,typename Lhs,typename Rhs>
 const ScaledProduct<Derived>
-operator*(const ProductBase<Derived,Lhs,Rhs>& prod, typename Derived::Scalar x)
+operator*(const ProductBase<Derived,Lhs,Rhs>& prod, const typename Derived::Scalar& x)
 { return ScaledProduct<Derived>(prod.derived(), x); }
 
 template<typename Derived,typename Lhs,typename Rhs>
 typename internal::enable_if<!internal::is_same<typename Derived::Scalar,typename Derived::RealScalar>::value,
                       const ScaledProduct<Derived> >::type
-operator*(const ProductBase<Derived,Lhs,Rhs>& prod, typename Derived::RealScalar x)
+operator*(const ProductBase<Derived,Lhs,Rhs>& prod, const typename Derived::RealScalar& x)
 { return ScaledProduct<Derived>(prod.derived(), x); }
 
 
 template<typename Derived,typename Lhs,typename Rhs>
 const ScaledProduct<Derived>
-operator*(typename Derived::Scalar x,const ProductBase<Derived,Lhs,Rhs>& prod)
+operator*(const typename Derived::Scalar& x,const ProductBase<Derived,Lhs,Rhs>& prod)
 { return ScaledProduct<Derived>(prod.derived(), x); }
 
 template<typename Derived,typename Lhs,typename Rhs>
 typename internal::enable_if<!internal::is_same<typename Derived::Scalar,typename Derived::RealScalar>::value,
                       const ScaledProduct<Derived> >::type
-operator*(typename Derived::RealScalar x,const ProductBase<Derived,Lhs,Rhs>& prod)
+operator*(const typename Derived::RealScalar& x,const ProductBase<Derived,Lhs,Rhs>& prod)
 { return ScaledProduct<Derived>(prod.derived(), x); }
 
 namespace internal {
@@ -241,7 +241,7 @@ class ScaledProduct
     typedef typename Base::PlainObject PlainObject;
 //     EIGEN_PRODUCT_PUBLIC_INTERFACE(ScaledProduct)
 
-    ScaledProduct(const NestedProduct& prod, Scalar x)
+    ScaledProduct(const NestedProduct& prod, const Scalar& x)
     : Base(prod.lhs(),prod.rhs()), m_prod(prod), m_alpha(x) {}
 
     template<typename Dest>
@@ -254,7 +254,7 @@ class ScaledProduct
     inline void subTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(-1)); }
 
     template<typename Dest>
-    inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { m_prod.derived().scaleAndAddTo(dst,alpha * m_alpha); }
+    inline void scaleAndAddTo(Dest& dst, const Scalar& a_alpha) const { m_prod.derived().scaleAndAddTo(dst,a_alpha * m_alpha); }
 
     const Scalar& alpha() const { return m_alpha; }
     
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Random.h b/ThirdParty/eigen3/Eigen/src/Core/Random.h
index a9f7f434666..480fea408d0 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Random.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Random.h
@@ -112,7 +112,7 @@ inline Derived& DenseBase<Derived>::setRandom()
   return *this = Random(rows(), cols());
 }
 
-/** Resizes to the given \a size, and sets all coefficients in this expression to random values.
+/** Resizes to the given \a newSize, and sets all coefficients in this expression to random values.
   *
   * \only_for_vectors
   *
@@ -123,16 +123,16 @@ inline Derived& DenseBase<Derived>::setRandom()
   */
 template<typename Derived>
 EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setRandom(Index size)
+PlainObjectBase<Derived>::setRandom(Index newSize)
 {
-  resize(size);
+  resize(newSize);
   return setRandom();
 }
 
 /** Resizes to the given size, and sets all coefficients in this expression to random values.
   *
-  * \param rows the new number of rows
-  * \param cols the new number of columns
+  * \param nbRows the new number of rows
+  * \param nbCols the new number of columns
   *
   * Example: \include Matrix_setRandom_int_int.cpp
   * Output: \verbinclude Matrix_setRandom_int_int.out
@@ -141,9 +141,9 @@ PlainObjectBase<Derived>::setRandom(Index size)
   */
 template<typename Derived>
 EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setRandom(Index rows, Index cols)
+PlainObjectBase<Derived>::setRandom(Index nbRows, Index nbCols)
 {
-  resize(rows, cols);
+  resize(nbRows, nbCols);
   return setRandom();
 }
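
The renamed parameters spell out the resize-then-fill semantics of these overloads:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::VectorXd v;
      v.setRandom(5);     // resizes to 5, then fills with values in [-1, 1]
      Eigen::MatrixXd m;
      m.setRandom(3, 4);  // resizes to 3x4, then fills randomly
      return 0;
    }
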
 
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Redux.h b/ThirdParty/eigen3/Eigen/src/Core/Redux.h
index b7ce7c658a2..50548fa9a0e 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Redux.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Redux.h
@@ -330,7 +330,8 @@ DenseBase<Derived>::redux(const Func& func) const
             ::run(derived(), func);
 }
 
-/** \returns the minimum of all coefficients of *this
+/** \returns the minimum of all coefficients of \c *this.
+  * \warning the result is undefined if \c *this contains NaN.
   */
 template<typename Derived>
 EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
@@ -339,7 +340,8 @@ DenseBase<Derived>::minCoeff() const
   return this->redux(Eigen::internal::scalar_min_op<Scalar>());
 }
 
-/** \returns the maximum of all coefficients of *this
+/** \returns the maximum of all coefficients of \c *this.
+  * \warning the result is undefined if \c *this contains NaN.
   */
 template<typename Derived>
 EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
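The new \warning lines document pre-existing behavior of the comparison-based reductions. A sketch of the pitfall (our example, not from the patch):

    #include <Eigen/Dense>
    #include <limits>
    int main() {
      Eigen::Vector3f v(1.0f, std::numeric_limits<float>::quiet_NaN(), 3.0f);
      float lo = v.minCoeff();  // result is undefined: NaN poisons the reduction
      (void)lo;
      return 0;
    }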
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Ref.h b/ThirdParty/eigen3/Eigen/src/Core/Ref.h
new file mode 100644
index 00000000000..aba795bdb70
--- /dev/null
+++ b/ThirdParty/eigen3/Eigen/src/Core/Ref.h
@@ -0,0 +1,255 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REF_H
+#define EIGEN_REF_H
+
+namespace Eigen { 
+
+template<typename Derived> class RefBase;
+template<typename PlainObjectType, int Options = 0,
+         typename StrideType = typename internal::conditional<PlainObjectType::IsVectorAtCompileTime,InnerStride<1>,OuterStride<> >::type > class Ref;
+
+/** \class Ref
+  * \ingroup Core_Module
+  *
+  * \brief A matrix or vector expression mapping an existing expression
+  *
+  * \tparam PlainObjectType the equivalent matrix type of the mapped data
+  * \tparam Options specifies whether the pointer is \c #Aligned, or \c #Unaligned.
+  *                The default is \c #Unaligned.
+  * \tparam StrideType optionally specifies strides. By default, Ref implies contiguous storage along the inner dimension (inner stride==1),
+  *                   but accepts a variable outer stride (leading dimension).
+  *                   This can be overridden by specifying strides.
+  *                   The type passed here must be a specialization of the Stride template, see examples below.
+  *
+  * This class makes it possible to write non-template functions taking Eigen objects as parameters while limiting the number of copies.
+  * A Ref<> object can represent either a const expression or an l-value:
+  * \code
+  * // in-out argument:
+  * void foo1(Ref<VectorXf> x);
+  *
+  * // read-only const argument:
+  * void foo2(const Ref<const VectorXf>& x);
+  * \endcode
+  *
+  * In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation error is triggered.
+  * By default, a Ref<VectorXf> can reference any dense vector expression of float having a contiguous memory layout.
+  * Likewise, a Ref<MatrixXf> can reference any column-major dense matrix expression of float whose columns' elements are contiguously stored, with
+  * the possibility of a constant gap between columns, i.e.: the inner stride must be equal to 1, but the outer stride (or leading dimension)
+  * can be greater than the number of rows.
+  *
+  * In the const case, if the input expression does not match the above requirement, then it is evaluated into a temporary before being passed to the function.
+  * Here are some examples:
+  * \code
+  * MatrixXf A;
+  * VectorXf a;
+  * foo1(a.head(3));           // OK
+  * foo1(A.col(1));            // OK
+  * foo1(A.row(2));            // compilation error because here innerstride!=1
+  * foo2(A.row(2));            // The row is copied into a contiguous temporary
+  * foo2(2*a);                  // The expression is evaluated into a temporary
+  * foo2(A.col().segment(2,4)); // No temporary
+  * \endcode
+  *
+  * The range of inputs that can be referenced without a temporary can be enlarged using the last two template parameters.
+  * Here is an example accepting an inner stride != 1:
+  * \code
+  * // in-out argument:
+  * void foo3(Ref<VectorXf,0,InnerStride<> > x);
+  * foo3(A.row(2));            // OK
+  * \endcode
+  * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to exploit vectorization, and will involve more
+  * expensive address computations even if the input is contiguously stored in memory. To overcome this issue, one might provide overloads internally calling a
+  * template function, e.g.:
+  * \code
+  * // in the .h:
+  * void foo(const Ref<MatrixXf>& A);
+  * void foo(const Ref<MatrixXf,0,Stride<> >& A);
+  *
+  * // in the .cpp:
+  * template<typename TypeOfA> void foo_impl(const TypeOfA& A) {
+  *     ... // crazy code goes here
+  * }
+  * void foo(const Ref<MatrixXf>& A) { foo_impl(A); }
+  * void foo(const Ref<MatrixXf,0,Stride<> >& A) { foo_impl(A); }
+  * \endcode
+  *
+  *
+  * \sa PlainObjectBase::Map(), \ref TopicStorageOrders
+  */
+
+namespace internal {
+
+template<typename _PlainObjectType, int _Options, typename _StrideType>
+struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
+  : public traits<Map<_PlainObjectType, _Options, _StrideType> >
+{
+  typedef _PlainObjectType PlainObjectType;
+  typedef _StrideType StrideType;
+  enum {
+    Options = _Options
+  };
+
+  template<typename Derived> struct match {
+    enum {
+      HasDirectAccess = internal::has_direct_access<Derived>::ret,
+      StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
+      InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic)
+                      || int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime)
+                      || (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1),
+      OuterStrideMatch = Derived::IsVectorAtCompileTime
+                      || int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),
+      AlignmentMatch = (_Options!=Aligned) || ((PlainObjectType::Flags&AlignedBit)==0) || ((traits<Derived>::Flags&AlignedBit)==AlignedBit),
+      MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch
+    };
+    typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
+  };
+
+};
+
+template<typename Derived>
+struct traits<RefBase<Derived> > : public traits<Derived> {};
+
+}
+
+template<typename Derived> class RefBase
+ : public MapBase<Derived>
+{
+  typedef typename internal::traits<Derived>::PlainObjectType PlainObjectType;
+  typedef typename internal::traits<Derived>::StrideType StrideType;
+
+public:
+
+  typedef MapBase<Derived> Base;
+  EIGEN_DENSE_PUBLIC_INTERFACE(RefBase)
+
+  inline Index innerStride() const
+  {
+    return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
+  }
+
+  inline Index outerStride() const
+  {
+    return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
+         : IsVectorAtCompileTime ? this->size()
+         : int(Flags)&RowMajorBit ? this->cols()
+         : this->rows();
+  }
+
+  RefBase()
+    : Base(0,RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime),
+      // Stride<> does not allow default ctor for Dynamic strides, so let's initialize it with dummy values:
+      m_stride(StrideType::OuterStrideAtCompileTime==Dynamic?0:StrideType::OuterStrideAtCompileTime,
+               StrideType::InnerStrideAtCompileTime==Dynamic?0:StrideType::InnerStrideAtCompileTime)
+  {}
+  
+  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(RefBase)
+
+protected:
+
+  typedef Stride<StrideType::OuterStrideAtCompileTime,StrideType::InnerStrideAtCompileTime> StrideBase;
+
+  template<typename Expression>
+  void construct(Expression& expr)
+  {
+    if(PlainObjectType::RowsAtCompileTime==1)
+    {
+      eigen_assert(expr.rows()==1 || expr.cols()==1);
+      ::new (static_cast<Base*>(this)) Base(expr.data(), 1, expr.size());
+    }
+    else if(PlainObjectType::ColsAtCompileTime==1)
+    {
+      eigen_assert(expr.rows()==1 || expr.cols()==1);
+      ::new (static_cast<Base*>(this)) Base(expr.data(), expr.size(), 1);
+    }
+    else
+      ::new (static_cast<Base*>(this)) Base(expr.data(), expr.rows(), expr.cols());
+    ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(),
+                                 StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());    
+  }
+
+  StrideBase m_stride;
+};
+
+
+template<typename PlainObjectType, int Options, typename StrideType> class Ref
+  : public RefBase<Ref<PlainObjectType, Options, StrideType> >
+{
+    typedef internal::traits<Ref> Traits;
+  public:
+
+    typedef RefBase<Ref> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
+
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename Derived>
+    inline Ref(PlainObjectBase<Derived>& expr,
+               typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
+    {
+      Base::construct(expr);
+    }
+    template<typename Derived>
+    inline Ref(const DenseBase<Derived>& expr,
+               typename internal::enable_if<bool(internal::is_lvalue<Derived>::value&&bool(Traits::template match<Derived>::MatchAtCompileTime)),Derived>::type* = 0,
+               int = Derived::ThisConstantIsPrivateInPlainObjectBase)
+    #else
+    template<typename Derived>
+    inline Ref(DenseBase<Derived>& expr)
+    #endif
+    {
+      Base::construct(expr.const_cast_derived());
+    }
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref)
+
+};
+
+// this is the const ref version
+template<typename TPlainObjectType, int Options, typename StrideType> class Ref<const TPlainObjectType, Options, StrideType>
+  : public RefBase<Ref<const TPlainObjectType, Options, StrideType> >
+{
+    typedef internal::traits<Ref> Traits;
+  public:
+
+    typedef RefBase<Ref> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
+
+    template<typename Derived>
+    inline Ref(const DenseBase<Derived>& expr)
+    {
+//      std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << "," << match_helper<Derived>::InnerStrideMatch << "\n";
+//      std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n";
+//      std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n";
+      construct(expr.derived(), typename Traits::template match<Derived>::type());
+    }
+
+  protected:
+
+    template<typename Expression>
+    void construct(const Expression& expr,internal::true_type)
+    {
+      Base::construct(expr);
+    }
+
+    template<typename Expression>
+    void construct(const Expression& expr, internal::false_type)
+    {
+      m_object.lazyAssign(expr);
+      Base::construct(m_object);
+    }
+
+  protected:
+    TPlainObjectType m_object;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_REF_H
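A minimal sketch of the two calling conventions the new header documents (function names are ours, not part of the patch):

    #include <Eigen/Dense>
    using Eigen::Ref;
    using Eigen::MatrixXf;
    using Eigen::VectorXf;

    // in-out: writes through to the caller's storage, never copies
    void scale(Ref<VectorXf> x, float s) { x *= s; }

    // read-only: non-matching expressions are evaluated into a temporary
    float squaredNorm2(const Ref<const VectorXf>& x) { return x.squaredNorm(); }

    int main() {
      MatrixXf A = MatrixXf::Random(4, 4);
      scale(A.col(1), 2.0f);                    // OK: a column has inner stride 1
      float s = squaredNorm2(2.0f * A.col(0));  // expression -> temporary -> Ref
      (void)s;
      return 0;
    }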
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Replicate.h b/ThirdParty/eigen3/Eigen/src/Core/Replicate.h
index b61fdc29e2f..dde86a8349b 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Replicate.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Replicate.h
@@ -70,8 +70,8 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
     EIGEN_DENSE_PUBLIC_INTERFACE(Replicate)
 
     template<typename OriginalMatrixType>
-    inline explicit Replicate(const OriginalMatrixType& matrix)
-      : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor)
+    inline explicit Replicate(const OriginalMatrixType& a_matrix)
+      : m_matrix(a_matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor)
     {
       EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
                           THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
@@ -79,8 +79,8 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
     }
 
     template<typename OriginalMatrixType>
-    inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor)
-      : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor)
+    inline Replicate(const OriginalMatrixType& a_matrix, Index rowFactor, Index colFactor)
+      : m_matrix(a_matrix), m_rowFactor(rowFactor), m_colFactor(colFactor)
     {
       EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
                           THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
@@ -89,27 +89,27 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
     inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }
     inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); }
 
-    inline Scalar coeff(Index row, Index col) const
+    inline Scalar coeff(Index rowId, Index colId) const
     {
       // try to avoid using modulo; this is a pure optimization strategy
       const Index actual_row  = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
-                            : RowFactor==1 ? row
-                            : row%m_matrix.rows();
+                            : RowFactor==1 ? rowId
+                            : rowId%m_matrix.rows();
       const Index actual_col  = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
-                            : ColFactor==1 ? col
-                            : col%m_matrix.cols();
+                            : ColFactor==1 ? colId
+                            : colId%m_matrix.cols();
 
       return m_matrix.coeff(actual_row, actual_col);
     }
     template<int LoadMode>
-    inline PacketScalar packet(Index row, Index col) const
+    inline PacketScalar packet(Index rowId, Index colId) const
     {
       const Index actual_row  = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
-                            : RowFactor==1 ? row
-                            : row%m_matrix.rows();
+                            : RowFactor==1 ? rowId
+                            : rowId%m_matrix.rows();
       const Index actual_col  = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
-                            : ColFactor==1 ? col
-                            : col%m_matrix.cols();
+                            : ColFactor==1 ? colId
+                            : colId%m_matrix.cols();
 
       return m_matrix.template packet<LoadMode>(actual_row, actual_col);
     }
diff --git a/ThirdParty/eigen3/Eigen/src/Core/ReturnByValue.h b/ThirdParty/eigen3/Eigen/src/Core/ReturnByValue.h
index 613912ffa8c..d66c24ba0f8 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/ReturnByValue.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/ReturnByValue.h
@@ -48,7 +48,7 @@ struct nested<ReturnByValue<Derived>, n, PlainObject>
 } // end namespace internal
 
 template<typename Derived> class ReturnByValue
-  : public internal::dense_xpr_base< ReturnByValue<Derived> >::type
+  : internal::no_assignment_operator, public internal::dense_xpr_base< ReturnByValue<Derived> >::type
 {
   public:
     typedef typename internal::traits<Derived>::ReturnType ReturnType;
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Select.h b/ThirdParty/eigen3/Eigen/src/Core/Select.h
index 2bf6e91d0aa..87993bbb553 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Select.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Select.h
@@ -60,10 +60,10 @@ class Select : internal::no_assignment_operator,
     typedef typename internal::dense_xpr_base<Select>::type Base;
     EIGEN_DENSE_PUBLIC_INTERFACE(Select)
 
-    Select(const ConditionMatrixType& conditionMatrix,
-           const ThenMatrixType& thenMatrix,
-           const ElseMatrixType& elseMatrix)
-      : m_condition(conditionMatrix), m_then(thenMatrix), m_else(elseMatrix)
+    Select(const ConditionMatrixType& a_conditionMatrix,
+           const ThenMatrixType& a_thenMatrix,
+           const ElseMatrixType& a_elseMatrix)
+      : m_condition(a_conditionMatrix), m_then(a_thenMatrix), m_else(a_elseMatrix)
     {
       eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows());
       eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
@@ -136,7 +136,7 @@ template<typename Derived>
 template<typename ThenDerived>
 inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
 DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
-                            typename ThenDerived::Scalar elseScalar) const
+                           const typename ThenDerived::Scalar& elseScalar) const
 {
   return Select<Derived,ThenDerived,typename ThenDerived::ConstantReturnType>(
     derived(), thenMatrix.derived(), ThenDerived::Constant(rows(),cols(),elseScalar));
@@ -150,8 +150,8 @@ DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
 template<typename Derived>
 template<typename ElseDerived>
 inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
-DenseBase<Derived>::select(typename ElseDerived::Scalar thenScalar,
-                            const DenseBase<ElseDerived>& elseMatrix) const
+DenseBase<Derived>::select(const typename ElseDerived::Scalar& thenScalar,
+                           const DenseBase<ElseDerived>& elseMatrix) const
 {
   return Select<Derived,typename ElseDerived::ConstantReturnType,ElseDerived>(
     derived(), ElseDerived::Constant(rows(),cols(),thenScalar), elseMatrix.derived());
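Both select() overloads above now take their scalar branch by const reference; call sites are unchanged. A sketch (ours):

    #include <Eigen/Dense>
    int main() {
      Eigen::ArrayXf a = Eigen::ArrayXf::LinSpaced(5, -2.0f, 2.0f);
      // scalar 'else' argument: keep positive entries, clamp the rest to 0
      Eigen::ArrayXf b = (a > 0.0f).select(a, 0.0f);
      return 0;
    }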
diff --git a/ThirdParty/eigen3/Eigen/src/Core/SelfAdjointView.h b/ThirdParty/eigen3/Eigen/src/Core/SelfAdjointView.h
index 82cc4da736a..6fa7cd15e6f 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/SelfAdjointView.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/SelfAdjointView.h
@@ -132,7 +132,7 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
       * \sa rankUpdate(const MatrixBase<DerivedU>&, Scalar)
       */
     template<typename DerivedU, typename DerivedV>
-    SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, Scalar alpha = Scalar(1));
+    SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, const Scalar& alpha = Scalar(1));
 
     /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
       * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
@@ -145,7 +145,7 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
       * \sa rankUpdate(const MatrixBase<DerivedU>&, const MatrixBase<DerivedV>&, Scalar)
       */
     template<typename DerivedU>
-    SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
+    SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
 
 /////////// Cholesky module ///////////
 
@@ -214,9 +214,9 @@ struct triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Upper), U
     triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Upper), UnrollCount-1, ClearOpposite>::run(dst, src);
 
     if(row == col)
-      dst.coeffRef(row, col) = real(src.coeff(row, col));
+      dst.coeffRef(row, col) = numext::real(src.coeff(row, col));
     else if(row < col)
-      dst.coeffRef(col, row) = conj(dst.coeffRef(row, col) = src.coeff(row, col));
+      dst.coeffRef(col, row) = numext::conj(dst.coeffRef(row, col) = src.coeff(row, col));
   }
 };
 
@@ -239,9 +239,9 @@ struct triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Lower), U
     triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Lower), UnrollCount-1, ClearOpposite>::run(dst, src);
 
     if(row == col)
-      dst.coeffRef(row, col) = real(src.coeff(row, col));
+      dst.coeffRef(row, col) = numext::real(src.coeff(row, col));
     else if(row > col)
-      dst.coeffRef(col, row) = conj(dst.coeffRef(row, col) = src.coeff(row, col));
+      dst.coeffRef(col, row) = numext::conj(dst.coeffRef(row, col) = src.coeff(row, col));
   }
 };
 
@@ -262,7 +262,7 @@ struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Upper, Dyn
       for(Index i = 0; i < j; ++i)
       {
         dst.copyCoeff(i, j, src);
-        dst.coeffRef(j,i) = conj(dst.coeff(i,j));
+        dst.coeffRef(j,i) = numext::conj(dst.coeff(i,j));
       }
       dst.copyCoeff(j, j, src);
     }
@@ -280,7 +280,7 @@ struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Lower, Dyn
       for(Index j = 0; j < i; ++j)
       {
         dst.copyCoeff(i, j, src);
-        dst.coeffRef(j,i) = conj(dst.coeff(i,j));
+        dst.coeffRef(j,i) = numext::conj(dst.coeff(i,j));
       }
       dst.copyCoeff(i, i, src);
     }
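rankUpdate() now takes alpha by const reference as well; a sketch of the rank-1 form (ours, not from the patch):

    #include <Eigen/Dense>
    int main() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Zero(3, 3);
      Eigen::VectorXd u = Eigen::VectorXd::Random(3);
      // A += 2 * u * u^T, updating only the lower triangle
      A.selfadjointView<Eigen::Lower>().rankUpdate(u, 2.0);
      return 0;
    }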
diff --git a/ThirdParty/eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h b/ThirdParty/eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h
index 0caf2bab1d8..22f3047b43f 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h
@@ -185,7 +185,10 @@ inline Derived& DenseBase<Derived>::operator/=(const Scalar& other)
                                         internal::scalar_product_op<Scalar> >::type BinOp;
   typedef typename Derived::PlainObject PlainObject;
   SelfCwiseBinaryOp<BinOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
-  tmp = PlainObject::Constant(rows(),cols(), NumTraits<Scalar>::IsInteger ? other : Scalar(1)/other);
+  Scalar actual_other;
+  if(NumTraits<Scalar>::IsInteger)  actual_other = other;
+  else                              actual_other = Scalar(1)/other;
+  tmp = PlainObject::Constant(rows(),cols(), actual_other);
   return derived();
 }
 
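The rewritten operator/= keeps the old semantics: true per-coefficient division for integer scalars, multiplication by the reciprocal otherwise. A sketch (ours):

    #include <Eigen/Dense>
    int main() {
      Eigen::MatrixXi mi = Eigen::MatrixXi::Constant(2, 2, 7);
      mi /= 2;     // IsInteger: each coefficient is divided -> 3
      Eigen::MatrixXf mf = Eigen::MatrixXf::Constant(2, 2, 7.0f);
      mf /= 2.0f;  // floating point: multiplies by 1/2 -> 3.5
      return 0;
    }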
diff --git a/ThirdParty/eigen3/Eigen/src/Core/StableNorm.h b/ThirdParty/eigen3/Eigen/src/Core/StableNorm.h
index f52e06f4fcd..c83e955ee08 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/StableNorm.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/StableNorm.h
@@ -20,7 +20,7 @@ inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& sc
   Scalar max = bl.cwiseAbs().maxCoeff();
   if (max>scale)
   {
-    ssq = ssq * abs2(scale/max);
+    ssq = ssq * numext::abs2(scale/max);
     scale = max;
     invScale = Scalar(1)/scale;
   }
@@ -28,106 +28,69 @@ inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& sc
   // then we can neglect this sub vector
   ssq += (bl*invScale).squaredNorm();
 }
-}
 
-/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
-  * This version use a blockwise two passes algorithm:
-  *  1 - find the absolute largest coefficient \c s
-  *  2 - compute \f$ s \Vert \frac{*this}{s} \Vert \f$ in a standard way
-  *
-  * For architecture/scalar types supporting vectorization, this version
-  * is faster than blueNorm(). Otherwise the blueNorm() is much faster.
-  *
-  * \sa norm(), blueNorm(), hypotNorm()
-  */
 template<typename Derived>
-inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
-MatrixBase<Derived>::stableNorm() const
-{
-  using std::min;
-  const Index blockSize = 4096;
-  RealScalar scale(0);
-  RealScalar invScale(1);
-  RealScalar ssq(0); // sum of square
-  enum {
-    Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 1 : 0
-  };
-  Index n = size();
-  Index bi = internal::first_aligned(derived());
-  if (bi>0)
-    internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale);
-  for (; bi<n; bi+=blockSize)
-    internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
-  return scale * internal::sqrt(ssq);
-}
-
-/** \returns the \em l2 norm of \c *this using the Blue's algorithm.
-  * A Portable Fortran Program to Find the Euclidean Norm of a Vector,
-  * ACM TOMS, Vol 4, Issue 1, 1978.
-  *
-  * For architecture/scalar types without vectorization, this version
-  * is much faster than stableNorm(). Otherwise the stableNorm() is faster.
-  *
-  * \sa norm(), stableNorm(), hypotNorm()
-  */
-template<typename Derived>
-inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
-MatrixBase<Derived>::blueNorm() const
+inline typename NumTraits<typename traits<Derived>::Scalar>::Real
+blueNorm_impl(const EigenBase<Derived>& _vec)
 {
+  typedef typename Derived::RealScalar RealScalar;  
+  typedef typename Derived::Index Index;
   using std::pow;
   using std::min;
   using std::max;
+  using std::sqrt;
+  using std::abs;
+  const Derived& vec(_vec.derived());
   static bool initialized = false;
   static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr;
   if(!initialized)
   {
     int ibeta, it, iemin, iemax, iexp;
-    RealScalar abig, eps;
+    RealScalar eps;
     // This program calculates the machine-dependent constants
     // bl, b2, slm, s2m, relerr overfl
     // from the "basic" machine-dependent numbers
-    // ibeta, it, iemin, iemax, rbig.
+    // nbig, ibeta, it, iemin, iemax, rbig.
     // The following define the basic machine-dependent constants.
     // For portability, the PORT subprograms "ilmaeh" and "rlmach"
     // are used. For any specific computer, each of the assignment
     // statements can be replaced
-    ibeta = std::numeric_limits<RealScalar>::radix;         // base for floating-point numbers
-    it    = std::numeric_limits<RealScalar>::digits;        // number of base-beta digits in mantissa
-    iemin = std::numeric_limits<RealScalar>::min_exponent;  // minimum exponent
-    iemax = std::numeric_limits<RealScalar>::max_exponent;  // maximum exponent
-    rbig  = (std::numeric_limits<RealScalar>::max)();         // largest floating-point number
+    ibeta = std::numeric_limits<RealScalar>::radix;                 // base for floating-point numbers
+    it    = std::numeric_limits<RealScalar>::digits;                // number of base-beta digits in mantissa
+    iemin = std::numeric_limits<RealScalar>::min_exponent;          // minimum exponent
+    iemax = std::numeric_limits<RealScalar>::max_exponent;          // maximum exponent
+    rbig  = (std::numeric_limits<RealScalar>::max)();               // largest floating-point number
 
     iexp  = -((1-iemin)/2);
-    b1    = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));  // lower boundary of midrange
+    b1    = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // lower boundary of midrange
     iexp  = (iemax + 1 - it)/2;
-    b2    = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));   // upper boundary of midrange
+    b2    = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // upper boundary of midrange
 
     iexp  = (2-iemin)/2;
-    s1m   = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));   // scaling factor for lower range
+    s1m   = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // scaling factor for lower range
     iexp  = - ((iemax+it)/2);
-    s2m   = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));   // scaling factor for upper range
+    s2m   = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // scaling factor for upper range
 
-    overfl  = rbig*s2m;             // overflow boundary for abig
+    overfl  = rbig*s2m;                                             // overflow boundary for abig
     eps     = RealScalar(pow(double(ibeta), 1-it));
-    relerr  = internal::sqrt(eps);         // tolerance for neglecting asml
-    abig    = RealScalar(1.0/eps - 1.0);
+    relerr  = sqrt(eps);                                            // tolerance for neglecting asml
     initialized = true;
   }
-  Index n = size();
+  Index n = vec.size();
   RealScalar ab2 = b2 / RealScalar(n);
   RealScalar asml = RealScalar(0);
   RealScalar amed = RealScalar(0);
   RealScalar abig = RealScalar(0);
-  for(Index j=0; j<n; ++j)
+  for(typename Derived::InnerIterator it(vec, 0); it; ++it)
   {
-    RealScalar ax = internal::abs(coeff(j));
-    if(ax > ab2)     abig += internal::abs2(ax*s2m);
-    else if(ax < b1) asml += internal::abs2(ax*s1m);
-    else             amed += internal::abs2(ax);
+    RealScalar ax = abs(it.value());
+    if(ax > ab2)     abig += numext::abs2(ax*s2m);
+    else if(ax < b1) asml += numext::abs2(ax*s1m);
+    else             amed += numext::abs2(ax);
   }
   if(abig > RealScalar(0))
   {
-    abig = internal::sqrt(abig);
+    abig = sqrt(abig);
     if(abig > overfl)
     {
       return rbig;
@@ -135,7 +98,7 @@ MatrixBase<Derived>::blueNorm() const
     if(amed > RealScalar(0))
     {
       abig = abig/s2m;
-      amed = internal::sqrt(amed);
+      amed = sqrt(amed);
     }
     else
       return abig/s2m;
@@ -144,20 +107,70 @@ MatrixBase<Derived>::blueNorm() const
   {
     if (amed > RealScalar(0))
     {
-      abig = internal::sqrt(amed);
-      amed = internal::sqrt(asml) / s1m;
+      abig = sqrt(amed);
+      amed = sqrt(asml) / s1m;
     }
     else
-      return internal::sqrt(asml)/s1m;
+      return sqrt(asml)/s1m;
   }
   else
-    return internal::sqrt(amed);
+    return sqrt(amed);
   asml = (min)(abig, amed);
   abig = (max)(abig, amed);
   if(asml <= abig*relerr)
     return abig;
   else
-    return abig * internal::sqrt(RealScalar(1) + internal::abs2(asml/abig));
+    return abig * sqrt(RealScalar(1) + numext::abs2(asml/abig));
+}
+
+} // end namespace internal
+
+/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
+  * This version uses a blockwise two-pass algorithm:
+  *  1 - find the absolute largest coefficient \c s
+  *  2 - compute \f$ s \Vert \frac{*this}{s} \Vert \f$ in a standard way
+  *
+  * For architecture/scalar types supporting vectorization, this version
+  * is faster than blueNorm(). Otherwise the blueNorm() is much faster.
+  *
+  * \sa norm(), blueNorm(), hypotNorm()
+  */
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::stableNorm() const
+{
+  using std::min;
+  using std::sqrt;
+  const Index blockSize = 4096;
+  RealScalar scale(0);
+  RealScalar invScale(1);
+  RealScalar ssq(0); // sum of squares
+  enum {
+    Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 1 : 0
+  };
+  Index n = size();
+  Index bi = internal::first_aligned(derived());
+  if (bi>0)
+    internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale);
+  for (; bi<n; bi+=blockSize)
+    internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
+  return scale * sqrt(ssq);
+}
+
+/** \returns the \em l2 norm of \c *this using Blue's algorithm.
+  * See "A Portable Fortran Program to Find the Euclidean Norm of a Vector",
+  * ACM TOMS, Vol 4, Issue 1, 1978.
+  *
+  * For architecture/scalar types without vectorization, this version
+  * is much faster than stableNorm(). Otherwise the stableNorm() is faster.
+  *
+  * \sa norm(), stableNorm(), hypotNorm()
+  */
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::blueNorm() const
+{
+  return internal::blueNorm_impl(*this);
 }
 
 /** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
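The numerical contract of the relocated code is unchanged; a sketch of why these variants exist (our example, not from the patch):

    #include <Eigen/Dense>
    int main() {
      Eigen::VectorXd v = Eigen::VectorXd::Constant(100, 1e200);
      double plain  = v.norm();        // overflows: squaring 1e200 exceeds double range
      double stable = v.stableNorm();  // ~1e201: blockwise rescaling before squaring
      double blue   = v.blueNorm();    // ~1e201: Blue's three-accumulator algorithm
      (void)plain; (void)stable; (void)blue;
      return 0;
    }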
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Swap.h b/ThirdParty/eigen3/Eigen/src/Core/Swap.h
index fd73cf3ad7e..bf58bd5997d 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Swap.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Swap.h
@@ -49,9 +49,9 @@ template<typename ExpressionType> class SwapWrapper
     inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
     inline const Scalar* data() const { return m_expression.data(); }
 
-    inline Scalar& coeffRef(Index row, Index col)
+    inline Scalar& coeffRef(Index rowId, Index colId)
     {
-      return m_expression.const_cast_derived().coeffRef(row, col);
+      return m_expression.const_cast_derived().coeffRef(rowId, colId);
     }
 
     inline Scalar& coeffRef(Index index)
@@ -59,9 +59,9 @@ template<typename ExpressionType> class SwapWrapper
       return m_expression.const_cast_derived().coeffRef(index);
     }
 
-    inline Scalar& coeffRef(Index row, Index col) const
+    inline Scalar& coeffRef(Index rowId, Index colId) const
     {
-      return m_expression.coeffRef(row, col);
+      return m_expression.coeffRef(rowId, colId);
     }
 
     inline Scalar& coeffRef(Index index) const
@@ -70,14 +70,14 @@ template<typename ExpressionType> class SwapWrapper
     }
 
     template<typename OtherDerived>
-    void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
+    void copyCoeff(Index rowId, Index colId, const DenseBase<OtherDerived>& other)
     {
       OtherDerived& _other = other.const_cast_derived();
-      eigen_internal_assert(row >= 0 && row < rows()
-                         && col >= 0 && col < cols());
-      Scalar tmp = m_expression.coeff(row, col);
-      m_expression.coeffRef(row, col) = _other.coeff(row, col);
-      _other.coeffRef(row, col) = tmp;
+      eigen_internal_assert(rowId >= 0 && rowId < rows()
+                         && colId >= 0 && colId < cols());
+      Scalar tmp = m_expression.coeff(rowId, colId);
+      m_expression.coeffRef(rowId, colId) = _other.coeff(rowId, colId);
+      _other.coeffRef(rowId, colId) = tmp;
     }
 
     template<typename OtherDerived>
@@ -91,16 +91,16 @@ template<typename ExpressionType> class SwapWrapper
     }
 
     template<typename OtherDerived, int StoreMode, int LoadMode>
-    void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
+    void copyPacket(Index rowId, Index colId, const DenseBase<OtherDerived>& other)
     {
       OtherDerived& _other = other.const_cast_derived();
-      eigen_internal_assert(row >= 0 && row < rows()
-                        && col >= 0 && col < cols());
-      Packet tmp = m_expression.template packet<StoreMode>(row, col);
-      m_expression.template writePacket<StoreMode>(row, col,
-        _other.template packet<LoadMode>(row, col)
+      eigen_internal_assert(rowId >= 0 && rowId < rows()
+                        && colId >= 0 && colId < cols());
+      Packet tmp = m_expression.template packet<StoreMode>(rowId, colId);
+      m_expression.template writePacket<StoreMode>(rowId, colId,
+        _other.template packet<LoadMode>(rowId, colId)
       );
-      _other.template writePacket<LoadMode>(row, col, tmp);
+      _other.template writePacket<LoadMode>(rowId, colId, tmp);
     }
 
     template<typename OtherDerived, int StoreMode, int LoadMode>
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Transpose.h b/ThirdParty/eigen3/Eigen/src/Core/Transpose.h
index ee25497770e..f21b3aa65f5 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Transpose.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Transpose.h
@@ -62,7 +62,7 @@ template<typename MatrixType> class Transpose
     typedef typename TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base;
     EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose)
 
-    inline Transpose(MatrixType& matrix) : m_matrix(matrix) {}
+    inline Transpose(MatrixType& a_matrix) : m_matrix(a_matrix) {}
 
     EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
 
@@ -104,6 +104,7 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
 
     typedef typename internal::TransposeImpl_base<MatrixType>::type Base;
     EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TransposeImpl)
 
     inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
     inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
@@ -117,10 +118,10 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
     inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }
     inline const Scalar* data() const { return derived().nestedExpression().data(); }
 
-    inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col)
+    inline ScalarWithConstIfNotLvalue& coeffRef(Index rowId, Index colId)
     {
       EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
-      return derived().nestedExpression().const_cast_derived().coeffRef(col, row);
+      return derived().nestedExpression().const_cast_derived().coeffRef(colId, rowId);
     }
 
     inline ScalarWithConstIfNotLvalue& coeffRef(Index index)
@@ -129,9 +130,9 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
       return derived().nestedExpression().const_cast_derived().coeffRef(index);
     }
 
-    inline const Scalar& coeffRef(Index row, Index col) const
+    inline const Scalar& coeffRef(Index rowId, Index colId) const
     {
-      return derived().nestedExpression().coeffRef(col, row);
+      return derived().nestedExpression().coeffRef(colId, rowId);
     }
 
     inline const Scalar& coeffRef(Index index) const
@@ -139,9 +140,9 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
       return derived().nestedExpression().coeffRef(index);
     }
 
-    inline CoeffReturnType coeff(Index row, Index col) const
+    inline CoeffReturnType coeff(Index rowId, Index colId) const
     {
-      return derived().nestedExpression().coeff(col, row);
+      return derived().nestedExpression().coeff(colId, rowId);
     }
 
     inline CoeffReturnType coeff(Index index) const
@@ -150,15 +151,15 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
     }
 
     template<int LoadMode>
-    inline const PacketScalar packet(Index row, Index col) const
+    inline const PacketScalar packet(Index rowId, Index colId) const
     {
-      return derived().nestedExpression().template packet<LoadMode>(col, row);
+      return derived().nestedExpression().template packet<LoadMode>(colId, rowId);
     }
 
     template<int LoadMode>
-    inline void writePacket(Index row, Index col, const PacketScalar& x)
+    inline void writePacket(Index rowId, Index colId, const PacketScalar& x)
     {
-      derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(col, row, x);
+      derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(colId, rowId, x);
     }
 
     template<int LoadMode>
@@ -206,7 +207,7 @@ DenseBase<Derived>::transpose()
   *
   * \sa transposeInPlace(), adjoint() */
 template<typename Derived>
-inline const typename DenseBase<Derived>::ConstTransposeReturnType
+inline typename DenseBase<Derived>::ConstTransposeReturnType
 DenseBase<Derived>::transpose() const
 {
   return ConstTransposeReturnType(derived());
@@ -252,7 +253,7 @@ struct inplace_transpose_selector;
 template<typename MatrixType>
 struct inplace_transpose_selector<MatrixType,true> { // square matrix
   static void run(MatrixType& m) {
-    m.template triangularView<StrictlyUpper>().swap(m.transpose());
+    m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
   }
 };
 
@@ -260,7 +261,7 @@ template<typename MatrixType>
 struct inplace_transpose_selector<MatrixType,false> { // non square matrix
   static void run(MatrixType& m) {
     if (m.rows()==m.cols())
-      m.template triangularView<StrictlyUpper>().swap(m.transpose());
+      m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
     else
       m = m.transpose().eval();
   }
@@ -278,7 +279,7 @@ struct inplace_transpose_selector<MatrixType,false> { // non square matrix
   * m = m.transpose().eval();
   * \endcode
   * and is faster and also safer because in the latter line of code, forgetting the eval() results
-  * in a bug caused by aliasing.
+  * in a bug caused by \ref TopicAliasing "aliasing".
   *
   * Notice however that this method is only useful if you want to replace a matrix by its own transpose.
   * If you just need the transpose of a matrix, use transpose().
@@ -289,6 +290,8 @@ struct inplace_transpose_selector<MatrixType,false> { // non square matrix
 template<typename Derived>
 inline void DenseBase<Derived>::transposeInPlace()
 {
+  eigen_assert((rows() == cols() || (RowsAtCompileTime == Dynamic && ColsAtCompileTime == Dynamic))
+               && "transposeInPlace() called on a non-square non-resizable matrix");
   internal::inplace_transpose_selector<Derived>::run(derived());
 }
 
@@ -385,7 +388,7 @@ struct checkTransposeAliasing_impl
         eigen_assert((!check_transpose_aliasing_run_time_selector
                       <typename Derived::Scalar,blas_traits<Derived>::IsTransposed,OtherDerived>
                       ::run(extract_data(dst), other))
-          && "aliasing detected during tranposition, use transposeInPlace() "
+          && "aliasing detected during transposition, use transposeInPlace() "
              "or evaluate the rhs into a temporary using .eval()");
 
     }
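The new assertion in transposeInPlace() catches the one shape it cannot handle. A sketch (ours):

    #include <Eigen/Dense>
    int main() {
      Eigen::MatrixXf m = Eigen::MatrixXf::Random(2, 3);
      m.transposeInPlace();  // OK: dynamic size, becomes 3x2 via m = m.transpose().eval()
      Eigen::Matrix<float, 2, 3> f = Eigen::Matrix<float, 2, 3>::Zero();
      // f.transposeInPlace();  // would fire the new assert: non-square, non-resizable
      return 0;
    }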
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Transpositions.h b/ThirdParty/eigen3/Eigen/src/Core/Transpositions.h
index 2cd268a5fa0..e4ba0756fa6 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Transpositions.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Transpositions.h
@@ -99,9 +99,9 @@ class TranspositionsBase
     IndicesType& indices() { return derived().indices(); }
 
     /** Resizes to given size. */
-    inline void resize(int size)
+    inline void resize(int newSize)
     {
-      indices().resize(size);
+      indices().resize(newSize);
     }
 
     /** Sets \c *this to represents an identity transformation */
@@ -177,7 +177,7 @@ class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTim
 
     /** Generic constructor from expression of the transposition indices. */
     template<typename Other>
-    explicit inline Transpositions(const MatrixBase<Other>& indices) : m_indices(indices)
+    explicit inline Transpositions(const MatrixBase<Other>& a_indices) : m_indices(a_indices)
     {}
 
     /** Copies the \a other transpositions into \c *this */
@@ -234,12 +234,12 @@ class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,Packe
     typedef typename Traits::IndicesType IndicesType;
     typedef typename IndicesType::Scalar Index;
 
-    inline Map(const Index* indices)
-      : m_indices(indices)
+    inline Map(const Index* indicesPtr)
+      : m_indices(indicesPtr)
     {}
 
-    inline Map(const Index* indices, Index size)
-      : m_indices(indices,size)
+    inline Map(const Index* indicesPtr, Index size)
+      : m_indices(indicesPtr,size)
     {}
 
     /** Copies the \a other transpositions into \c *this */
@@ -291,8 +291,8 @@ class TranspositionsWrapper
     typedef typename Traits::IndicesType IndicesType;
     typedef typename IndicesType::Scalar Index;
 
-    inline TranspositionsWrapper(IndicesType& indices)
-      : m_indices(indices)
+    inline TranspositionsWrapper(IndicesType& a_indices)
+      : m_indices(a_indices)
     {}
 
     /** Copies the \a other transpositions into \c *this */
diff --git a/ThirdParty/eigen3/Eigen/src/Core/TriangularMatrix.h b/ThirdParty/eigen3/Eigen/src/Core/TriangularMatrix.h
index 301b0ef2434..fba07365f6f 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/TriangularMatrix.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/TriangularMatrix.h
@@ -779,22 +779,23 @@ MatrixBase<Derived>::triangularView() const
   * \sa isLowerTriangular()
   */
 template<typename Derived>
-bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
+bool MatrixBase<Derived>::isUpperTriangular(const RealScalar& prec) const
 {
+  using std::abs;
   RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);
   for(Index j = 0; j < cols(); ++j)
   {
     Index maxi = (std::min)(j, rows()-1);
     for(Index i = 0; i <= maxi; ++i)
     {
-      RealScalar absValue = internal::abs(coeff(i,j));
+      RealScalar absValue = abs(coeff(i,j));
       if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue;
     }
   }
   RealScalar threshold = maxAbsOnUpperPart * prec;
   for(Index j = 0; j < cols(); ++j)
     for(Index i = j+1; i < rows(); ++i)
-      if(internal::abs(coeff(i, j)) > threshold) return false;
+      if(abs(coeff(i, j)) > threshold) return false;
   return true;
 }
 
@@ -804,13 +805,14 @@ bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
   * \sa isUpperTriangular()
   */
 template<typename Derived>
-bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const
+bool MatrixBase<Derived>::isLowerTriangular(const RealScalar& prec) const
 {
+  using std::abs;
   RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1);
   for(Index j = 0; j < cols(); ++j)
     for(Index i = j; i < rows(); ++i)
     {
-      RealScalar absValue = internal::abs(coeff(i,j));
+      RealScalar absValue = abs(coeff(i,j));
       if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue;
     }
   RealScalar threshold = maxAbsOnLowerPart * prec;
@@ -818,7 +820,7 @@ bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const
   {
     Index maxi = (std::min)(j, rows()-1);
     for(Index i = 0; i < maxi; ++i)
-      if(internal::abs(coeff(i, j)) > threshold) return false;
+      if(abs(coeff(i, j)) > threshold) return false;
   }
   return true;
 }
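isUpperTriangular()/isLowerTriangular() behave as before; only the precision parameter's passing convention and the abs() lookup changed. A sketch (ours):

    #include <Eigen/Dense>
    int main() {
      Eigen::Matrix3d t = Eigen::Matrix3d::Identity();
      t(0, 2) = 5.0;
      bool up = t.isUpperTriangular();  // true: sub-diagonal entries fall below the threshold
      (void)up;
      return 0;
    }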
diff --git a/ThirdParty/eigen3/Eigen/src/Core/VectorBlock.h b/ThirdParty/eigen3/Eigen/src/Core/VectorBlock.h
index 6f4effca055..1a7330f3cfc 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/VectorBlock.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/VectorBlock.h
@@ -90,195 +90,6 @@ template<typename VectorType, int Size> class VectorBlock
 };
 
 
-/** \returns a dynamic-size expression of a segment (i.e. a vector block) in *this.
-  *
-  * \only_for_vectors
-  *
-  * \param start the first coefficient in the segment
-  * \param size the number of coefficients in the segment
-  *
-  * Example: \include MatrixBase_segment_int_int.cpp
-  * Output: \verbinclude MatrixBase_segment_int_int.out
-  *
-  * \note Even though the returned expression has dynamic size, in the case
-  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
-  * which means that evaluating it does not cause a dynamic memory allocation.
-  *
-  * \sa class Block, segment(Index)
-  */
-template<typename Derived>
-inline typename DenseBase<Derived>::SegmentReturnType
-DenseBase<Derived>::segment(Index start, Index size)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return SegmentReturnType(derived(), start, size);
-}
-
-/** This is the const version of segment(Index,Index).*/
-template<typename Derived>
-inline typename DenseBase<Derived>::ConstSegmentReturnType
-DenseBase<Derived>::segment(Index start, Index size) const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return ConstSegmentReturnType(derived(), start, size);
-}
-
-/** \returns a dynamic-size expression of the first coefficients of *this.
-  *
-  * \only_for_vectors
-  *
-  * \param size the number of coefficients in the block
-  *
-  * Example: \include MatrixBase_start_int.cpp
-  * Output: \verbinclude MatrixBase_start_int.out
-  *
-  * \note Even though the returned expression has dynamic size, in the case
-  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
-  * which means that evaluating it does not cause a dynamic memory allocation.
-  *
-  * \sa class Block, block(Index,Index)
-  */
-template<typename Derived>
-inline typename DenseBase<Derived>::SegmentReturnType
-DenseBase<Derived>::head(Index size)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return SegmentReturnType(derived(), 0, size);
-}
-
-/** This is the const version of head(Index).*/
-template<typename Derived>
-inline typename DenseBase<Derived>::ConstSegmentReturnType
-DenseBase<Derived>::head(Index size) const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return ConstSegmentReturnType(derived(), 0, size);
-}
-
-/** \returns a dynamic-size expression of the last coefficients of *this.
-  *
-  * \only_for_vectors
-  *
-  * \param size the number of coefficients in the block
-  *
-  * Example: \include MatrixBase_end_int.cpp
-  * Output: \verbinclude MatrixBase_end_int.out
-  *
-  * \note Even though the returned expression has dynamic size, in the case
-  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
-  * which means that evaluating it does not cause a dynamic memory allocation.
-  *
-  * \sa class Block, block(Index,Index)
-  */
-template<typename Derived>
-inline typename DenseBase<Derived>::SegmentReturnType
-DenseBase<Derived>::tail(Index size)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return SegmentReturnType(derived(), this->size() - size, size);
-}
-
-/** This is the const version of tail(Index).*/
-template<typename Derived>
-inline typename DenseBase<Derived>::ConstSegmentReturnType
-DenseBase<Derived>::tail(Index size) const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return ConstSegmentReturnType(derived(), this->size() - size, size);
-}
-
-/** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this
-  *
-  * \only_for_vectors
-  *
-  * The template parameter \a Size is the number of coefficients in the block
-  *
-  * \param start the index of the first element of the sub-vector
-  *
-  * Example: \include MatrixBase_template_int_segment.cpp
-  * Output: \verbinclude MatrixBase_template_int_segment.out
-  *
-  * \sa class Block
-  */
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::segment(Index start)
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return typename FixedSegmentReturnType<Size>::Type(derived(), start);
-}
-
-/** This is the const version of segment<int>(Index).*/
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::segment(Index start) const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return typename ConstFixedSegmentReturnType<Size>::Type(derived(), start);
-}
-
-/** \returns a fixed-size expression of the first coefficients of *this.
-  *
-  * \only_for_vectors
-  *
-  * The template parameter \a Size is the number of coefficients in the block
-  *
-  * Example: \include MatrixBase_template_int_start.cpp
-  * Output: \verbinclude MatrixBase_template_int_start.out
-  *
-  * \sa class Block
-  */
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::head()
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return typename FixedSegmentReturnType<Size>::Type(derived(), 0);
-}
-
-/** This is the const version of head<int>().*/
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::head() const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return typename ConstFixedSegmentReturnType<Size>::Type(derived(), 0);
-}
-
-/** \returns a fixed-size expression of the last coefficients of *this.
-  *
-  * \only_for_vectors
-  *
-  * The template parameter \a Size is the number of coefficients in the block
-  *
-  * Example: \include MatrixBase_template_int_end.cpp
-  * Output: \verbinclude MatrixBase_template_int_end.out
-  *
-  * \sa class Block
-  */
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::tail()
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return typename FixedSegmentReturnType<Size>::Type(derived(), size() - Size);
-}
-
-/** This is the const version of tail<int>.*/
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::tail() const
-{
-  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
-  return typename ConstFixedSegmentReturnType<Size>::Type(derived(), size() - Size);
-}
-
 } // end namespace Eigen
 
 #endif // EIGEN_VECTORBLOCK_H
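The segment/head/tail definitions deleted above are a relocation rather than a removal; the public API keeps working (upstream 3.2 appears to define them via a DenseBase plugin header). A sketch under that assumption (ours):

    #include <Eigen/Dense>
    int main() {
      Eigen::VectorXf v(6);
      v.setLinSpaced(6, 0.0f, 5.0f);
      Eigen::VectorXf h = v.head(2);        // first two coefficients
      Eigen::VectorXf s = v.segment(2, 3);  // three coefficients starting at index 2
      Eigen::Vector2f t = v.tail<2>();      // fixed-size variant of the last two
      return 0;
    }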
diff --git a/ThirdParty/eigen3/Eigen/src/Core/VectorwiseOp.h b/ThirdParty/eigen3/Eigen/src/Core/VectorwiseOp.h
index 862c0f33608..511564875a9 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/VectorwiseOp.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/VectorwiseOp.h
@@ -103,8 +103,8 @@ class PartialReduxExpr : internal::no_assignment_operator,
 
 #define EIGEN_MEMBER_FUNCTOR(MEMBER,COST)                               \
   template <typename ResultType>                                        \
-  struct member_##MEMBER {                                           \
-    EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER)                         \
+  struct member_##MEMBER {                                              \
+    EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER)                            \
     typedef ResultType result_type;                                     \
     template<typename Scalar, int Size> struct Cost                     \
     { enum { value = COST }; };                                         \
@@ -233,6 +233,28 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
                        Direction==Vertical   ? 1 : m_matrix.rows(),
                        Direction==Horizontal ? 1 : m_matrix.cols());
     }
+    
+    template<typename OtherDerived> struct OppositeExtendedType {
+      typedef Replicate<OtherDerived,
+                        Direction==Horizontal ? 1 : ExpressionType::RowsAtCompileTime,
+                        Direction==Vertical   ? 1 : ExpressionType::ColsAtCompileTime> Type;
+    };
+
+    /** \internal
+      * Replicates a vector in the opposite direction to match the size of \c *this */
+    template<typename OtherDerived>
+    typename OppositeExtendedType<OtherDerived>::Type
+    extendedToOpposite(const DenseBase<OtherDerived>& other) const
+    {
+      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(Direction==Horizontal, OtherDerived::MaxColsAtCompileTime==1),
+                          YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED)
+      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(Direction==Vertical, OtherDerived::MaxRowsAtCompileTime==1),
+                          YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED)
+      return typename OppositeExtendedType<OtherDerived>::Type
+                      (other.derived(),
+                       Direction==Horizontal  ? 1 : m_matrix.rows(),
+                       Direction==Vertical    ? 1 : m_matrix.cols());
+    }
 
   public:
 
@@ -255,6 +277,8 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
 
     /** \returns a row (or column) vector expression of the smallest coefficient
       * of each column (or row) of the referenced expression.
+      * 
+      * \warning the result is undefined if \c *this contains NaN.
       *
       * Example: \include PartialRedux_minCoeff.cpp
       * Output: \verbinclude PartialRedux_minCoeff.out
@@ -265,6 +289,8 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
 
     /** \returns a row (or column) vector expression of the largest coefficient
       * of each column (or row) of the referenced expression.
+      * 
+      * \warning the result is undefined if \c *this contains NaN.
       *
       * Example: \include PartialRedux_maxCoeff.cpp
       * Output: \verbinclude PartialRedux_maxCoeff.out
@@ -504,6 +530,23 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
       EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
       return m_matrix / extendedTo(other.derived());
     }
+    
+    /** \returns an expression where each column (or row) of the referenced matrix is normalized.
+      * The referenced matrix is \b not modified.
+      * \sa MatrixBase::normalized(), normalize()
+      */
+    CwiseBinaryOp<internal::scalar_quotient_op<Scalar>,
+                  const ExpressionTypeNestedCleaned,
+                  const typename OppositeExtendedType<typename ReturnType<internal::member_norm,RealScalar>::Type>::Type>
+    normalized() const { return m_matrix.cwiseQuotient(extendedToOpposite(this->norm())); }
+    
+    
+    /** Normalizes in-place each row or column of the referenced matrix.
+      * \sa MatrixBase::normalize(), normalized()
+      */
+    void normalize() {
+      m_matrix = this->normalized();
+    }
 
 /////////// Geometry module ///////////
 
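The new partial-reduction normalized()/normalize() pair builds on extendedToOpposite() introduced above. A usage sketch (ours, not from the patch):

    #include <Eigen/Dense>
    int main() {
      Eigen::MatrixXd M = Eigen::MatrixXd::Random(3, 4);
      Eigen::MatrixXd N = M.colwise().normalized();  // each column scaled to unit norm; M untouched
      M.rowwise().normalize();                       // in place: each row scaled to unit norm
      return 0;
    }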
diff --git a/ThirdParty/eigen3/Eigen/src/Core/Visitor.h b/ThirdParty/eigen3/Eigen/src/Core/Visitor.h
index 916bfd096a9..64867b7a2cb 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/Visitor.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/Visitor.h
@@ -164,25 +164,25 @@ struct functor_traits<max_coeff_visitor<Scalar> > {
 
 } // end namespace internal
 
-/** \returns the minimum of all coefficients of *this
-  * and puts in *row and *col its location.
+/** \returns the minimum of all coefficients of *this and puts in *row and *col its location.
+  * \warning the result is undefined if \c *this contains NaN.
   *
   * \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::minCoeff()
   */
 template<typename Derived>
 template<typename IndexType>
 typename internal::traits<Derived>::Scalar
-DenseBase<Derived>::minCoeff(IndexType* row, IndexType* col) const
+DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const
 {
   internal::min_coeff_visitor<Derived> minVisitor;
   this->visit(minVisitor);
-  *row = minVisitor.row;
-  if (col) *col = minVisitor.col;
+  *rowId = minVisitor.row;
+  if (colId) *colId = minVisitor.col;
   return minVisitor.res;
 }
 
-/** \returns the minimum of all coefficients of *this
-  * and puts in *index its location.
+/** \returns the minimum of all coefficients of *this and puts its location in *index.
+  * \warning the result is undefined if \c *this contains NaN.
   *
   * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::minCoeff()
   */
@@ -198,25 +198,25 @@ DenseBase<Derived>::minCoeff(IndexType* index) const
   return minVisitor.res;
 }
 
-/** \returns the maximum of all coefficients of *this
-  * and puts in *row and *col its location.
+/** \returns the maximum of all coefficients of *this and puts its location in *row and *col.
+  * \warning the result is undefined if \c *this contains NaN.
   *
   * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff()
   */
 template<typename Derived>
 template<typename IndexType>
 typename internal::traits<Derived>::Scalar
-DenseBase<Derived>::maxCoeff(IndexType* row, IndexType* col) const
+DenseBase<Derived>::maxCoeff(IndexType* rowPtr, IndexType* colPtr) const
 {
   internal::max_coeff_visitor<Derived> maxVisitor;
   this->visit(maxVisitor);
-  *row = maxVisitor.row;
-  if (col) *col = maxVisitor.col;
+  *rowPtr = maxVisitor.row;
+  if (colPtr) *colPtr = maxVisitor.col;
   return maxVisitor.res;
 }
 
-/** \returns the maximum of all coefficients of *this
-  * and puts in *index its location.
+/** \returns the maximum of all coefficients of *this and puts its location in *index.
+  * \warning the result is undefined if \c *this contains NaN.
   *
   * \sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff()
   */
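A short usage sketch of the visitor-based reductions documented above (illustrative; per the warnings, the input must be NaN-free):

#include <Eigen/Core>

int main()
{
  Eigen::MatrixXf m(2,2);
  m << 3, 1,
       4, 1;
  Eigen::MatrixXf::Index i, j;
  float mx = m.maxCoeff(&i, &j); // mx == 4, (i,j) == (1,0)
  float mn = m.minCoeff(&i, &j); // mn == 1; first match in column-major order: (0,1)
  return int(mx + mn);
}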
diff --git a/ThirdParty/eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h b/ThirdParty/eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h
index 75de1931198..e4089962dea 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h
@@ -173,6 +173,9 @@ template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const
 template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return psub<Packet4f>(p4f_ZERO, a); }
 template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return psub<Packet4i>(p4i_ZERO, a); }
 
+template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+
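pconj is the identity on real packets; the specializations above let complex-aware kernels call it unconditionally. A scalar model (illustrative):

#include <complex>

double conj_model(double a) { return a; }  // real scalars: conj(a) == a
std::complex<double> conj_model(const std::complex<double>& a) { return std::conj(a); }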
 template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_madd(a,b,p4f_ZERO); }
 /* Commented out: it's actually slower than processing it as scalars
  *
diff --git a/ThirdParty/eigen3/Eigen/src/Core/arch/NEON/PacketMath.h b/ThirdParty/eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
index a20250f7c65..163bac215e6 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -115,6 +115,9 @@ template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const
 template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
 template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }
 
+template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+
 template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }
 
@@ -188,15 +191,15 @@ template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)   { EI
 template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)
 {
   float32x2_t lo, hi;
-  lo = vdup_n_f32(*from);
-  hi = vdup_n_f32(*(from+1));
+  lo = vld1_dup_f32(from);
+  hi = vld1_dup_f32(from+1);
   return vcombine_f32(lo, hi);
 }
 template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)
 {
   int32x2_t lo, hi;
-  lo = vdup_n_s32(*from);
-  hi = vdup_n_s32(*(from+1));
+  lo = vld1_dup_s32(from);
+  hi = vld1_dup_s32(from+1);
   return vcombine_s32(lo, hi);
 }
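vld1_dup loads and duplicates in a single instruction, whereas vdup_n_f32(*from) forces a scalar load first. A scalar model of ploaddup's semantics (illustrative):

// Given from = {a, b}, ploaddup produces the packet {a, a, b, b}.
void ploaddup_model(const float* from, float out[4])
{
  out[0] = from[0]; out[1] = from[0];
  out[2] = from[1]; out[3] = from[1];
}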
 
@@ -237,15 +240,12 @@ template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s
 template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
 {
   float32x2_t a_lo, a_hi, sum;
-  float s[2];
 
   a_lo = vget_low_f32(a);
   a_hi = vget_high_f32(a);
   sum = vpadd_f32(a_lo, a_hi);
   sum = vpadd_f32(sum, sum);
-  vst1_f32(s, sum);
-
-  return s[0];
+  return vget_lane_f32(sum, 0);
 }
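The reductions in this hunk now read the result lane directly with vget_lane instead of a store/reload through a stack array. A scalar model of the pairwise-add pattern (illustrative):

// vpadd(lo, hi) = {lo0+lo1, hi0+hi1}; applying it twice sums all four lanes.
float predux_model(const float a[4])
{
  float s01 = a[0] + a[1];
  float s23 = a[2] + a[3];
  return s01 + s23; // what vget_lane_f32(sum, 0) extracts in the vectorized code
}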
 
 template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
@@ -271,15 +271,12 @@ template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
 template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
 {
   int32x2_t a_lo, a_hi, sum;
-  int32_t s[2];
 
   a_lo = vget_low_s32(a);
   a_hi = vget_high_s32(a);
   sum = vpadd_s32(a_lo, a_hi);
   sum = vpadd_s32(sum, sum);
-  vst1_s32(s, sum);
-
-  return s[0];
+  return vget_lane_s32(sum, 0);
 }
 
 template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
@@ -307,7 +304,6 @@ template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
 template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
 {
   float32x2_t a_lo, a_hi, prod;
-  float s[2];
 
   // Get a_lo = |a1|a2| and a_hi = |a3|a4|
   a_lo = vget_low_f32(a);
@@ -316,14 +312,12 @@ template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
   prod = vmul_f32(a_lo, a_hi);
   // Multiply prod with its swapped value |a2*a4|a1*a3|
   prod = vmul_f32(prod, vrev64_f32(prod));
-  vst1_f32(s, prod);
 
-  return s[0];
+  return vget_lane_f32(prod, 0);
 }
 template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
 {
   int32x2_t a_lo, a_hi, prod;
-  int32_t s[2];
 
   // Get a_lo = |a1|a2| and a_hi = |a3|a4|
   a_lo = vget_low_s32(a);
@@ -332,65 +326,58 @@ template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
   prod = vmul_s32(a_lo, a_hi);
   // Multiply prod with its swapped value |a2*a4|a1*a3|
   prod = vmul_s32(prod, vrev64_s32(prod));
-  vst1_s32(s, prod);
 
-  return s[0];
+  return vget_lane_s32(prod, 0);
 }
 
 // min
 template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
 {
   float32x2_t a_lo, a_hi, min;
-  float s[2];
 
   a_lo = vget_low_f32(a);
   a_hi = vget_high_f32(a);
   min = vpmin_f32(a_lo, a_hi);
   min = vpmin_f32(min, min);
-  vst1_f32(s, min);
 
-  return s[0];
+  return vget_lane_f32(min, 0);
 }
+
 template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
 {
   int32x2_t a_lo, a_hi, min;
-  int32_t s[2];
 
   a_lo = vget_low_s32(a);
   a_hi = vget_high_s32(a);
   min = vpmin_s32(a_lo, a_hi);
   min = vpmin_s32(min, min);
-  vst1_s32(s, min);
-
-  return s[0];
+
+  return vget_lane_s32(min, 0);
 }
 
 // max
 template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
 {
   float32x2_t a_lo, a_hi, max;
-  float s[2];
 
   a_lo = vget_low_f32(a);
   a_hi = vget_high_f32(a);
   max = vpmax_f32(a_lo, a_hi);
   max = vpmax_f32(max, max);
-  vst1_f32(s, max);
 
-  return s[0];
+  return vget_lane_f32(max, 0);
 }
+
 template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
 {
   int32x2_t a_lo, a_hi, max;
-  int32_t s[2];
 
   a_lo = vget_low_s32(a);
   a_hi = vget_high_s32(a);
   max = vpmax_s32(a_lo, a_hi);
  max = vpmax_s32(max, max);
-  vst1_s32(s, max);
 
-  return s[0];
+  return vget_lane_s32(max, 0);
 }
 
 // this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors,
diff --git a/ThirdParty/eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h b/ThirdParty/eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h
index d917b6f1d01..3376a984e5e 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h
@@ -135,13 +135,16 @@ Packet4f pexp<Packet4f>(const Packet4f& _x)
   /* express exp(x) as exp(g + n*log(2)) */
   fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
 
-  /* how to perform a floorf with SSE: just below */
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  fx = _mm_floor_ps(fx);
+#else
   emm0 = _mm_cvttps_epi32(fx);
   tmp  = _mm_cvtepi32_ps(emm0);
  /* if greater, subtract 1 */
   Packet4f mask = _mm_cmpgt_ps(tmp, fx);
   mask = _mm_and_ps(mask, p4f_1);
   fx = psub(tmp, mask);
+#endif
 
   tmp = pmul(fx, p4f_cephes_exp_C1);
   Packet4f z = pmul(fx, p4f_cephes_exp_C2);
@@ -165,6 +168,79 @@ Packet4f pexp<Packet4f>(const Packet4f& _x)
   emm0 = _mm_slli_epi32(emm0, 23);
   return pmul(y, _mm_castsi128_ps(emm0));
 }
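The SSE2 fallback above emulates floorf with a truncating convert plus a conditional subtract. A scalar model (illustrative, for inputs within int range, which the exp clamp guarantees):

float floor_model(float fx)
{
  float tmp = (float)(int)fx;           // cvttps/cvtepi32 round-trip: rounds toward zero
  return (tmp > fx) ? tmp - 1.0f : tmp; // truncation overshoots for negative fx; subtract 1
}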
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet2d pexp<Packet2d>(const Packet2d& _x)
+{
+  Packet2d x = _x;
+
+  _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);
+  _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);
+  _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
+
+  _EIGEN_DECLARE_CONST_Packet2d(exp_hi,  709.437);
+  _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);
+
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
+
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
+
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
+
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
+  static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);
+
+  Packet2d tmp = _mm_setzero_pd(), fx;
+  Packet4i emm0;
+
+  // clamp x
+  x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);
+  /* express exp(x) as exp(g + n*log(2)) */
+  fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half);
+
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  fx = _mm_floor_pd(fx);
+#else
+  emm0 = _mm_cvttpd_epi32(fx);
+  tmp  = _mm_cvtepi32_pd(emm0);
+  /* if greater, subtract 1 */
+  Packet2d mask = _mm_cmpgt_pd(tmp, fx);
+  mask = _mm_and_pd(mask, p2d_1);
+  fx = psub(tmp, mask);
+#endif
+
+  tmp = pmul(fx, p2d_cephes_exp_C1);
+  Packet2d z = pmul(fx, p2d_cephes_exp_C2);
+  x = psub(x, tmp);
+  x = psub(x, z);
+
+  Packet2d x2 = pmul(x,x);
+
+  Packet2d px = p2d_cephes_exp_p0;
+  px = pmadd(px, x2, p2d_cephes_exp_p1);
+  px = pmadd(px, x2, p2d_cephes_exp_p2);
+  px = pmul (px, x);
+
+  Packet2d qx = p2d_cephes_exp_q0;
+  qx = pmadd(qx, x2, p2d_cephes_exp_q1);
+  qx = pmadd(qx, x2, p2d_cephes_exp_q2);
+  qx = pmadd(qx, x2, p2d_cephes_exp_q3);
+
+  x = pdiv(px,psub(qx,px));
+  x = pmadd(p2d_2,x,p2d_1);
+
+  // build 2^n
+  emm0 = _mm_cvttpd_epi32(fx);
+  emm0 = _mm_add_epi32(emm0, p4i_1023_0);
+  emm0 = _mm_slli_epi32(emm0, 20);
+  emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
+  return pmul(x, _mm_castsi128_pd(emm0));
+}
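The "build 2^n" step of the new double-precision pexp writes n+1023 into the 11-bit exponent field of each IEEE-754 double; the shuffle moves the two 32-bit results into the high word of each 64-bit lane. A scalar model (illustrative, assuming no exponent over/underflow):

#include <cstdint>
#include <cstring>

double pow2_model(int n)
{
  uint64_t bits = (uint64_t)(n + 1023) << 52; // biased exponent, zero mantissa
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d; // exactly 2^n
}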
 
 /* evaluation of 4 sines at onces, using SSE2 intrinsics.
 
@@ -374,7 +450,7 @@ Packet4f psqrt<Packet4f>(const Packet4f& _x)
   Packet4f half = pmul(_x, pset1<Packet4f>(.5f));
 
   /* select only the inverse sqrt of non-zero inputs */
-  Packet4f non_zero_mask = _mm_cmpgt_ps(_x, pset1<Packet4f>(std::numeric_limits<float>::epsilon()));
+  Packet4f non_zero_mask = _mm_cmpge_ps(_x, pset1<Packet4f>((std::numeric_limits<float>::min)()));
   Packet4f x = _mm_and_ps(non_zero_mask, _mm_rsqrt_ps(_x));
 
   x = pmul(x, psub(pset1<Packet4f>(1.5f), pmul(half, pmul(x,x))));
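The mask change above compares against the smallest normalized float instead of epsilon, so small normal inputs (between FLT_MIN and epsilon) are no longer flushed to zero, while zeros and denormals still yield zero rather than feeding an _mm_rsqrt_ps infinity into the refinement. The surrounding kernel is one Newton-Raphson correction of the hardware rsqrt estimate; a scalar model (illustrative):

float rsqrt_refine_model(float a, float approx) // approx ~ 1/sqrt(a)
{
  return approx * (1.5f - 0.5f * a * approx * approx); // one Newton-Raphson step
}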
diff --git a/ThirdParty/eigen3/Eigen/src/Core/arch/SSE/PacketMath.h b/ThirdParty/eigen3/Eigen/src/Core/arch/SSE/PacketMath.h
index 10d9182190f..e256f4bac49 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/arch/SSE/PacketMath.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -48,6 +48,9 @@ template<> struct is_arithmetic<__m128d> { enum { value = true }; };
 #define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
   const Packet4f p4f_##NAME = pset1<Packet4f>(X)
 
+#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
+  const Packet2d p2d_##NAME = pset1<Packet2d>(X)
+
 #define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
   const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))
 
@@ -63,7 +66,7 @@ template<> struct packet_traits<float>  : default_packet_traits
     AlignedOnScalar = 1,
     size=4,
 
-    HasDiv    = 1,
+    HasDiv  = 1,
     HasSin  = EIGEN_FAST_MATH,
     HasCos  = EIGEN_FAST_MATH,
     HasLog  = 1,
@@ -79,7 +82,8 @@ template<> struct packet_traits<double> : default_packet_traits
     AlignedOnScalar = 1,
     size=2,
 
-    HasDiv    = 1
+    HasDiv  = 1,
+    HasExp  = 1
   };
 };
 template<> struct packet_traits<int>    : default_packet_traits
@@ -137,6 +141,10 @@ template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
   return psub(_mm_setr_epi32(0,0,0,0), a);
 }
 
+template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+
 template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
 template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
@@ -169,18 +177,26 @@ template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const
 template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
 {
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return _mm_min_epi32(a,b);
+#else
  // after some benchmarking, this version *is* faster than a scalar implementation
   Packet4i mask = _mm_cmplt_epi32(a,b);
   return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
+#endif
 }
 
 template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
 template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
 {
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return _mm_max_epi32(a,b);
+#else
  // after some benchmarking, this version *is* faster than a scalar implementation
   Packet4i mask = _mm_cmpgt_epi32(a,b);
   return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
+#endif
 }
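With SSE4.1, pmin/pmax map to the single instructions _mm_min_epi32/_mm_max_epi32; the SSE2 fallback keeps the branch-free compare-and-blend. A scalar model of that blend (illustrative):

int select_min_model(int a, int b)
{
  int mask = -(int)(a < b);        // all-ones if a < b, else zero (like _mm_cmplt_epi32)
  return (mask & a) | (~mask & b); // and / andnot / or, exactly as in the fallback
}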
 
 template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/ThirdParty/eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h
index d018b079ef9..780fa74d3f7 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -527,9 +527,16 @@ struct gebp_kernel
     ResPacketSize = Traits::ResPacketSize
   };
 
-  EIGEN_DONT_INLINE EIGEN_FLATTEN_ATTRIB
+  EIGEN_DONT_INLINE
   void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha,
-                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, RhsScalar* unpackedB = 0)
+                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, RhsScalar* unpackedB=0);
+};
+
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+EIGEN_DONT_INLINE
+void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
+  ::operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha,
+               Index strideA, Index strideB, Index offsetA, Index offsetB, RhsScalar* unpackedB)
   {
     Traits traits;
     
@@ -1089,7 +1096,7 @@ EIGEN_ASM_COMMENT("mybegin4");
       }
     }
   }
-};
+
 
 #undef CJMADD
 
@@ -1110,80 +1117,83 @@ EIGEN_ASM_COMMENT("mybegin4");
 template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode>
 struct gemm_pack_lhs
 {
-  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows,
-                  Index stride=0, Index offset=0)
+  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride=0, Index offset=0);
+};
+
+template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, StorageOrder, Conjugate, PanelMode>
+  ::operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride, Index offset)
+{
+  typedef typename packet_traits<Scalar>::type Packet;
+  enum { PacketSize = packet_traits<Scalar>::size };
+
+  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
+  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+  eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) );
+  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+  const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride);
+  Index count = 0;
+  Index peeled_mc = (rows/Pack1)*Pack1;
+  for(Index i=0; i<peeled_mc; i+=Pack1)
   {
-    typedef typename packet_traits<Scalar>::type Packet;
-    enum { PacketSize = packet_traits<Scalar>::size };
-
-    EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
-    eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
-    eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) );
-    conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
-    const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride);
-    Index count = 0;
-    Index peeled_mc = (rows/Pack1)*Pack1;
-    for(Index i=0; i<peeled_mc; i+=Pack1)
-    {
-      if(PanelMode) count += Pack1 * offset;
+    if(PanelMode) count += Pack1 * offset;
 
-      if(StorageOrder==ColMajor)
+    if(StorageOrder==ColMajor)
+    {
+      for(Index k=0; k<depth; k++)
       {
-        for(Index k=0; k<depth; k++)
-        {
-          Packet A, B, C, D;
-          if(Pack1>=1*PacketSize) A = ploadu<Packet>(&lhs(i+0*PacketSize, k));
-          if(Pack1>=2*PacketSize) B = ploadu<Packet>(&lhs(i+1*PacketSize, k));
-          if(Pack1>=3*PacketSize) C = ploadu<Packet>(&lhs(i+2*PacketSize, k));
-          if(Pack1>=4*PacketSize) D = ploadu<Packet>(&lhs(i+3*PacketSize, k));
-          if(Pack1>=1*PacketSize) { pstore(blockA+count, cj.pconj(A)); count+=PacketSize; }
-          if(Pack1>=2*PacketSize) { pstore(blockA+count, cj.pconj(B)); count+=PacketSize; }
-          if(Pack1>=3*PacketSize) { pstore(blockA+count, cj.pconj(C)); count+=PacketSize; }
-          if(Pack1>=4*PacketSize) { pstore(blockA+count, cj.pconj(D)); count+=PacketSize; }
-        }
+        Packet A, B, C, D;
+        if(Pack1>=1*PacketSize) A = ploadu<Packet>(&lhs(i+0*PacketSize, k));
+        if(Pack1>=2*PacketSize) B = ploadu<Packet>(&lhs(i+1*PacketSize, k));
+        if(Pack1>=3*PacketSize) C = ploadu<Packet>(&lhs(i+2*PacketSize, k));
+        if(Pack1>=4*PacketSize) D = ploadu<Packet>(&lhs(i+3*PacketSize, k));
+        if(Pack1>=1*PacketSize) { pstore(blockA+count, cj.pconj(A)); count+=PacketSize; }
+        if(Pack1>=2*PacketSize) { pstore(blockA+count, cj.pconj(B)); count+=PacketSize; }
+        if(Pack1>=3*PacketSize) { pstore(blockA+count, cj.pconj(C)); count+=PacketSize; }
+        if(Pack1>=4*PacketSize) { pstore(blockA+count, cj.pconj(D)); count+=PacketSize; }
       }
-      else
+    }
+    else
+    {
+      for(Index k=0; k<depth; k++)
       {
-        for(Index k=0; k<depth; k++)
+        // TODO add a vectorized transpose here
+        Index w=0;
+        for(; w<Pack1-3; w+=4)
         {
-          // TODO add a vectorized transpose here
-          Index w=0;
-          for(; w<Pack1-3; w+=4)
-          {
-            Scalar a(cj(lhs(i+w+0, k))),
-                   b(cj(lhs(i+w+1, k))),
-                   c(cj(lhs(i+w+2, k))),
-                   d(cj(lhs(i+w+3, k)));
-            blockA[count++] = a;
-            blockA[count++] = b;
-            blockA[count++] = c;
-            blockA[count++] = d;
-          }
-          if(Pack1%4)
-            for(;w<Pack1;++w)
-              blockA[count++] = cj(lhs(i+w, k));
+          Scalar a(cj(lhs(i+w+0, k))),
+                 b(cj(lhs(i+w+1, k))),
+                 c(cj(lhs(i+w+2, k))),
+                 d(cj(lhs(i+w+3, k)));
+          blockA[count++] = a;
+          blockA[count++] = b;
+          blockA[count++] = c;
+          blockA[count++] = d;
         }
+        if(Pack1%4)
+          for(;w<Pack1;++w)
+            blockA[count++] = cj(lhs(i+w, k));
       }
-      if(PanelMode) count += Pack1 * (stride-offset-depth);
-    }
-    if(rows-peeled_mc>=Pack2)
-    {
-      if(PanelMode) count += Pack2*offset;
-      for(Index k=0; k<depth; k++)
-        for(Index w=0; w<Pack2; w++)
-          blockA[count++] = cj(lhs(peeled_mc+w, k));
-      if(PanelMode) count += Pack2 * (stride-offset-depth);
-      peeled_mc += Pack2;
-    }
-    for(Index i=peeled_mc; i<rows; i++)
-    {
-      if(PanelMode) count += offset;
-      for(Index k=0; k<depth; k++)
-        blockA[count++] = cj(lhs(i, k));
-      if(PanelMode) count += (stride-offset-depth);
     }
+    if(PanelMode) count += Pack1 * (stride-offset-depth);
   }
-};
+  if(rows-peeled_mc>=Pack2)
+  {
+    if(PanelMode) count += Pack2*offset;
+    for(Index k=0; k<depth; k++)
+      for(Index w=0; w<Pack2; w++)
+        blockA[count++] = cj(lhs(peeled_mc+w, k));
+    if(PanelMode) count += Pack2 * (stride-offset-depth);
+    peeled_mc += Pack2;
+  }
+  for(Index i=peeled_mc; i<rows; i++)
+  {
+    if(PanelMode) count += offset;
+    for(Index k=0; k<depth; k++)
+      blockA[count++] = cj(lhs(i, k));
+    if(PanelMode) count += (stride-offset-depth);
+  }
+}
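The refactoring above (applied likewise to gebp_kernel and both gemm_pack_rhs specializations) splits each heavy operator() into an in-class declaration and one out-of-line template definition, so EIGEN_DONT_INLINE attaches to a single symbol. The shape of the pattern, in miniature (names are illustrative only):

template<typename Scalar>
struct packer
{
  void operator()(Scalar* dst, const Scalar* src, int n); // declaration only
};

template<typename Scalar>
void packer<Scalar>::operator()(Scalar* dst, const Scalar* src, int n)
{
  for(int i = 0; i < n; ++i) // the heavy body now lives out of line
    dst[i] = src[i];
}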
 
 // copy a complete panel of the rhs
 // this version is optimized for column major matrices
@@ -1197,92 +1207,98 @@ struct gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, PanelMode>
 {
   typedef typename packet_traits<Scalar>::type Packet;
   enum { PacketSize = packet_traits<Scalar>::size };
-  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols,
-                  Index stride=0, Index offset=0)
+  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, PanelMode>
+  ::operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride, Index offset)
+{
+  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
+  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+  Index packet_cols = (cols/nr) * nr;
+  Index count = 0;
+  for(Index j2=0; j2<packet_cols; j2+=nr)
   {
-    EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
-    eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
-    conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
-    Index packet_cols = (cols/nr) * nr;
-    Index count = 0;
-    for(Index j2=0; j2<packet_cols; j2+=nr)
+    // skip what we have before
+    if(PanelMode) count += nr * offset;
+    const Scalar* b0 = &rhs[(j2+0)*rhsStride];
+    const Scalar* b1 = &rhs[(j2+1)*rhsStride];
+    const Scalar* b2 = &rhs[(j2+2)*rhsStride];
+    const Scalar* b3 = &rhs[(j2+3)*rhsStride];
+    for(Index k=0; k<depth; k++)
     {
-      // skip what we have before
-      if(PanelMode) count += nr * offset;
-      const Scalar* b0 = &rhs[(j2+0)*rhsStride];
-      const Scalar* b1 = &rhs[(j2+1)*rhsStride];
-      const Scalar* b2 = &rhs[(j2+2)*rhsStride];
-      const Scalar* b3 = &rhs[(j2+3)*rhsStride];
-      for(Index k=0; k<depth; k++)
-      {
-                  blockB[count+0] = cj(b0[k]);
-                  blockB[count+1] = cj(b1[k]);
-        if(nr==4) blockB[count+2] = cj(b2[k]);
-        if(nr==4) blockB[count+3] = cj(b3[k]);
-        count += nr;
-      }
-      // skip what we have after
-      if(PanelMode) count += nr * (stride-offset-depth);
+                blockB[count+0] = cj(b0[k]);
+                blockB[count+1] = cj(b1[k]);
+      if(nr==4) blockB[count+2] = cj(b2[k]);
+      if(nr==4) blockB[count+3] = cj(b3[k]);
+      count += nr;
     }
+    // skip what we have after
+    if(PanelMode) count += nr * (stride-offset-depth);
+  }
 
-    // copy the remaining columns one at a time (nr==1)
-    for(Index j2=packet_cols; j2<cols; ++j2)
+  // copy the remaining columns one at a time (nr==1)
+  for(Index j2=packet_cols; j2<cols; ++j2)
+  {
+    if(PanelMode) count += offset;
+    const Scalar* b0 = &rhs[(j2+0)*rhsStride];
+    for(Index k=0; k<depth; k++)
     {
-      if(PanelMode) count += offset;
-      const Scalar* b0 = &rhs[(j2+0)*rhsStride];
-      for(Index k=0; k<depth; k++)
-      {
-        blockB[count] = cj(b0[k]);
-        count += 1;
-      }
-      if(PanelMode) count += (stride-offset-depth);
+      blockB[count] = cj(b0[k]);
+      count += 1;
     }
+    if(PanelMode) count += (stride-offset-depth);
   }
-};
+}
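In PanelMode every strip of nr columns owns stride scalars per column, of which only depth are written, starting at offset; the count adjustments above skip the unwritten head and tail. The bookkeeping as plain arithmetic (illustrative):

int panel_advance_model(int nr, int stride, int offset, int depth)
{
  int before = nr * offset;                    // "skip what we have before"
  int body   = nr * depth;                     // coefficients actually packed
  int after  = nr * (stride - offset - depth); // "skip what we have after"
  return before + body + after;                // == nr * stride
}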
 
 // this version is optimized for row major matrices
 template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
 struct gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, PanelMode>
 {
   enum { PacketSize = packet_traits<Scalar>::size };
-  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols,
-                  Index stride=0, Index offset=0)
+  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, PanelMode>
+  ::operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride, Index offset)
+{
+  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
+  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+  Index packet_cols = (cols/nr) * nr;
+  Index count = 0;
+  for(Index j2=0; j2<packet_cols; j2+=nr)
   {
-    EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
-    eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
-    conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
-    Index packet_cols = (cols/nr) * nr;
-    Index count = 0;
-    for(Index j2=0; j2<packet_cols; j2+=nr)
+    // skip what we have before
+    if(PanelMode) count += nr * offset;
+    for(Index k=0; k<depth; k++)
     {
-      // skip what we have before
-      if(PanelMode) count += nr * offset;
-      for(Index k=0; k<depth; k++)
-      {
-        const Scalar* b0 = &rhs[k*rhsStride + j2];
-                  blockB[count+0] = cj(b0[0]);
-                  blockB[count+1] = cj(b0[1]);
-        if(nr==4) blockB[count+2] = cj(b0[2]);
-        if(nr==4) blockB[count+3] = cj(b0[3]);
-        count += nr;
-      }
-      // skip what we have after
-      if(PanelMode) count += nr * (stride-offset-depth);
+      const Scalar* b0 = &rhs[k*rhsStride + j2];
+                blockB[count+0] = cj(b0[0]);
+                blockB[count+1] = cj(b0[1]);
+      if(nr==4) blockB[count+2] = cj(b0[2]);
+      if(nr==4) blockB[count+3] = cj(b0[3]);
+      count += nr;
     }
-    // copy the remaining columns one at a time (nr==1)
-    for(Index j2=packet_cols; j2<cols; ++j2)
+    // skip what we have after
+    if(PanelMode) count += nr * (stride-offset-depth);
+  }
+  // copy the remaining columns one at a time (nr==1)
+  for(Index j2=packet_cols; j2<cols; ++j2)
+  {
+    if(PanelMode) count += offset;
+    const Scalar* b0 = &rhs[j2];
+    for(Index k=0; k<depth; k++)
     {
-      if(PanelMode) count += offset;
-      const Scalar* b0 = &rhs[j2];
-      for(Index k=0; k<depth; k++)
-      {
-        blockB[count] = cj(b0[k*rhsStride]);
-        count += 1;
-      }
-      if(PanelMode) count += stride-offset-depth;
+      blockB[count] = cj(b0[k*rhsStride]);
+      count += 1;
     }
+    if(PanelMode) count += stride-offset-depth;
   }
-};
+}
 
 } // end namespace internal
 
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h b/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h
index 73a465ec5ee..3f5ffcf51c7 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h
@@ -50,6 +50,7 @@ template<
   typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
 struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
 {
+
 typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
 static void run(Index rows, Index cols, Index depth,
   const LhsScalar* _lhs, Index lhsStride,
@@ -169,7 +170,6 @@ static void run(Index rows, Index cols, Index depth,
       // vertical panel which is, in practice, a very low number.
       pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);
 
-
       // For each mc x kc block of the lhs's vertical panel...
       // (==GEPP_VAR1)
       for(Index i2=0; i2<rows; i2+=mc)
@@ -183,7 +183,6 @@ static void run(Index rows, Index cols, Index depth,
 
         // Everything is packed, we can now call the block * panel kernel:
         gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
-
       }
     }
   }
@@ -204,7 +203,7 @@ struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
 template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
 struct gemm_functor
 {
-  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha,
+  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha,
                   BlockingType& blocking)
     : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
   {}
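This hunk and several below change alpha from a by-value Scalar to const Scalar&; that is free for float/double but avoids copies when Scalar is an expensive user-defined type. A sketch of the motivation (BigScalar is a hypothetical stand-in):

struct BigScalar { double limbs[8]; };  // hypothetical expensive scalar type

void scaleByValue(BigScalar) {}         // copies 64 bytes per call
void scaleByCRef(const BigScalar&) {}   // passes a pointer-sized reference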
@@ -395,7 +394,7 @@ class GeneralProduct<Lhs, Rhs, GemmProduct>
       EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
     }
 
-    template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+    template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
     {
       eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
 
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
index 432d3a9dc84..5c37639091c 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
@@ -12,6 +12,9 @@
 
 namespace Eigen { 
 
+template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjLhs, bool ConjRhs>
+struct selfadjoint_rank1_update;
+
 namespace internal {
 
 /**********************************************************************
@@ -39,7 +42,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
 {
   typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
   static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride,
-                                      const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha)
+                                      const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, const ResScalar& alpha)
   {
     general_matrix_matrix_triangular_product<Index,
         RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
@@ -55,7 +58,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
 {
   typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
   static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* _lhs, Index lhsStride,
-                                      const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha)
+                                      const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, const ResScalar& alpha)
   {
     const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
     const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
@@ -133,7 +136,7 @@ struct tribb_kernel
   enum {
     BlockSize  = EIGEN_PLAIN_ENUM_MAX(mr,nr)
   };
-  void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, ResScalar alpha, RhsScalar* workspace)
+  void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, const ResScalar& alpha, RhsScalar* workspace)
   {
     gebp_kernel<LhsScalar, RhsScalar, Index, mr, nr, ConjLhs, ConjRhs> gebp_kernel;
     Matrix<ResScalar,BlockSize,BlockSize,ColMajor> buffer;
@@ -180,31 +183,92 @@ struct tribb_kernel
 
 // high level API
 
+template<typename MatrixType, typename ProductType, int UpLo, bool IsOuterProduct>
+struct general_product_to_triangular_selector;
+
+
+template<typename MatrixType, typename ProductType, int UpLo>
+struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,true>
+{
+  static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha)
+  {
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+    
+    typedef typename internal::remove_all<typename ProductType::LhsNested>::type Lhs;
+    typedef internal::blas_traits<Lhs> LhsBlasTraits;
+    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;
+    typedef typename internal::remove_all<ActualLhs>::type _ActualLhs;
+    typename internal::add_const_on_value_type<ActualLhs>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
+    
+    typedef typename internal::remove_all<typename ProductType::RhsNested>::type Rhs;
+    typedef internal::blas_traits<Rhs> RhsBlasTraits;
+    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs;
+    typedef typename internal::remove_all<ActualRhs>::type _ActualRhs;
+    typename internal::add_const_on_value_type<ActualRhs>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived());
+
+    enum {
+      StorageOrder = (internal::traits<MatrixType>::Flags&RowMajorBit) ? RowMajor : ColMajor,
+      UseLhsDirectly = _ActualLhs::InnerStrideAtCompileTime==1,
+      UseRhsDirectly = _ActualRhs::InnerStrideAtCompileTime==1
+    };
+    
+    internal::gemv_static_vector_if<Scalar,Lhs::SizeAtCompileTime,Lhs::MaxSizeAtCompileTime,!UseLhsDirectly> static_lhs;
+    ei_declare_aligned_stack_constructed_variable(Scalar, actualLhsPtr, actualLhs.size(),
+      (UseLhsDirectly ? const_cast<Scalar*>(actualLhs.data()) : static_lhs.data()));
+    if(!UseLhsDirectly) Map<typename _ActualLhs::PlainObject>(actualLhsPtr, actualLhs.size()) = actualLhs;
+    
+    internal::gemv_static_vector_if<Scalar,Rhs::SizeAtCompileTime,Rhs::MaxSizeAtCompileTime,!UseRhsDirectly> static_rhs;
+    ei_declare_aligned_stack_constructed_variable(Scalar, actualRhsPtr, actualRhs.size(),
+      (UseRhsDirectly ? const_cast<Scalar*>(actualRhs.data()) : static_rhs.data()));
+    if(!UseRhsDirectly) Map<typename _ActualRhs::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
+    
+    
+    selfadjoint_rank1_update<Scalar,Index,StorageOrder,UpLo,
+                              LhsBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex,
+                              RhsBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex>
+          ::run(actualLhs.size(), mat.data(), mat.outerStride(), actualLhsPtr, actualRhsPtr, actualAlpha);
+  }
+};
+
+template<typename MatrixType, typename ProductType, int UpLo>
+struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,false>
+{
+  static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha)
+  {
+    typedef typename MatrixType::Index Index;
+    
+    typedef typename internal::remove_all<typename ProductType::LhsNested>::type Lhs;
+    typedef internal::blas_traits<Lhs> LhsBlasTraits;
+    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;
+    typedef typename internal::remove_all<ActualLhs>::type _ActualLhs;
+    typename internal::add_const_on_value_type<ActualLhs>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
+    
+    typedef typename internal::remove_all<typename ProductType::RhsNested>::type Rhs;
+    typedef internal::blas_traits<Rhs> RhsBlasTraits;
+    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs;
+    typedef typename internal::remove_all<ActualRhs>::type _ActualRhs;
+    typename internal::add_const_on_value_type<ActualRhs>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+    typename ProductType::Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived());
+
+    internal::general_matrix_matrix_triangular_product<Index,
+      typename Lhs::Scalar, _ActualLhs::Flags&RowMajorBit ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
+      typename Rhs::Scalar, _ActualRhs::Flags&RowMajorBit ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
+      MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor, UpLo>
+      ::run(mat.cols(), actualLhs.cols(),
+            &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &actualRhs.coeffRef(0,0), actualRhs.outerStride(),
+            mat.data(), mat.outerStride(), actualAlpha);
+  }
+};
+
 template<typename MatrixType, unsigned int UpLo>
 template<typename ProductDerived, typename _Lhs, typename _Rhs>
 TriangularView<MatrixType,UpLo>& TriangularView<MatrixType,UpLo>::assignProduct(const ProductBase<ProductDerived, _Lhs,_Rhs>& prod, const Scalar& alpha)
 {
-  typedef typename internal::remove_all<typename ProductDerived::LhsNested>::type Lhs;
-  typedef internal::blas_traits<Lhs> LhsBlasTraits;
-  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;
-  typedef typename internal::remove_all<ActualLhs>::type _ActualLhs;
-  typename internal::add_const_on_value_type<ActualLhs>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
-  
-  typedef typename internal::remove_all<typename ProductDerived::RhsNested>::type Rhs;
-  typedef internal::blas_traits<Rhs> RhsBlasTraits;
-  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs;
-  typedef typename internal::remove_all<ActualRhs>::type _ActualRhs;
-  typename internal::add_const_on_value_type<ActualRhs>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
-
-  typename ProductDerived::Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived());
-
-  internal::general_matrix_matrix_triangular_product<Index,
-    typename Lhs::Scalar, _ActualLhs::Flags&RowMajorBit ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
-    typename Rhs::Scalar, _ActualRhs::Flags&RowMajorBit ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
-    MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor, UpLo>
-    ::run(m_matrix.cols(), actualLhs.cols(),
-          &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &actualRhs.coeffRef(0,0), actualRhs.outerStride(),
-          const_cast<Scalar*>(m_matrix.data()), m_matrix.outerStride(), actualAlpha);
+  general_product_to_triangular_selector<MatrixType, ProductDerived, UpLo, (_Lhs::ColsAtCompileTime==1) || (_Rhs::RowsAtCompileTime==1)>::run(m_matrix.const_cast_derived(), prod.derived(), alpha);
   
   return *this;
 }
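The selector introduced above dispatches on whether the product is an outer product (either factor is a compile-time vector): rank-1 updates go to the new selfadjoint_rank1_update path, everything else to the blocked triangular GEMM. A usage sketch of the rank-1 path (illustrative, assuming TriangularView's operator+= for products as in Eigen 3.2):

#include <Eigen/Dense>

void rank1_into_lower(Eigen::MatrixXd& m,
                      const Eigen::VectorXd& u, const Eigen::VectorXd& v)
{
  // The lhs has ColsAtCompileTime==1 and the rhs has RowsAtCompileTime==1,
  // so IsOuterProduct is true and the rank-1 kernel is selected.
  m.triangularView<Eigen::Lower>() += u * v.transpose();
}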
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixVector.h b/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixVector.h
index 71eb7661dfc..c1cb784988d 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixVector.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixVector.h
@@ -49,6 +49,18 @@ typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
 typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
 
 EIGEN_DONT_INLINE static void run(
+  Index rows, Index cols,
+  const LhsScalar* lhs, Index lhsStride,
+  const RhsScalar* rhs, Index rhsIncr,
+  ResScalar* res, Index
+  #ifdef EIGEN_INTERNAL_DEBUGGING
+    resIncr
+  #endif
+  , RhsScalar alpha);
+};
+
+template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>::run(
   Index rows, Index cols,
   const LhsScalar* lhs, Index lhsStride,
   const RhsScalar* rhs, Index rhsIncr,
@@ -74,13 +86,14 @@ EIGEN_DONT_INLINE static void run(
   conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
   conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
   if(ConjugateRhs)
-    alpha = conj(alpha);
+    alpha = numext::conj(alpha);
 
   enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned };
   const Index columnsAtOnce = 4;
   const Index peels = 2;
   const Index LhsPacketAlignedMask = LhsPacketSize-1;
   const Index ResPacketAlignedMask = ResPacketSize-1;
+//  const Index PeelAlignedMask = ResPacketSize*peels-1;
   const Index size = rows;
   
   // How many coeffs of the result do we have to skip to be aligned.
@@ -273,7 +286,6 @@ EIGEN_DONT_INLINE static void run(
   } while(Vectorizable);
   #undef _EIGEN_ACCUMULATE_PACKETS
 }
-};
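Visible throughout the remaining hunks: scalar helpers such as conj and real are now qualified with the numext namespace rather than called unqualified or via internal, avoiding ambiguity with std:: overloads. A minimal model of such a namespace (illustrative, not Eigen's actual implementation):

#include <complex>

namespace numext_model {
  inline double real(double x) { return x; }
  inline double conj(double x) { return x; }
  template<typename T> inline T real(const std::complex<T>& x) { return x.real(); }
  template<typename T> inline std::complex<T> conj(const std::complex<T>& x) { return std::conj(x); }
}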
 
 /* Optimized row-major matrix * vector product:
 * This algorithm processes 4 rows at once, which allows one to both reduce
@@ -307,6 +319,15 @@ typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
 typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
   
 EIGEN_DONT_INLINE static void run(
+  Index rows, Index cols,
+  const LhsScalar* lhs, Index lhsStride,
+  const RhsScalar* rhs, Index rhsIncr,
+  ResScalar* res, Index resIncr,
+  ResScalar alpha);
+};
+
+template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>::run(
   Index rows, Index cols,
   const LhsScalar* lhs, Index lhsStride,
   const RhsScalar* rhs, Index rhsIncr,
@@ -334,6 +355,7 @@ EIGEN_DONT_INLINE static void run(
   const Index peels = 2;
   const Index RhsPacketAlignedMask = RhsPacketSize-1;
   const Index LhsPacketAlignedMask = LhsPacketSize-1;
+//   const Index PeelAlignedMask = RhsPacketSize*peels-1;
   const Index depth = cols;
 
   // How many coeffs of the result do we have to skip to be aligned.
@@ -543,7 +565,6 @@ EIGEN_DONT_INLINE static void run(
 
   #undef _EIGEN_ACCUMULATE_PACKETS
 }
-};
 
 } // end namespace internal
 
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixVector_MKL.h b/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixVector_MKL.h
index e9de6af3ed1..1cb9fe6b5a7 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixVector_MKL.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/GeneralMatrixVector_MKL.h
@@ -53,7 +53,7 @@ struct general_matrix_vector_product_gemv :
 #define EIGEN_MKL_GEMV_SPECIALIZE(Scalar) \
 template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
 struct general_matrix_vector_product<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs,Specialized> { \
-static EIGEN_DONT_INLINE void run( \
+static void run( \
   Index rows, Index cols, \
   const Scalar* lhs, Index lhsStride, \
   const Scalar* rhs, Index rhsIncr, \
@@ -70,7 +70,7 @@ static EIGEN_DONT_INLINE void run( \
 }; \
 template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
 struct general_matrix_vector_product<Index,Scalar,RowMajor,ConjugateLhs,Scalar,ConjugateRhs,Specialized> { \
-static EIGEN_DONT_INLINE void run( \
+static void run( \
   Index rows, Index cols, \
   const Scalar* lhs, Index lhsStride, \
   const Scalar* rhs, Index rhsIncr, \
@@ -92,7 +92,7 @@ struct general_matrix_vector_product_gemv<Index,EIGTYPE,LhsStorageOrder,Conjugat
 { \
 typedef Matrix<EIGTYPE,Dynamic,1,ColMajor> GEMVVector;\
 \
-static EIGEN_DONT_INLINE void run( \
+static void run( \
   Index rows, Index cols, \
   const EIGTYPE* lhs, Index lhsStride, \
   const EIGTYPE* rhs, Index rhsIncr, \
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
index 48209636eed..99cf9e0ae27 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
@@ -30,9 +30,9 @@ struct symm_pack_lhs
     for(Index k=i; k<i+BlockRows; k++)
     {
       for(Index w=0; w<h; w++)
-        blockA[count++] = conj(lhs(k, i+w)); // transposed
+        blockA[count++] = numext::conj(lhs(k, i+w)); // transposed
 
-      blockA[count++] = real(lhs(k,k));   // real (diagonal)
+      blockA[count++] = numext::real(lhs(k,k));   // real (diagonal)
 
       for(Index w=h+1; w<BlockRows; w++)
         blockA[count++] = lhs(i+w, k);          // normal
@@ -41,7 +41,7 @@ struct symm_pack_lhs
     // transposed copy
     for(Index k=i+BlockRows; k<cols; k++)
       for(Index w=0; w<BlockRows; w++)
-        blockA[count++] = conj(lhs(k, i+w)); // transposed
+        blockA[count++] = numext::conj(lhs(k, i+w)); // transposed
   }
   void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
   {
@@ -65,10 +65,10 @@ struct symm_pack_lhs
       for(Index k=0; k<i; k++)
         blockA[count++] = lhs(i, k);              // normal
 
-      blockA[count++] = real(lhs(i, i));       // real (diagonal)
+      blockA[count++] = numext::real(lhs(i, i));       // real (diagonal)
 
       for(Index k=i+1; k<cols; k++)
-        blockA[count++] = conj(lhs(k, i));     // transposed
+        blockA[count++] = numext::conj(lhs(k, i));     // transposed
     }
   }
 };
@@ -107,12 +107,12 @@ struct symm_pack_rhs
       // transpose
       for(Index k=k2; k<j2; k++)
       {
-        blockB[count+0] = conj(rhs(j2+0,k));
-        blockB[count+1] = conj(rhs(j2+1,k));
+        blockB[count+0] = numext::conj(rhs(j2+0,k));
+        blockB[count+1] = numext::conj(rhs(j2+1,k));
         if (nr==4)
         {
-          blockB[count+2] = conj(rhs(j2+2,k));
-          blockB[count+3] = conj(rhs(j2+3,k));
+          blockB[count+2] = numext::conj(rhs(j2+2,k));
+          blockB[count+3] = numext::conj(rhs(j2+3,k));
         }
         count += nr;
       }
@@ -124,11 +124,11 @@ struct symm_pack_rhs
         for (Index w=0 ; w<h; ++w)
           blockB[count+w] = rhs(k,j2+w);
 
-        blockB[count+h] = real(rhs(k,k));
+        blockB[count+h] = numext::real(rhs(k,k));
 
         // transpose
         for (Index w=h+1 ; w<nr; ++w)
-          blockB[count+w] = conj(rhs(j2+w,k));
+          blockB[count+w] = numext::conj(rhs(j2+w,k));
         count += nr;
         ++h;
       }
@@ -151,12 +151,12 @@ struct symm_pack_rhs
     {
       for(Index k=k2; k<end_k; k++)
       {
-        blockB[count+0] = conj(rhs(j2+0,k));
-        blockB[count+1] = conj(rhs(j2+1,k));
+        blockB[count+0] = numext::conj(rhs(j2+0,k));
+        blockB[count+1] = numext::conj(rhs(j2+1,k));
         if (nr==4)
         {
-          blockB[count+2] = conj(rhs(j2+2,k));
-          blockB[count+3] = conj(rhs(j2+3,k));
+          blockB[count+2] = numext::conj(rhs(j2+2,k));
+          blockB[count+3] = numext::conj(rhs(j2+3,k));
         }
         count += nr;
       }
@@ -169,13 +169,13 @@ struct symm_pack_rhs
       Index half = (std::min)(end_k,j2);
       for(Index k=k2; k<half; k++)
       {
-        blockB[count] = conj(rhs(j2,k));
+        blockB[count] = numext::conj(rhs(j2,k));
         count += 1;
       }
 
       if(half==j2 && half<k2+rows)
       {
-        blockB[count] = real(rhs(j2,j2));
+        blockB[count] = numext::real(rhs(j2,j2));
         count += 1;
       }
       else
@@ -211,7 +211,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,Co
     const Scalar* lhs, Index lhsStride,
     const Scalar* rhs, Index rhsStride,
     Scalar* res,       Index resStride,
-    Scalar alpha)
+    const Scalar& alpha)
   {
     product_selfadjoint_matrix<Scalar, Index,
       EIGEN_LOGICAL_XOR(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
@@ -234,7 +234,18 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
     const Scalar* _lhs, Index lhsStride,
     const Scalar* _rhs, Index rhsStride,
     Scalar* res,        Index resStride,
-    Scalar alpha)
+    const Scalar& alpha);
+};
+
+template <typename Scalar, typename Index,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs>
+EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>::run(
+    Index rows, Index cols,
+    const Scalar* _lhs, Index lhsStride,
+    const Scalar* _rhs, Index rhsStride,
+    Scalar* res,        Index resStride,
+    const Scalar& alpha)
   {
     Index size = rows;
 
@@ -301,7 +312,6 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
       }
     }
   }
-};
 
 // matrix * selfadjoint product
 template <typename Scalar, typename Index,
@@ -315,7 +325,18 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLh
     const Scalar* _lhs, Index lhsStride,
     const Scalar* _rhs, Index rhsStride,
     Scalar* res,        Index resStride,
-    Scalar alpha)
+    const Scalar& alpha);
+};
+
+template <typename Scalar, typename Index,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs>
+EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>::run(
+    Index rows, Index cols,
+    const Scalar* _lhs, Index lhsStride,
+    const Scalar* _rhs, Index rhsStride,
+    Scalar* res,        Index resStride,
+    const Scalar& alpha)
   {
     Index size = cols;
 
@@ -353,7 +374,6 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLh
       }
     }
   }
-};
 
 } // end namespace internal
 
@@ -383,7 +403,7 @@ struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,RhsMode,false>
     RhsIsSelfAdjoint = (RhsMode&SelfAdjoint)==SelfAdjoint
   };
 
-  template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+  template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
   {
     eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
 
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h b/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h
index 4e5c4125c01..dfa687fefe6 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h
@@ -23,7 +23,7 @@
  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+//
  ********************************************************************************
  *   Content : Eigen bindings to Intel(R) MKL
  *   Self adjoint matrix * matrix product functionality based on ?SYMM/?HEMM.
@@ -47,7 +47,7 @@ template <typename Index, \
 struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \
 {\
 \
-  static EIGEN_DONT_INLINE void run( \
+  static void run( \
     Index rows, Index cols, \
     const EIGTYPE* _lhs, Index lhsStride, \
     const EIGTYPE* _rhs, Index rhsStride, \
@@ -98,7 +98,7 @@ template <typename Index, \
           int RhsStorageOrder, bool ConjugateRhs> \
 struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \
 {\
-  static EIGEN_DONT_INLINE void run( \
+  static void run( \
     Index rows, Index cols, \
     const EIGTYPE* _lhs, Index lhsStride, \
     const EIGTYPE* _rhs, Index rhsStride, \
@@ -174,7 +174,7 @@ template <typename Index, \
 struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \
 {\
 \
-  static EIGEN_DONT_INLINE void run( \
+  static void run( \
     Index rows, Index cols, \
     const EIGTYPE* _lhs, Index lhsStride, \
     const EIGTYPE* _rhs, Index rhsStride, \
@@ -224,7 +224,7 @@ template <typename Index, \
           int RhsStorageOrder, bool ConjugateRhs> \
 struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \
 {\
-  static EIGEN_DONT_INLINE void run( \
+  static void run( \
     Index rows, Index cols, \
     const EIGTYPE* _lhs, Index lhsStride, \
     const EIGTYPE* _rhs, Index rhsStride, \
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h b/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h
index c3145c69a5f..c40e80f53cc 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h
@@ -28,6 +28,15 @@ struct selfadjoint_matrix_vector_product
 
 {
 static EIGEN_DONT_INLINE void run(
+  Index size,
+  const Scalar*  lhs, Index lhsStride,
+  const Scalar* _rhs, Index rhsIncr,
+  Scalar* res,
+  Scalar alpha);
+};
+
+template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Version>::run(
   Index size,
   const Scalar*  lhs, Index lhsStride,
   const Scalar* _rhs, Index rhsIncr,
@@ -35,7 +44,6 @@ static EIGEN_DONT_INLINE void run(
   Scalar alpha)
 {
   typedef typename packet_traits<Scalar>::type Packet;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
   const Index PacketSize = sizeof(Packet)/sizeof(Scalar);
 
   enum {
@@ -51,7 +59,7 @@ static EIGEN_DONT_INLINE void run(
   conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> pcj0;
   conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;
 
-  Scalar cjAlpha = ConjugateRhs ? conj(alpha) : alpha;
+  Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha;
 
   // FIXME this copy is now handled outside product_selfadjoint_vector, so it could probably be removed.
   // if the rhs is not sequentially stored in memory we copy it to a temporary buffer,
@@ -90,8 +98,8 @@ static EIGEN_DONT_INLINE void run(
     size_t alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);
 
     // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
-    res[j]   += cjd.pmul(internal::real(A0[j]), t0);
-    res[j+1] += cjd.pmul(internal::real(A1[j+1]), t1);
+    res[j]   += cjd.pmul(numext::real(A0[j]), t0);
+    res[j+1] += cjd.pmul(numext::real(A1[j+1]), t1);
     if(FirstTriangular)
     {
       res[j]   += cj0.pmul(A1[j],   t1);
@@ -106,8 +114,8 @@ static EIGEN_DONT_INLINE void run(
     for (size_t i=starti; i<alignedStart; ++i)
     {
       res[i] += t0 * A0[i] + t1 * A1[i];
-      t2 += conj(A0[i]) * rhs[i];
-      t3 += conj(A1[i]) * rhs[i];
+      t2 += numext::conj(A0[i]) * rhs[i];
+      t3 += numext::conj(A1[i]) * rhs[i];
     }
    // Yes, this is an optimization for gcc 4.3 and 4.4 (=> huge speed up)
     // gcc 4.2 does this optimization automatically.
@@ -144,7 +152,7 @@ static EIGEN_DONT_INLINE void run(
     Scalar t1 = cjAlpha * rhs[j];
     Scalar t2(0);
     // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
-    res[j] += cjd.pmul(internal::real(A0[j]), t1);
+    res[j] += cjd.pmul(numext::real(A0[j]), t1);
     for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++)
     {
       res[i] += cj0.pmul(A0[i], t1);
@@ -153,7 +161,6 @@ static EIGEN_DONT_INLINE void run(
     res[j] += alpha * t2;
   }
 }
-};
 
 } // end namespace internal 
 
@@ -180,7 +187,7 @@ struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>
 
   SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
 
-  template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+  template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
   {
     typedef typename Dest::Scalar ResScalar;
     typedef typename Base::RhsScalar RhsScalar;
@@ -260,7 +267,7 @@ struct SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>
 
   SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
 
-  template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+  template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
   {
     // let's simply transpose the product
     Transpose<Dest> destT(dest);
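
The hunks above split each heavy `run` kernel into an in-class declaration and an out-of-class definition carrying `EIGEN_DONT_INLINE`, presumably so the attribute sits on the definition, where compilers actually honor it, and the kernel reliably stays out of line. A minimal sketch of the pattern with illustrative names (`MY_DONT_INLINE` stands in for Eigen's macro; GCC spelling only, MSVC would use __declspec(noinline)):

#define MY_DONT_INLINE __attribute__((noinline))

template<typename Scalar>
struct matrix_vector_kernel
{
  // in-class: declaration only
  static MY_DONT_INLINE void run(int size, const Scalar* lhs, Scalar* res, const Scalar& alpha);
};

// out-of-class definition: this is where the no-inline request takes effect
template<typename Scalar>
MY_DONT_INLINE void matrix_vector_kernel<Scalar>::run(int size, const Scalar* lhs, Scalar* res, const Scalar& alpha)
{
  for (int i = 0; i < size; ++i)
    res[i] += alpha * lhs[i];
}
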
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h b/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h
index f88d483b653..86684b66d94 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h
@@ -50,7 +50,7 @@ struct selfadjoint_matrix_vector_product_symv :
 #define EIGEN_MKL_SYMV_SPECIALIZE(Scalar) \
 template<typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> \
 struct selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Specialized> { \
-static EIGEN_DONT_INLINE void run( \
+static void run( \
   Index size, const Scalar*  lhs, Index lhsStride, \
   const Scalar* _rhs, Index rhsIncr, Scalar* res, Scalar alpha) { \
     enum {\
@@ -77,7 +77,7 @@ struct selfadjoint_matrix_vector_product_symv<EIGTYPE,Index,StorageOrder,UpLo,Co
 { \
 typedef Matrix<EIGTYPE,Dynamic,1,ColMajor> SYMVVector;\
 \
-static EIGEN_DONT_INLINE void run( \
+static void run( \
 Index size, const EIGTYPE*  lhs, Index lhsStride, \
 const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* res, EIGTYPE alpha) \
 { \
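
In the MKL specializations, by contrast, `EIGEN_DONT_INLINE` is dropped: the macro-generated `run` is only a thin forwarding wrapper around a BLAS call, so forcing it out of line would add call overhead for nothing. A self-contained sketch of that shape (`symv_stub` is a placeholder reference loop, not MKL's real `dsymv`):

// dense column-major reference body, standing in for an external BLAS symv
void symv_stub(int n, double alpha, const double* a, int lda,
               const double* x, double* y)
{
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < n; ++j)
      y[i] += alpha * a[i + j*lda] * x[j];
}

struct selfadjoint_symv_wrapper
{
  // just forwards; nothing worth keeping out of line
  static void run(int n, const double* a, int lda,
                  const double* x, double* y, double alpha)
  { symv_stub(n, alpha, a, lda, x, y); }
};
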
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointProduct.h b/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointProduct.h
index 6a55f3d7715..6ca4ae6c0fe 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointProduct.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointProduct.h
@@ -18,21 +18,19 @@
 
 namespace Eigen { 
 
-template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjLhs, bool ConjRhs>
-struct selfadjoint_rank1_update;
 
 template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>
 struct selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo,ConjLhs,ConjRhs>
 {
-  static void run(Index size, Scalar* mat, Index stride, const Scalar* vec, Scalar alpha)
+  static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha)
   {
     internal::conj_if<ConjRhs> cj;
     typedef Map<const Matrix<Scalar,Dynamic,1> > OtherMap;
-    typedef typename internal::conditional<ConjLhs,typename OtherMap::ConjugateReturnType,const OtherMap&>::type ConjRhsType;
+    typedef typename internal::conditional<ConjLhs,typename OtherMap::ConjugateReturnType,const OtherMap&>::type ConjLhsType;
     for (Index i=0; i<size; ++i)
     {
       Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+(UpLo==Lower ? i : 0), (UpLo==Lower ? size-i : (i+1)))
-          += (alpha * cj(vec[i])) * ConjRhsType(OtherMap(vec+(UpLo==Lower ? i : 0),UpLo==Lower ? size-i : (i+1)));
+          += (alpha * cj(vecY[i])) * ConjLhsType(OtherMap(vecX+(UpLo==Lower ? i : 0),UpLo==Lower ? size-i : (i+1)));
     }
   }
 };
@@ -40,9 +38,9 @@ struct selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo,ConjLhs,ConjRhs>
 template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>
 struct selfadjoint_rank1_update<Scalar,Index,RowMajor,UpLo,ConjLhs,ConjRhs>
 {
-  static void run(Index size, Scalar* mat, Index stride, const Scalar* vec, Scalar alpha)
+  static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha)
   {
-    selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo==Lower?Upper:Lower,ConjRhs,ConjLhs>::run(size,mat,stride,vec,alpha);
+    selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo==Lower?Upper:Lower,ConjRhs,ConjLhs>::run(size,mat,stride,vecY,vecX,alpha);
   }
 };
 
@@ -52,7 +50,7 @@ struct selfadjoint_product_selector;
 template<typename MatrixType, typename OtherType, int UpLo>
 struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,true>
 {
-  static void run(MatrixType& mat, const OtherType& other, typename MatrixType::Scalar alpha)
+  static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)
   {
     typedef typename MatrixType::Scalar Scalar;
     typedef typename MatrixType::Index Index;
@@ -78,14 +76,14 @@ struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,true>
     selfadjoint_rank1_update<Scalar,Index,StorageOrder,UpLo,
                               OtherBlasTraits::NeedToConjugate  && NumTraits<Scalar>::IsComplex,
                             (!OtherBlasTraits::NeedToConjugate) && NumTraits<Scalar>::IsComplex>
-          ::run(other.size(), mat.data(), mat.outerStride(), actualOtherPtr, actualAlpha);
+          ::run(other.size(), mat.data(), mat.outerStride(), actualOtherPtr, actualOtherPtr, actualAlpha);
   }
 };
 
 template<typename MatrixType, typename OtherType, int UpLo>
 struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,false>
 {
-  static void run(MatrixType& mat, const OtherType& other, typename MatrixType::Scalar alpha)
+  static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)
   {
     typedef typename MatrixType::Scalar Scalar;
     typedef typename MatrixType::Index Index;
@@ -113,7 +111,7 @@ struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,false>
 template<typename MatrixType, unsigned int UpLo>
 template<typename DerivedU>
 SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
-::rankUpdate(const MatrixBase<DerivedU>& u, Scalar alpha)
+::rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha)
 {
   selfadjoint_product_selector<MatrixType,DerivedU,UpLo>::run(_expression().const_cast_derived(), u.derived(), alpha);
 
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h b/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h
index 57a98cc2de9..8594a97cea3 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h
@@ -24,14 +24,14 @@ struct selfadjoint_rank2_update_selector;
 template<typename Scalar, typename Index, typename UType, typename VType>
 struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
 {
-  static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha)
+  static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
   {
     const Index size = u.size();
     for (Index i=0; i<size; ++i)
     {
       Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+i, size-i) +=
-                        (conj(alpha)  * conj(u.coeff(i))) * v.tail(size-i)
-                      + (alpha * conj(v.coeff(i))) * u.tail(size-i);
+                        (numext::conj(alpha) * numext::conj(u.coeff(i))) * v.tail(size-i)
+                      + (alpha * numext::conj(v.coeff(i))) * u.tail(size-i);
     }
   }
 };
@@ -39,13 +39,13 @@ struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
 template<typename Scalar, typename Index, typename UType, typename VType>
 struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Upper>
 {
-  static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha)
+  static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
   {
     const Index size = u.size();
     for (Index i=0; i<size; ++i)
       Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i, i+1) +=
-                        (conj(alpha)  * conj(u.coeff(i))) * v.head(i+1)
-                      + (alpha * conj(v.coeff(i))) * u.head(i+1);
+                        (numext::conj(alpha)  * numext::conj(u.coeff(i))) * v.head(i+1)
+                      + (alpha * numext::conj(v.coeff(i))) * u.head(i+1);
   }
 };
 
@@ -58,7 +58,7 @@ template<bool Cond, typename T> struct conj_expr_if
 template<typename MatrixType, unsigned int UpLo>
 template<typename DerivedU, typename DerivedV>
 SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
-::rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, Scalar alpha)
+::rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, const Scalar& alpha)
 {
   typedef internal::blas_traits<DerivedU> UBlasTraits;
   typedef typename UBlasTraits::DirectLinearAccessType ActualUType;
@@ -75,9 +75,9 @@ SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
 
   enum { IsRowMajor = (internal::traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 };
   Scalar actualAlpha = alpha * UBlasTraits::extractScalarFactor(u.derived())
-                             * internal::conj(VBlasTraits::extractScalarFactor(v.derived()));
+                             * numext::conj(VBlasTraits::extractScalarFactor(v.derived()));
   if (IsRowMajor)
-    actualAlpha = internal::conj(actualAlpha);
+    actualAlpha = numext::conj(actualAlpha);
 
   internal::selfadjoint_rank2_update_selector<Scalar, Index,
     typename internal::remove_all<typename internal::conj_expr_if<IsRowMajor ^ UBlasTraits::NeedToConjugate,_ActualUType>::type>::type,
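
Throughout this patch, `internal::real/imag/conj` calls become `numext::real/imag/conj`: Eigen 3.2 collects its scalar math helpers in the `numext` namespace so the same spelling works for real and complex scalars (on reals, `conj` is the identity, whereas `std::conj` would promote to `std::complex`). A sketch of the idea, with an illustrative namespace name:

#include <complex>

namespace mynumext {
  template<typename T> T real(const T& x) { return x; }     // real scalars
  template<typename T> T imag(const T&)   { return T(0); }
  template<typename T> T conj(const T& x) { return x; }     // identity, no promotion
  template<typename T> T real(const std::complex<T>& x) { return x.real(); }
  template<typename T> T imag(const std::complex<T>& x) { return x.imag(); }
  template<typename T> std::complex<T> conj(const std::complex<T>& x)
  { return std::conj(x); }
}

With overloads like these, a generic kernel can write `mynumext::conj(alpha)` once and have it compile unchanged for float, double, and `std::complex` scalars.
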
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h b/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h
index 92cba66f615..8110507b50b 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h
@@ -61,7 +61,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,
     const Scalar* lhs, Index lhsStride,
     const Scalar* rhs, Index rhsStride,
     Scalar* res,       Index resStride,
-    Scalar alpha, level3_blocking<Scalar,Scalar>& blocking)
+    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
   {
     product_triangular_matrix_matrix<Scalar, Index,
       (Mode&(UnitDiag|ZeroDiag)) | ((Mode&Upper) ? Lower : Upper),
@@ -96,7 +96,20 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
     const Scalar* _lhs, Index lhsStride,
     const Scalar* _rhs, Index rhsStride,
     Scalar* res,        Index resStride,
-    Scalar alpha, level3_blocking<Scalar,Scalar>& blocking)
+    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
+};
+
+template <typename Scalar, typename Index, int Mode,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
+                                                        LhsStorageOrder,ConjugateLhs,
+                                                        RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(
+    Index _rows, Index _cols, Index _depth,
+    const Scalar* _lhs, Index lhsStride,
+    const Scalar* _rhs, Index rhsStride,
+    Scalar* res,        Index resStride,
+    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
   {
     // strip zeros
     Index diagSize  = (std::min)(_rows,_depth);
@@ -203,15 +216,14 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
       }
     }
   }
-};
 
 // implements col-major += alpha * op(general) * op(triangular)
 template <typename Scalar, typename Index, int Mode,
           int LhsStorageOrder, bool ConjugateLhs,
           int RhsStorageOrder, bool ConjugateRhs, int Version>
 struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
-                                           LhsStorageOrder,ConjugateLhs,
-                                           RhsStorageOrder,ConjugateRhs,ColMajor,Version>
+                                        LhsStorageOrder,ConjugateLhs,
+                                        RhsStorageOrder,ConjugateRhs,ColMajor,Version>
 {
   typedef gebp_traits<Scalar,Scalar> Traits;
   enum {
@@ -225,7 +237,20 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
     const Scalar* _lhs, Index lhsStride,
     const Scalar* _rhs, Index rhsStride,
     Scalar* res,        Index resStride,
-    Scalar alpha, level3_blocking<Scalar,Scalar>& blocking)
+    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
+};
+
+template <typename Scalar, typename Index, int Mode,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
+                                                        LhsStorageOrder,ConjugateLhs,
+                                                        RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(
+    Index _rows, Index _cols, Index _depth,
+    const Scalar* _lhs, Index lhsStride,
+    const Scalar* _rhs, Index rhsStride,
+    Scalar* res,        Index resStride,
+    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
   {
     // strip zeros
     Index diagSize  = (std::min)(_cols,_depth);
@@ -343,7 +368,6 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
       }
     }
   }
-};
 
 /***************************************************************************
 * Wrapper to product_triangular_matrix_matrix
@@ -364,7 +388,7 @@ struct TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
 
   TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
 
-  template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+  template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
   {
     typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
     typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
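
Another systematic change in these hunks is passing `alpha` as `const Scalar&` instead of by value. For float or double the two are equivalent, but for a heavyweight user-defined scalar (say, a multiprecision float) pass-by-value copies an allocation on every kernel call. A toy illustration with an invented `BigScalar`:

#include <vector>

struct BigScalar {               // stand-in for e.g. a multiprecision type
  std::vector<unsigned> limbs;   // copying this is the cost being avoided
};

void scale_by_value(BigScalar alpha)       { (void)alpha; } // copies limbs per call
void scale_by_cref(const BigScalar& alpha) { (void)alpha; } // no copy
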
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h b/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
index 4d20de61763..ba41a1c99f6 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
@@ -91,7 +91,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
     conjA = ((LhsStorageOrder==ColMajor) && ConjugateLhs) ? 1 : 0 \
   }; \
 \
-  static EIGEN_DONT_INLINE void run( \
+  static void run( \
     Index _rows, Index _cols, Index _depth, \
     const EIGTYPE* _lhs, Index lhsStride, \
     const EIGTYPE* _rhs, Index rhsStride, \
@@ -205,7 +205,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
     conjA = ((RhsStorageOrder==ColMajor) && ConjugateRhs) ? 1 : 0 \
   }; \
 \
-  static EIGEN_DONT_INLINE void run( \
+  static void run( \
     Index _rows, Index _cols, Index _depth, \
     const EIGTYPE* _lhs, Index lhsStride, \
     const EIGTYPE* _rhs, Index rhsStride, \
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixVector.h b/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixVector.h
index b1c10c201c5..6117d5a8262 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixVector.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixVector.h
@@ -27,7 +27,13 @@ struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
     HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
   };
   static EIGEN_DONT_INLINE  void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
-                                     const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, ResScalar alpha)
+                                     const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha);
+};
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int Version>
+EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor,Version>
+  ::run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
+        const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha)
   {
     static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
     Index size = (std::min)(_rows,_cols);
@@ -78,7 +84,6 @@ struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
           _res, resIncr, alpha);
     }
   }
-};
 
 template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs,int Version>
 struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor,Version>
@@ -89,8 +94,14 @@ struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
     HasUnitDiag = (Mode & UnitDiag)==UnitDiag,
     HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
   };
-  static void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
-                  const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, ResScalar alpha)
+  static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
+                                    const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha);
+};
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs,int Version>
+EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor,Version>
+  ::run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
+        const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha)
   {
     static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
     Index diagSize = (std::min)(_rows,_cols);
@@ -141,7 +152,6 @@ struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
             &res.coeffRef(diagSize), resIncr, alpha);
     }
   }
-};
 
 /***************************************************************************
 * Wrapper to product_triangular_vector
@@ -171,7 +181,7 @@ struct TriangularProduct<Mode,true,Lhs,false,Rhs,true>
 
   TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
 
-  template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+  template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
   {
     eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
   
@@ -187,7 +197,7 @@ struct TriangularProduct<Mode,false,Lhs,true,Rhs,false>
 
   TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
 
-  template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+  template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
   {
     eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
 
@@ -205,7 +215,7 @@ namespace internal {
 template<> struct trmv_selector<ColMajor>
 {
   template<int Mode, typename Lhs, typename Rhs, typename Dest>
-  static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar alpha)
+  static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, const typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar& alpha)
   {
     typedef TriangularProduct<Mode,true,Lhs,false,Rhs,true> ProductType;
     typedef typename ProductType::Index Index;
@@ -235,7 +245,7 @@ template<> struct trmv_selector<ColMajor>
 
     gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
 
-    bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0));
+    bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
     bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
     
     RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
@@ -246,7 +256,7 @@ template<> struct trmv_selector<ColMajor>
     if(!evalToDest)
     {
       #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
-      int size = dest.size();
+      Index size = dest.size();
       EIGEN_DENSE_STORAGE_CTOR_PLUGIN
       #endif
       if(!alphaIsCompatible)
@@ -281,7 +291,7 @@ template<> struct trmv_selector<ColMajor>
 template<> struct trmv_selector<RowMajor>
 {
   template<int Mode, typename Lhs, typename Rhs, typename Dest>
-  static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar alpha)
+  static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, const typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar& alpha)
   {
     typedef TriangularProduct<Mode,true,Lhs,false,Rhs,true> ProductType;
     typedef typename ProductType::LhsScalar LhsScalar;
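
In `trmv_selector<ColMajor>`, the destination can receive the product directly only when `alpha` can be folded into the real-by-complex kernel; the updated test, now spelled with `numext::imag`, accepts a complex `alpha` only when its imaginary part is exactly zero and otherwise falls back to a temporary. The check, isolated:

#include <complex>

// ComplexByReal case: a complex alpha is compatible with the real-factor
// kernel only if it is in fact real-valued.
bool alpha_is_compatible(bool complex_by_real, const std::complex<double>& alpha)
{
  return !complex_by_real || alpha.imag() == 0.0;
}
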
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixVector_MKL.h b/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixVector_MKL.h
index 3c2c3049aa3..09f110da712 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixVector_MKL.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/TriangularMatrixVector_MKL.h
@@ -50,7 +50,7 @@ struct triangular_matrix_vector_product_trmv :
 #define EIGEN_MKL_TRMV_SPECIALIZE(Scalar) \
 template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
 struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor,Specialized> { \
- static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
+ static void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
                                      const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \
       triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor>::run( \
         _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
@@ -58,7 +58,7 @@ struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs
 }; \
 template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
 struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor,Specialized> { \
- static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
+ static void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
                                      const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \
       triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor>::run( \
         _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
@@ -81,8 +81,8 @@ struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,
     IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \
     LowUp = IsLower ? Lower : Upper \
   }; \
- static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
-                             const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
+ static void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
+                 const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
  { \
    if (ConjLhs || IsZeroDiag) { \
      triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,ColMajor,BuiltIn>::run( \
@@ -166,8 +166,8 @@ struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,
     IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \
     LowUp = IsLower ? Lower : Upper \
   }; \
- static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
-                             const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
+ static void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
+                 const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
  { \
    if (IsZeroDiag) { \
      triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor,BuiltIn>::run( \
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h b/ThirdParty/eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h
index a49ea318345..f103eae72c0 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h
@@ -18,7 +18,7 @@ namespace internal {
 template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder>
 struct triangular_solve_matrix<Scalar,Index,Side,Mode,Conjugate,TriStorageOrder,RowMajor>
 {
-  static EIGEN_DONT_INLINE void run(
+  static void run(
     Index size, Index cols,
     const Scalar*  tri, Index triStride,
     Scalar* _other, Index otherStride,
@@ -39,6 +39,13 @@ template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStor
 struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>
 {
   static EIGEN_DONT_INLINE void run(
+    Index size, Index otherSize,
+    const Scalar* _tri, Index triStride,
+    Scalar* _other, Index otherStride,
+    level3_blocking<Scalar,Scalar>& blocking);
+};
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
+EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>::run(
     Index size, Index otherSize,
     const Scalar* _tri, Index triStride,
     Scalar* _other, Index otherStride,
@@ -173,7 +180,6 @@ struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageO
       }
     }
   }
-};
 
 /* Optimized triangular solver with multiple left hand sides and the triangular matrix on the right
  */
@@ -181,6 +187,13 @@ template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStor
 struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>
 {
   static EIGEN_DONT_INLINE void run(
+    Index size, Index otherSize,
+    const Scalar* _tri, Index triStride,
+    Scalar* _other, Index otherStride,
+    level3_blocking<Scalar,Scalar>& blocking);
+};
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
+EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>::run(
     Index size, Index otherSize,
     const Scalar* _tri, Index triStride,
     Scalar* _other, Index otherStride,
@@ -308,7 +321,6 @@ struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorage
       }
     }
   }
-};
 
 } // end namespace internal
 
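
Both `triangular_solve_matrix` paths above implement a blocked triangular solve with multiple right-hand sides; the scalar kernel they block around is plain forward substitution. An unblocked reference for the lower-triangular, non-unit-diagonal, column-major case (the real code tiles this and hands the trailing update to the GEBP kernel):

// Solve L * X = B in place, one right-hand side column at a time.
void trsm_lower_ref(int n, int nrhs, const double* L, int ldl, double* B, int ldb)
{
  for (int k = 0; k < nrhs; ++k) {
    double* b = B + k*ldb;
    for (int i = 0; i < n; ++i) {
      double s = b[i];
      for (int j = 0; j < i; ++j)
        s -= L[i + j*ldl] * b[j];   // subtract already-solved components
      b[i] = s / L[i + i*ldl];      // divide by the diagonal entry
    }
  }
}
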
diff --git a/ThirdParty/eigen3/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h b/ThirdParty/eigen3/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h
index a4f508b2e83..6a0bb833931 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h
@@ -48,7 +48,7 @@ struct triangular_solve_matrix<EIGTYPE,Index,OnTheLeft,Mode,Conjugate,TriStorage
     IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \
     conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \
   }; \
-  static EIGEN_DONT_INLINE void run( \
+  static void run( \
       Index size, Index otherSize, \
       const EIGTYPE* _tri, Index triStride, \
       EIGTYPE* _other, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \
@@ -103,7 +103,7 @@ struct triangular_solve_matrix<EIGTYPE,Index,OnTheRight,Mode,Conjugate,TriStorag
     IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \
     conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \
   }; \
-  static EIGEN_DONT_INLINE void run( \
+  static void run( \
       Index size, Index otherSize, \
       const EIGTYPE* _tri, Index triStride, \
       EIGTYPE* _other, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \
diff --git a/ThirdParty/eigen3/Eigen/src/Core/util/ForwardDeclarations.h b/ThirdParty/eigen3/Eigen/src/Core/util/ForwardDeclarations.h
index bcdfe3914e3..d6a814586d3 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/util/ForwardDeclarations.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/util/ForwardDeclarations.h
@@ -78,8 +78,7 @@ template<typename ExpressionType> class NestByValue;
 template<typename ExpressionType> class ForceAlignedAccess;
 template<typename ExpressionType> class SwapWrapper;
 
-template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false,
-         bool HasDirectAccess = internal::has_direct_access<XprType>::ret> class Block;
+template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false> class Block;
 
 template<typename MatrixType, int Size=Dynamic> class VectorBlock;
 template<typename MatrixType> class Transpose;
@@ -154,7 +153,6 @@ template<typename LhsScalar, typename RhsScalar, bool ConjLhs=false, bool ConjRh
 template<typename Scalar> struct scalar_sum_op;
 template<typename Scalar> struct scalar_difference_op;
 template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op;
-template<typename Scalar> struct scalar_quotient_op;
 template<typename Scalar> struct scalar_opposite_op;
 template<typename Scalar> struct scalar_conjugate_op;
 template<typename Scalar> struct scalar_real_op;
@@ -185,6 +183,7 @@ template<typename Scalar> struct scalar_identity_op;
 
 template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_product_op;
 template<typename LhsScalar,typename RhsScalar> struct scalar_multiple2_op;
+template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_quotient_op;
 
 } // end namespace internal
 
@@ -271,6 +270,8 @@ template<typename Derived> struct MatrixExponentialReturnValue;
 template<typename Derived> class MatrixFunctionReturnValue;
 template<typename Derived> class MatrixSquareRootReturnValue;
 template<typename Derived> class MatrixLogarithmReturnValue;
+template<typename Derived> class MatrixPowerReturnValue;
+template<typename Derived, typename Lhs, typename Rhs> class MatrixPowerProduct;
 
 namespace internal {
 template <typename Scalar>
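
`scalar_quotient_op` moves next to `scalar_product_op` and gains a second scalar parameter defaulting to the first, so mixed-type coefficient-wise quotients (e.g. complex by real) can be expressed. A sketch of such a functor, with the result typing simplified to the cases shown:

#include <complex>

template<typename LhsScalar, typename RhsScalar = LhsScalar>
struct quotient_op_sketch
{
  typedef LhsScalar result_type;  // adequate for same-type and complex/real pairs
  result_type operator()(const LhsScalar& a, const RhsScalar& b) const
  { return a / b; }
};

// e.g. quotient_op_sketch<std::complex<double>, double>()(z, 2.0)
// divides a complex coefficient by a real scalar.
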
diff --git a/ThirdParty/eigen3/Eigen/src/Core/util/Macros.h b/ThirdParty/eigen3/Eigen/src/Core/util/Macros.h
index cbbc2d8a821..a50f1f8c891 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/util/Macros.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/util/Macros.h
@@ -12,8 +12,8 @@
 #define EIGEN_MACROS_H
 
 #define EIGEN_WORLD_VERSION 3
-#define EIGEN_MAJOR_VERSION 1
-#define EIGEN_MINOR_VERSION 3
+#define EIGEN_MAJOR_VERSION 2
+#define EIGEN_MINOR_VERSION 0
 
 #define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
                                       (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
@@ -115,12 +115,6 @@
 #define EIGEN_MAKESTRING2(a) #a
 #define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a)
 
-#if EIGEN_GNUC_AT_LEAST(4,1) && !defined(__clang__) && !defined(__INTEL_COMPILER)
-#define EIGEN_FLATTEN_ATTRIB __attribute__((flatten))
-#else
-#define EIGEN_FLATTEN_ATTRIB
-#endif
-
 // EIGEN_STRONG_INLINE is a stronger version of the inline, using __forceinline on MSVC,
 // but it still doesn't use GCC's always_inline. This is useful in (common) situations where MSVC needs forceinline
 // but GCC is still doing fine with just inline.
@@ -151,6 +145,12 @@
 #define EIGEN_DONT_INLINE
 #endif
 
+#if (defined __GNUC__)
+#define EIGEN_PERMISSIVE_EXPR __extension__
+#else
+#define EIGEN_PERMISSIVE_EXPR
+#endif
+
 // this macro makes it possible to get rid of linking errors about multiply defined functions.
 //  - static is not very good because it prevents definitions from different object files from being merged.
 //           So static causes the resulting linked executable to be bloated with multiple copies of the same function.
@@ -240,10 +240,12 @@
 // Suppresses 'unused variable' warnings.
 #define EIGEN_UNUSED_VARIABLE(var) (void)var;
 
-#if !defined(EIGEN_ASM_COMMENT) && (defined __GNUC__)
-#define EIGEN_ASM_COMMENT(X)  asm("#" X)
-#else
-#define EIGEN_ASM_COMMENT(X)
+#if !defined(EIGEN_ASM_COMMENT)
+  #if (defined __GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
+    #define EIGEN_ASM_COMMENT(X)  asm("#" X)
+  #else
+    #define EIGEN_ASM_COMMENT(X)
+  #endif
 #endif
 
 /* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
@@ -301,6 +303,12 @@
 #if defined(_MSC_VER) && (!defined(__INTEL_COMPILER))
 #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
   using Base::operator =;
+#elif defined(__clang__) // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
+#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
+  using Base::operator =; \
+  EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \
+  template <typename OtherDerived> \
+  EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other) { Base::operator=(other.derived()); return *this; }
 #else
 #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
   using Base::operator =; \
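
With the version bump to 3.2.0, client code can branch on the new features via the `EIGEN_VERSION_AT_LEAST` triple defined above, for example:

#include <Eigen/Core>
#include <iostream>

int main()
{
#if EIGEN_VERSION_AT_LEAST(3,2,0)
  std::cout << "3.2 features (e.g. Ref<>, SparseLU) available\n";
#else
  std::cout << "pre-3.2 Eigen\n";
#endif
  return 0;
}
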
diff --git a/ThirdParty/eigen3/Eigen/src/Core/util/Memory.h b/ThirdParty/eigen3/Eigen/src/Core/util/Memory.h
index eda688b472e..451535a0c8c 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/util/Memory.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/util/Memory.h
@@ -19,6 +19,10 @@
 #ifndef EIGEN_MEMORY_H
 #define EIGEN_MEMORY_H
 
+#ifndef EIGEN_MALLOC_ALREADY_ALIGNED
+
+// Try to determine automatically if malloc is already aligned.
+
 // On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see:
 //   http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html
 // This is true at least since glibc 2.8.
@@ -27,7 +31,7 @@
 // page 114, "[The] LP64 model [...] is used by all 64-bit UNIX ports" so it's indeed
 // quite safe, at least within the context of glibc, to equate 64-bit with LP64.
 #if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \
- && defined(__LP64__)
+ && defined(__LP64__) && ! defined( __SANITIZE_ADDRESS__ )
   #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1
 #else
   #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0
@@ -52,10 +56,19 @@
   #define EIGEN_MALLOC_ALREADY_ALIGNED 0
 #endif
 
-#if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) \
- && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
-  #define EIGEN_HAS_POSIX_MEMALIGN 1
-#else
+#endif
+
+// See bug 554 (http://eigen.tuxfamily.org/bz/show_bug.cgi?id=554)
+// It seems to be unsafe to check _POSIX_ADVISORY_INFO without including unistd.h first.
+// Currently, let's include it only on unix systems:
+#if defined(__unix__) || defined(__unix)
+  #include <unistd.h>
+  #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
+    #define EIGEN_HAS_POSIX_MEMALIGN 1
+  #endif
+#endif
+
+#ifndef EIGEN_HAS_POSIX_MEMALIGN
   #define EIGEN_HAS_POSIX_MEMALIGN 0
 #endif
 
@@ -209,7 +222,7 @@ inline void* aligned_malloc(size_t size)
     if(posix_memalign(&result, 16, size)) result = 0;
   #elif EIGEN_HAS_MM_MALLOC
     result = _mm_malloc(size, 16);
-#elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
+  #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
     result = _aligned_malloc(size, 16);
   #else
     result = handmade_aligned_malloc(size);
@@ -451,7 +464,6 @@ template<typename T, bool Align> inline void conditional_aligned_delete_auto(T *
 template<typename Scalar, typename Index>
 static inline Index first_aligned(const Scalar* array, Index size)
 {
-  typedef typename packet_traits<Scalar>::type Packet;
   enum { PacketSize = packet_traits<Scalar>::size,
          PacketAlignedMask = PacketSize-1
   };
@@ -475,6 +487,13 @@ static inline Index first_aligned(const Scalar* array, Index size)
   }
 }
 
+/** \internal Returns the smallest integer multiple of \a base that is greater than or equal to \a size
+  */ 
+template<typename Index> 
+inline static Index first_multiple(Index size, Index base)
+{
+  return ((size+base-1)/base)*base;
+}
 
 // std::copy is much slower than memcpy, so let's introduce a smart_copy which
 // uses memcpy on trivial types, i.e., on types that do not require an initialization ctor.
@@ -743,11 +762,16 @@ public:
 #    if defined(__PIC__) && defined(__i386__)
        // Case for x86 with PIC
 #      define EIGEN_CPUID(abcd,func,id) \
-         __asm__ __volatile__ ("xchgl %%ebx, %%esi;cpuid; xchgl %%ebx,%%esi": "=a" (abcd[0]), "=S" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
+         __asm__ __volatile__ ("xchgl %%ebx, %k1;cpuid; xchgl %%ebx,%k1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
+#    elif defined(__PIC__) && defined(__x86_64__)
+       // Case for x64 with PIC. In theory this is only a problem with recent gcc and with medium or large code model, not with the default small code model.
+       // However, we cannot detect which code model is used, and the xchg overhead is negligible anyway.
+#      define EIGEN_CPUID(abcd,func,id) \
+        __asm__ __volatile__ ("xchg{q}\t{%%}rbx, %q1; cpuid; xchg{q}\t{%%}rbx, %q1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id));
 #    else
        // Case for x86_64 or x86 w/o PIC
 #      define EIGEN_CPUID(abcd,func,id) \
-         __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id) );
+         __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) );
 #    endif
 #  elif defined(_MSC_VER)
 #    if (_MSC_VER > 1500) && ( defined(_M_IX86) || defined(_M_X64) )
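
The `first_multiple` helper introduced in the Memory.h hunk above rounds a size up to the next multiple of a base, which is exactly the arithmetic needed to pad buffers to whole packets. The same computation, with a couple of checks:

#include <cassert>

template<typename Index>
inline Index first_multiple_ref(Index size, Index base)
{
  return ((size + base - 1) / base) * base;  // smallest multiple of base >= size
}

int main()
{
  assert(first_multiple_ref(13, 4) == 16);   // pad 13 scalars to 4 packets
  assert(first_multiple_ref(16, 4) == 16);   // already a multiple: unchanged
  return 0;
}
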
diff --git a/ThirdParty/eigen3/Eigen/src/Core/util/Meta.h b/ThirdParty/eigen3/Eigen/src/Core/util/Meta.h
index a5f31164d15..71d58710871 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/util/Meta.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/util/Meta.h
@@ -186,23 +186,35 @@ template<int Y, int InfX, int SupX>
 class meta_sqrt<Y, InfX, SupX, true> { public:  enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; };
 
 /** \internal determines whether the product of two numeric types is allowed and what the return type is */
-template<typename T, typename U> struct scalar_product_traits;
+template<typename T, typename U> struct scalar_product_traits
+{
+  enum { Defined = 0 };
+};
 
 template<typename T> struct scalar_product_traits<T,T>
 {
-  //enum { Cost = NumTraits<T>::MulCost };
+  enum {
+    // Cost = NumTraits<T>::MulCost,
+    Defined = 1
+  };
   typedef T ReturnType;
 };
 
 template<typename T> struct scalar_product_traits<T,std::complex<T> >
 {
-  //enum { Cost = 2*NumTraits<T>::MulCost };
+  enum {
+    // Cost = 2*NumTraits<T>::MulCost,
+    Defined = 1
+  };
   typedef std::complex<T> ReturnType;
 };
 
 template<typename T> struct scalar_product_traits<std::complex<T>, T>
 {
-  //enum { Cost = 2*NumTraits<T>::MulCost  };
+  enum {
+    // Cost = 2*NumTraits<T>::MulCost,
+    Defined = 1
+  };
   typedef std::complex<T> ReturnType;
 };
 
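
Giving the primary `scalar_product_traits` template a `Defined = 0` member, instead of leaving it undefined, lets generic code ask at compile time whether a mixed-scalar product is supported rather than failing on instantiation. A shortened mirror of the traits above, with a probe:

#include <complex>

template<typename T, typename U> struct product_traits_sketch { enum { Defined = 0 }; };
template<typename T> struct product_traits_sketch<T,T>
{ enum { Defined = 1 }; typedef T ReturnType; };
template<typename T> struct product_traits_sketch<T,std::complex<T> >
{ enum { Defined = 1 }; typedef std::complex<T> ReturnType; };

// Defined can be read even for unsupported pairs; ReturnType is only
// touched when Defined is nonzero.
static const bool ok  = product_traits_sketch<double, std::complex<double> >::Defined != 0;
static const bool bad = product_traits_sketch<double, float>::Defined != 0;  // false, still compiles
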
diff --git a/ThirdParty/eigen3/Eigen/src/Core/util/StaticAssert.h b/ThirdParty/eigen3/Eigen/src/Core/util/StaticAssert.h
index b46a75b3783..8872c5b648e 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/util/StaticAssert.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/util/StaticAssert.h
@@ -89,7 +89,8 @@
         YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED,
         YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED,
         THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE,
-        THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH
+        THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH,
+        OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG
       };
     };
 
diff --git a/ThirdParty/eigen3/Eigen/src/Core/util/XprHelper.h b/ThirdParty/eigen3/Eigen/src/Core/util/XprHelper.h
index e6f8aaef8cb..3c4773054b4 100644
--- a/ThirdParty/eigen3/Eigen/src/Core/util/XprHelper.h
+++ b/ThirdParty/eigen3/Eigen/src/Core/util/XprHelper.h
@@ -65,12 +65,34 @@ template<typename T> class variable_if_dynamic<T, Dynamic>
     void setValue(T value) { m_value = value; }
 };
 
+/** \internal like variable_if_dynamic but for DynamicIndex
+  */
+template<typename T, int Value> class variable_if_dynamicindex
+{
+  public:
+    EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)
+    explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); assert(v == T(Value)); }
+    static T value() { return T(Value); }
+    void setValue(T) {}
+};
+
+template<typename T> class variable_if_dynamicindex<T, DynamicIndex>
+{
+    T m_value;
+    variable_if_dynamicindex() { assert(false); }
+  public:
+    explicit variable_if_dynamicindex(T value) : m_value(value) {}
+    T value() const { return m_value; }
+    void setValue(T value) { m_value = value; }
+};
+
 template<typename T> struct functor_traits
 {
   enum
   {
     Cost = 10,
-    PacketAccess = false
+    PacketAccess = false,
+    IsRepeatable = false
   };
 };
 
diff --git a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Hyperplane.h b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
index 19cc1bfd883..b95bf00ecff 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
@@ -1,5 +1,5 @@
 // This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
 //
 // Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
 // Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
@@ -44,7 +44,7 @@ public:
   typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
 
   /** Default constructor without initialization */
-  inline explicit Hyperplane() {}
+  inline Hyperplane() {}
 
   /** Constructs a dynamic-size hyperplane with \a _dim the dimension
     * of the ambient space */
diff --git a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
index 6e4a168a8cd..9b57b7e0bb4 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
@@ -1,5 +1,5 @@
 // This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
 //
 // Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
 // Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
@@ -36,7 +36,7 @@ public:
   typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
 
   /** Default constructor without initialization */
-  inline explicit ParametrizedLine() {}
+  inline ParametrizedLine() {}
 
   /** Constructs a dynamic-size line with \a _dim the dimension
     * of the ambient space */
diff --git a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Quaternion.h b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Quaternion.h
index ec87da054d6..4b6390cf1de 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Quaternion.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Quaternion.h
@@ -1,5 +1,5 @@
 // This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
 //
 // Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
 //
diff --git a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Rotation2D.h b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
index 3e02b7a4fd1..19b8582a1b1 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
@@ -1,5 +1,5 @@
 // This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
 //
 // Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
 //
diff --git a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/RotationBase.h b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/RotationBase.h
index 78ad73b60ad..b1c8f38da93 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/RotationBase.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/RotationBase.h
@@ -1,5 +1,5 @@
 // This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
 //
 // Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
 //
diff --git a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Scaling.h b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Scaling.h
index a07c1c7c762..b8fa6cd3f64 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Scaling.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Scaling.h
@@ -1,5 +1,5 @@
 // This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
 //
 // Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
 //
diff --git a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Transform.h b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Transform.h
index dceb8020383..fab60b251df 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Transform.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Transform.h
@@ -1,5 +1,5 @@
 // This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
 //
 // Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
 // Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
diff --git a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Translation.h b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Translation.h
index 0fb9a9f9a5a..2b9859f6f4c 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Translation.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigen2Support/Geometry/Translation.h
@@ -1,5 +1,5 @@
 // This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
 //
 // Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
 //
diff --git a/ThirdParty/eigen3/Eigen/src/Eigen2Support/LeastSquares.h b/ThirdParty/eigen3/Eigen/src/Eigen2Support/LeastSquares.h
index 7aff428dc45..0e6fdb4889d 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigen2Support/LeastSquares.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigen2Support/LeastSquares.h
@@ -1,5 +1,5 @@
 // This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
 //
 // Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
 //
diff --git a/ThirdParty/eigen3/Eigen/src/Eigen2Support/MathFunctions.h b/ThirdParty/eigen3/Eigen/src/Eigen2Support/MathFunctions.h
index 3a8a9ca8146..3544af2538d 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigen2Support/MathFunctions.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigen2Support/MathFunctions.h
@@ -12,18 +12,18 @@
 
 namespace Eigen { 
 
-template<typename T> inline typename NumTraits<T>::Real ei_real(const T& x) { return internal::real(x); }
-template<typename T> inline typename NumTraits<T>::Real ei_imag(const T& x) { return internal::imag(x); }
-template<typename T> inline T ei_conj(const T& x) { return internal::conj(x); }
-template<typename T> inline typename NumTraits<T>::Real ei_abs (const T& x) { return internal::abs(x); }
-template<typename T> inline typename NumTraits<T>::Real ei_abs2(const T& x) { return internal::abs2(x); }
-template<typename T> inline T ei_sqrt(const T& x) { return internal::sqrt(x); }
-template<typename T> inline T ei_exp (const T& x) { return internal::exp(x); }
-template<typename T> inline T ei_log (const T& x) { return internal::log(x); }
-template<typename T> inline T ei_sin (const T& x) { return internal::sin(x); }
-template<typename T> inline T ei_cos (const T& x) { return internal::cos(x); }
-template<typename T> inline T ei_atan2(const T& x,const T& y) { return internal::atan2(x,y); }
-template<typename T> inline T ei_pow (const T& x,const T& y) { return internal::pow(x,y); }
+template<typename T> inline typename NumTraits<T>::Real ei_real(const T& x) { return numext::real(x); }
+template<typename T> inline typename NumTraits<T>::Real ei_imag(const T& x) { return numext::imag(x); }
+template<typename T> inline T ei_conj(const T& x) { return numext::conj(x); }
+template<typename T> inline typename NumTraits<T>::Real ei_abs (const T& x) { using std::abs; return abs(x); }
+template<typename T> inline typename NumTraits<T>::Real ei_abs2(const T& x) { return numext::abs2(x); }
+template<typename T> inline T ei_sqrt(const T& x) { using std::sqrt; return sqrt(x); }
+template<typename T> inline T ei_exp (const T& x) { using std::exp;  return exp(x); }
+template<typename T> inline T ei_log (const T& x) { using std::log;  return log(x); }
+template<typename T> inline T ei_sin (const T& x) { using std::sin;  return sin(x); }
+template<typename T> inline T ei_cos (const T& x) { using std::cos;  return cos(x); }
+template<typename T> inline T ei_atan2(const T& x,const T& y) { using std::atan2; return atan2(x,y); }
+template<typename T> inline T ei_pow (const T& x,const T& y) { return numext::pow(x,y); }
 template<typename T> inline T ei_random () { return internal::random<T>(); }
 template<typename T> inline T ei_random (const T& x, const T& y) { return internal::random(x, y); }
 
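
The rewritten `ei_*` shims use the classic two-step — `using std::sqrt; return sqrt(x);` — instead of qualified `internal::` calls: the using-declaration supplies the standard overloads for builtin types, while the unqualified call lets argument-dependent lookup find overloads shipped with user-defined scalar types. A self-contained illustration (`mytypes` and `Fixed16` are invented):

#include <cmath>

namespace mytypes {
  struct Fixed16 { int raw; };                  // hypothetical custom scalar
  Fixed16 sqrt(const Fixed16& x) { return x; }  // found via ADL; real logic elided
}

template<typename T> T generic_sqrt(const T& x)
{
  using std::sqrt;   // make std::sqrt visible for float/double/long double
  return sqrt(x);    // unqualified: ADL may also pick mytypes::sqrt
}

int main()
{
  double a = generic_sqrt(2.0);         // resolves to std::sqrt
  mytypes::Fixed16 f; f.raw = 3;
  mytypes::Fixed16 b = generic_sqrt(f); // resolves to mytypes::sqrt
  (void)a; (void)b;
  return 0;
}
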
diff --git a/ThirdParty/eigen3/Eigen/src/Eigen2Support/SVD.h b/ThirdParty/eigen3/Eigen/src/Eigen2Support/SVD.h
index 3d2eeb44586..077d26d5432 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigen2Support/SVD.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigen2Support/SVD.h
@@ -1,5 +1,5 @@
 // This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
 //
 // Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
 //
@@ -315,7 +315,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
         e[p-2] = 0.0;
         for (j = p-2; j >= k; --j)
         {
-          Scalar t(internal::hypot(m_sigma[j],f));
+          Scalar t(numext::hypot(m_sigma[j],f));
           Scalar cs(m_sigma[j]/t);
           Scalar sn(f/t);
           m_sigma[j] = t;
@@ -344,7 +344,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
         e[k-1] = 0.0;
         for (j = k; j < p; ++j)
         {
-          Scalar t(internal::hypot(m_sigma[j],f));
+          Scalar t(numext::hypot(m_sigma[j],f));
           Scalar cs( m_sigma[j]/t);
           Scalar sn(f/t);
           m_sigma[j] = t;
@@ -392,7 +392,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
 
         for (j = k; j < p-1; ++j)
         {
-          Scalar t = internal::hypot(f,g);
+          Scalar t = numext::hypot(f,g);
           Scalar cs = f/t;
           Scalar sn = g/t;
           if (j != k)
@@ -410,7 +410,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
               m_matV(i,j) = t;
             }
           }
-          t = internal::hypot(f,g);
+          t = numext::hypot(f,g);
           cs = f/t;
           sn = g/t;
           m_sigma[j] = t;
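
The Givens rotations above now call `numext::hypot`, which evaluates sqrt(x^2 + y^2) without overflowing or underflowing in the squares by factoring out the larger magnitude first. A reference version of that standard trick:

#include <cmath>

double hypot_ref(double x, double y)
{
  double ax = std::fabs(x), ay = std::fabs(y);
  if (ax < ay) { double t = ax; ax = ay; ay = t; }  // ensure ax >= ay
  if (ax == 0.0) return 0.0;                        // both inputs zero
  double r = ay / ax;                               // r <= 1: r*r cannot overflow
  return ax * std::sqrt(1.0 + r*r);
}
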
diff --git a/ThirdParty/eigen3/Eigen/src/Eigenvalues/EigenSolver.h b/ThirdParty/eigen3/Eigen/src/Eigenvalues/EigenSolver.h
index c16ff2b74e2..6e7150685a2 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigenvalues/EigenSolver.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigenvalues/EigenSolver.h
@@ -2,7 +2,7 @@
 // for linear algebra.
 //
 // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
 //
 // This Source Code Form is subject to the terms of the Mozilla
 // Public License v. 2.0. If a copy of the MPL was not distributed
@@ -281,6 +281,19 @@ template<typename _MatrixType> class EigenSolver
       return m_realSchur.info();
     }
 
+    /** \brief Sets the maximum number of iterations allowed. */
+    EigenSolver& setMaxIterations(Index maxIters)
+    {
+      m_realSchur.setMaxIterations(maxIters);
+      return *this;
+    }
+
+    /** \brief Returns the maximum number of iterations. */
+    Index getMaxIterations()
+    {
+      return m_realSchur.getMaxIterations();
+    }
+
   private:
     void doComputeEigenvectors();
 
@@ -304,12 +317,12 @@ MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
   MatrixType matD = MatrixType::Zero(n,n);
   for (Index i=0; i<n; ++i)
   {
-    if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(i)), internal::real(m_eivalues.coeff(i))))
-      matD.coeffRef(i,i) = internal::real(m_eivalues.coeff(i));
+    if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i))))
+      matD.coeffRef(i,i) = numext::real(m_eivalues.coeff(i));
     else
     {
-      matD.template block<2,2>(i,i) <<  internal::real(m_eivalues.coeff(i)), internal::imag(m_eivalues.coeff(i)),
-                                       -internal::imag(m_eivalues.coeff(i)), internal::real(m_eivalues.coeff(i));
+      matD.template block<2,2>(i,i) <<  numext::real(m_eivalues.coeff(i)), numext::imag(m_eivalues.coeff(i)),
+                                       -numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i));
       ++i;
     }
   }
@@ -325,7 +338,7 @@ typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eige
   EigenvectorsType matV(n,n);
   for (Index j=0; j<n; ++j)
   {
-    if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(j)), internal::real(m_eivalues.coeff(j))) || j+1==n)
+    if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(j)), numext::real(m_eivalues.coeff(j))) || j+1==n)
     {
       // we have a real eigen value
       matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>();
@@ -348,12 +361,16 @@ typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eige
 }
 
 template<typename MatrixType>
-EigenSolver<MatrixType>& EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
+EigenSolver<MatrixType>& 
+EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
 {
-  assert(matrix.cols() == matrix.rows());
+  using std::sqrt;
+  using std::abs;
+  eigen_assert(matrix.cols() == matrix.rows());
 
   // Reduce to real Schur form.
   m_realSchur.compute(matrix, computeEigenvectors);
+
   if (m_realSchur.info() == Success)
   {
     m_matT = m_realSchur.matrixT();
@@ -373,7 +390,7 @@ EigenSolver<MatrixType>& EigenSolver<MatrixType>::compute(const MatrixType& matr
       else
       {
         Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1));
-        Scalar z = internal::sqrt(internal::abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
+        Scalar z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
         m_eivalues.coeffRef(i)   = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z);
         m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z);
         i += 2;
@@ -393,10 +410,11 @@ EigenSolver<MatrixType>& EigenSolver<MatrixType>::compute(const MatrixType& matr
 
 // Complex scalar division.
 template<typename Scalar>
-std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
+std::complex<Scalar> cdiv(const Scalar& xr, const Scalar& xi, const Scalar& yr, const Scalar& yi)
 {
+  using std::abs;
   Scalar r,d;
-  if (internal::abs(yr) > internal::abs(yi))
+  if (abs(yr) > abs(yi))
   {
       r = yi/yr;
       d = yr + r*yi;
@@ -414,6 +432,7 @@ std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
 template<typename MatrixType>
 void EigenSolver<MatrixType>::doComputeEigenvectors()
 {
+  using std::abs;
   const Index size = m_eivec.cols();
   const Scalar eps = NumTraits<Scalar>::epsilon();
 
@@ -469,14 +488,14 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
             Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();
             Scalar t = (x * lastr - lastw * r) / denom;
             m_matT.coeffRef(i,n) = t;
-            if (internal::abs(x) > internal::abs(lastw))
+            if (abs(x) > abs(lastw))
               m_matT.coeffRef(i+1,n) = (-r - w * t) / x;
             else
               m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw;
           }
 
           // Overflow control
-          Scalar t = internal::abs(m_matT.coeff(i,n));
+          Scalar t = abs(m_matT.coeff(i,n));
           if ((eps * t) * t > Scalar(1))
             m_matT.col(n).tail(size-i) /= t;
         }
@@ -488,7 +507,7 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
       Index l = n-1;
 
       // Last vector component imaginary so matrix is triangular
-      if (internal::abs(m_matT.coeff(n,n-1)) > internal::abs(m_matT.coeff(n-1,n)))
+      if (abs(m_matT.coeff(n,n-1)) > abs(m_matT.coeff(n-1,n)))
       {
         m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1);
         m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1);
@@ -496,8 +515,8 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
       else
       {
         std::complex<Scalar> cc = cdiv<Scalar>(0.0,-m_matT.coeff(n-1,n),m_matT.coeff(n-1,n-1)-p,q);
-        m_matT.coeffRef(n-1,n-1) = internal::real(cc);
-        m_matT.coeffRef(n-1,n) = internal::imag(cc);
+        m_matT.coeffRef(n-1,n-1) = numext::real(cc);
+        m_matT.coeffRef(n-1,n) = numext::imag(cc);
       }
       m_matT.coeffRef(n,n-1) = 0.0;
       m_matT.coeffRef(n,n) = 1.0;
@@ -519,8 +538,8 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
           if (m_eivalues.coeff(i).imag() == RealScalar(0))
           {
             std::complex<Scalar> cc = cdiv(-ra,-sa,w,q);
-            m_matT.coeffRef(i,n-1) = internal::real(cc);
-            m_matT.coeffRef(i,n) = internal::imag(cc);
+            m_matT.coeffRef(i,n-1) = numext::real(cc);
+            m_matT.coeffRef(i,n) = numext::imag(cc);
           }
           else
           {
@@ -530,12 +549,12 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
             Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;
             Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q;
             if ((vr == 0.0) && (vi == 0.0))
-              vr = eps * norm * (internal::abs(w) + internal::abs(q) + internal::abs(x) + internal::abs(y) + internal::abs(lastw));
+              vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw));
 
-	    std::complex<Scalar> cc = cdiv(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra,vr,vi);
-            m_matT.coeffRef(i,n-1) = internal::real(cc);
-            m_matT.coeffRef(i,n) = internal::imag(cc);
-            if (internal::abs(x) > (internal::abs(lastw) + internal::abs(q)))
+            std::complex<Scalar> cc = cdiv(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra,vr,vi);
+            m_matT.coeffRef(i,n-1) = numext::real(cc);
+            m_matT.coeffRef(i,n) = numext::imag(cc);
+            if (abs(x) > (abs(lastw) + abs(q)))
             {
               m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x;
               m_matT.coeffRef(i+1,n) = (-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x;
@@ -543,14 +562,14 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
             else
             {
               cc = cdiv(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n),lastw,q);
-              m_matT.coeffRef(i+1,n-1) = internal::real(cc);
-              m_matT.coeffRef(i+1,n) = internal::imag(cc);
+              m_matT.coeffRef(i+1,n-1) = numext::real(cc);
+              m_matT.coeffRef(i+1,n) = numext::imag(cc);
             }
           }
 
           // Overflow control
           using std::max;
-          Scalar t = (max)(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n)));
+          Scalar t = (max)(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n)));
           if ((eps * t) * t > Scalar(1))
             m_matT.block(i, n-1, size-i, 2) /= t;
 
diff --git a/ThirdParty/eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/ThirdParty/eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
new file mode 100644
index 00000000000..dc240e13e13
--- /dev/null
+++ b/ThirdParty/eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
@@ -0,0 +1,341 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERALIZEDEIGENSOLVER_H
+#define EIGEN_GENERALIZEDEIGENSOLVER_H
+
+#include "./RealQZ.h"
+
+namespace Eigen { 
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class GeneralizedEigenSolver
+  *
+  * \brief Computes the generalized eigenvalues and eigenvectors of a pair of general matrices
+  *
+  * \tparam _MatrixType the type of the matrices of which we are computing the
+  * eigen-decomposition; this is expected to be an instantiation of the Matrix
+  * class template. Currently, only real matrices are supported.
+  *
+  * The generalized eigenvalues and eigenvectors of a matrix pair \f$ A \f$ and \f$ B \f$ are scalars
+  * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda Bv \f$.  If
+  * \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and
+  * \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V =
+  * B V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we
+  * have \f$ A = B V D V^{-1} \f$. This is called the generalized eigen-decomposition.
+  *
+  * The generalized eigenvalues and eigenvectors of a matrix pair may be complex, even when the
+  * matrices are real. Moreover, the generalized eigenvalue might be infinite if the matrix B is
+  * singular. To work around this difficulty, the eigenvalues are provided as a pair of complex \f$ \alpha \f$
+  * and real \f$ \beta \f$ such that: \f$ \lambda_i = \alpha_i / \beta_i \f$. If \f$ \beta_i \f$ is (nearly) zero,
+  * then one can consider the well-defined left eigenvalue \f$ \mu = \beta_i / \alpha_i\f$ such that:
+  * \f$ \mu_i A v_i = B v_i \f$, or even \f$ \mu_i u_i^T A  = u_i^T B \f$ where \f$ u_i \f$ is
+  * called the left eigenvector.
+  *
+  * Call the function compute() to compute the generalized eigenvalues and eigenvectors of
+  * a given matrix pair. Alternatively, you can use the
+  * GeneralizedEigenSolver(const MatrixType&, const MatrixType&, bool) constructor which computes the
+  * eigenvalues and eigenvectors at construction time. Once the eigenvalue and
+  * eigenvectors are computed, they can be retrieved with the eigenvalues() and
+  * eigenvectors() functions.
+  *
+  * Here is a usage example of this class:
+  * Example: \include GeneralizedEigenSolver.cpp
+  * Output: \verbinclude GeneralizedEigenSolver.out
+  *
+  * \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
+  */
+template<typename _MatrixType> class GeneralizedEigenSolver
+{
+  public:
+
+    /** \brief Synonym for the template parameter \p _MatrixType. */
+    typedef _MatrixType MatrixType;
+
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+    /** \brief Scalar type for matrices of type #MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+
+    /** \brief Complex scalar type for #MatrixType. 
+      *
+      * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+      * \c float or \c double) and just \c Scalar if #Scalar is
+      * complex.
+      */
+    typedef std::complex<RealScalar> ComplexScalar;
+
+    /** \brief Type for the vector of real scalar values as returned by betas().
+      *
+      * This is a column vector with entries of type #Scalar.
+      * The length of the vector is the size of #MatrixType.
+      */
+    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> VectorType;
+
+    /** \brief Type for the vector of complex scalar values as returned by alphas().
+      *
+      * This is a column vector with entries of type #ComplexScalar.
+      * The length of the vector is the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ComplexVectorType;
+
+    /** \brief Expression type for the eigenvalues as returned by eigenvalues().
+      */
+    typedef CwiseBinaryOp<internal::scalar_quotient_op<ComplexScalar,Scalar>,ComplexVectorType,VectorType> EigenvalueType;
+
+    /** \brief Type for matrix of eigenvectors as returned by eigenvectors(). 
+      *
+      * This is a square matrix with entries of type #ComplexScalar. 
+      * The size is the same as the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
+
+    /** \brief Default constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via EigenSolver::compute(const MatrixType&, bool).
+      *
+      * \sa compute() for an example.
+      */
+    GeneralizedEigenSolver() : m_eivec(), m_alphas(), m_betas(), m_isInitialized(false), m_eigenvectorsOk(false), m_realQZ(), m_matS(), m_tmp() {}
+
+    /** \brief Default constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa GeneralizedEigenSolver()
+      */
+    GeneralizedEigenSolver(Index size)
+      : m_eivec(size, size),
+        m_alphas(size),
+        m_betas(size),
+        m_isInitialized(false),
+        m_eigenvectorsOk(false),
+        m_realQZ(size),
+        m_matS(size, size),
+        m_tmp(size)
+    {}
+
+    /** \brief Constructor; computes the generalized eigendecomposition of given matrix pair.
+      * 
+      * \param[in]  A  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  B  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are computed.
+      *
+      * This constructor calls compute() to compute the generalized eigenvalues
+      * and eigenvectors.
+      *
+      * \sa compute()
+      */
+    GeneralizedEigenSolver(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true)
+      : m_eivec(A.rows(), A.cols()),
+        m_alphas(A.cols()),
+        m_betas(A.cols()),
+        m_isInitialized(false),
+        m_eigenvectorsOk(false),
+        m_realQZ(A.cols()),
+        m_matS(A.rows(), A.cols()),
+        m_tmp(A.cols())
+    {
+      compute(A, B, computeEigenvectors);
+    }
+
+    /* \brief Returns the computed generalized eigenvectors.
+      *
+      * \returns  %Matrix whose columns are the (possibly complex) eigenvectors.
+      *
+      * \pre Either the constructor 
+      * GeneralizedEigenSolver(const MatrixType&,const MatrixType&, bool) or the member function
+      * compute(const MatrixType&, const MatrixType&, bool) has been called before, and
+      * \p computeEigenvectors was set to true (the default).
+      *
+      * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
+      * to eigenvalue number \f$ k \f$ as returned by eigenvalues().  The
+      * eigenvectors are normalized to have (Euclidean) norm equal to one. The
+      * matrix returned by this function is the matrix \f$ V \f$ in the
+      * generalized eigendecomposition \f$ A = B V D V^{-1} \f$, if it exists.
+      *
+      * \sa eigenvalues()
+      */
+//    EigenvectorsType eigenvectors() const;
+
+    /** \brief Returns an expression of the computed generalized eigenvalues.
+      *
+      * \returns An expression of the column vector containing the eigenvalues.
+      *
+      * It is a shortcut for \code this->alphas().cwiseQuotient(this->betas()); \endcode
+      * Note that betas might contain zeros. It is therefore preferable to work directly
+      * with the alphas() and betas() vectors rather than to use this function.
+      *
+      * \pre Either the constructor 
+      * GeneralizedEigenSolver(const MatrixType&,const MatrixType&,bool) or the member function
+      * compute(const MatrixType&,const MatrixType&,bool) has been called before.
+      *
+      * The eigenvalues are repeated according to their algebraic multiplicity,
+      * so there are as many eigenvalues as rows in the matrix. The eigenvalues 
+      * are not sorted in any particular order.
+      *
+      * \sa alphas(), betas(), eigenvectors()
+      */
+    EigenvalueType eigenvalues() const
+    {
+      eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+      return EigenvalueType(m_alphas,m_betas);
+    }
+
+    /** \returns The vector containing the alpha values
+      *
+      * This vector allows one to reconstruct the j-th eigenvalue as alphas(j)/betas(j).
+      *
+      * \sa betas(), eigenvalues() */
+    ComplexVectorType alphas() const
+    {
+      eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+      return m_alphas;
+    }
+
+    /** \returns The vector containing the beta values
+      *
+      * This vector allows one to reconstruct the j-th eigenvalue as alphas(j)/betas(j).
+      *
+      * \sa alphas(), eigenvalues() */
+    VectorType betas() const
+    {
+      eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+      return m_betas;
+    }
+
+    /** \brief Computes the generalized eigendecomposition of the given matrix pair.
+      * 
+      * \param[in]  A  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  B  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed. 
+      * \returns    Reference to \c *this
+      *
+      * This function computes the generalized eigenvalues of the real matrix pair \p A, \p B.
+      * The eigenvalues() function can be used to retrieve them.  If 
+      * \p computeEigenvectors is true, then the eigenvectors are also computed
+      * and can be retrieved by calling eigenvectors().
+      *
+      * The matrix is first reduced to real generalized Schur form using the RealQZ
+      * class. The generalized Schur decomposition is then used to compute the eigenvalues
+      * and eigenvectors.
+      *
+      * The cost of the computation is dominated by the cost of the
+      * generalized Schur decomposition.
+      *
+      * This method reuses the data allocated in the GeneralizedEigenSolver object.
+      */
+    GeneralizedEigenSolver& compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true);
+
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+      return m_realQZ.info();
+    }
+
+    /** Sets the maximal number of iterations allowed.
+    */
+    GeneralizedEigenSolver& setMaxIterations(Index maxIters)
+    {
+      m_realQZ.setMaxIterations(maxIters);
+      return *this;
+    }
+
+  protected:
+    MatrixType m_eivec;
+    ComplexVectorType m_alphas;
+    VectorType m_betas;
+    bool m_isInitialized;
+    bool m_eigenvectorsOk;
+    RealQZ<MatrixType> m_realQZ;
+    MatrixType m_matS;
+
+    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+    ColumnVectorType m_tmp;
+};
+
+//template<typename MatrixType>
+//typename GeneralizedEigenSolver<MatrixType>::EigenvectorsType GeneralizedEigenSolver<MatrixType>::eigenvectors() const
+//{
+//  eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+//  eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+//  Index n = m_eivec.cols();
+//  EigenvectorsType matV(n,n);
+//  // TODO
+//  return matV;
+//}
+
+template<typename MatrixType>
+GeneralizedEigenSolver<MatrixType>&
+GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors)
+{
+  using std::sqrt;
+  using std::abs;
+  eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows());
+
+  // Reduce to generalized real Schur form:
+  // A = Q S Z and B = Q T Z
+  m_realQZ.compute(A, B, computeEigenvectors);
+
+  if (m_realQZ.info() == Success)
+  {
+    m_matS = m_realQZ.matrixS();
+    if (computeEigenvectors)
+      m_eivec = m_realQZ.matrixZ().transpose();
+  
+    // Compute eigenvalues from matS
+    m_alphas.resize(A.cols());
+    m_betas.resize(A.cols());
+    Index i = 0;
+    while (i < A.cols())
+    {
+      if (i == A.cols() - 1 || m_matS.coeff(i+1, i) == Scalar(0))
+      {
+        m_alphas.coeffRef(i) = m_matS.coeff(i, i);
+        m_betas.coeffRef(i)  = m_realQZ.matrixT().coeff(i,i);
+        ++i;
+      }
+      else
+      {
+        Scalar p = Scalar(0.5) * (m_matS.coeff(i, i) - m_matS.coeff(i+1, i+1));
+        Scalar z = sqrt(abs(p * p + m_matS.coeff(i+1, i) * m_matS.coeff(i, i+1)));
+        m_alphas.coeffRef(i)   = ComplexScalar(m_matS.coeff(i+1, i+1) + p, z);
+        m_alphas.coeffRef(i+1) = ComplexScalar(m_matS.coeff(i+1, i+1) + p, -z);
+
+        m_betas.coeffRef(i)   = m_realQZ.matrixT().coeff(i,i);
+        m_betas.coeffRef(i+1) = m_realQZ.matrixT().coeff(i,i);
+        i += 2;
+      }
+    }
+  }
+
+  m_isInitialized = true;
+  m_eigenvectorsOk = false; // TODO: set to computeEigenvectors once eigenvectors() is implemented
+
+  return *this;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERALIZEDEIGENSOLVER_H
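
Because eigenvectors() is still commented out above and m_eigenvectorsOk is forced to false, only the eigenvalue interface of GeneralizedEigenSolver is usable in this version. A hedged sketch of the workflow the class documentation recommends, working with alphas() and betas() directly so that a (nearly) zero beta can be detected; the matrices and the 1e-12 tolerance are arbitrary illustrative choices:

    #include <Eigen/Dense>
    #include <cmath>
    #include <complex>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 4);

      Eigen::GeneralizedEigenSolver<Eigen::MatrixXd> ges(A, B);
      if (ges.info() != Eigen::Success)
        return 1;

      // eigenvalues() is just alphas().cwiseQuotient(betas()); handling the
      // two vectors separately makes an infinite eigenvalue detectable
      for (int i = 0; i < A.rows(); ++i)
      {
        if (std::abs(ges.betas()(i)) > 1e-12)
          std::cout << "lambda_" << i << " = "
                    << ges.alphas()(i) / ges.betas()(i) << "\n";
        else
          std::cout << "lambda_" << i << " is (numerically) infinite\n";
      }
      return 0;
    }
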
diff --git a/ThirdParty/eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/ThirdParty/eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h
index b8378b08a09..3db0c0106c9 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h
@@ -82,7 +82,7 @@ template<typename _MatrixType> class HessenbergDecomposition
     typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;
 
     /** \brief Return type of matrixQ() */
-    typedef typename HouseholderSequence<MatrixType,CoeffVectorType>::ConjugateReturnType HouseholderSequenceType;
+    typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;
     
     typedef internal::HessenbergDecompositionMatrixHReturnType<MatrixType> MatrixHReturnType;
 
@@ -291,7 +291,7 @@ template<typename _MatrixType> class HessenbergDecomposition
 template<typename MatrixType>
 void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp)
 {
-  assert(matA.rows()==matA.cols());
+  eigen_assert(matA.rows()==matA.cols());
   Index n = matA.rows();
   temp.resize(n);
   for (Index i = 0; i<n-1; ++i)
@@ -313,7 +313,7 @@ void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVector
 
     // A = A H'
     matA.rightCols(remainingSize)
-        .applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), internal::conj(h), &temp.coeffRef(0));
+        .applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), numext::conj(h), &temp.coeffRef(0));
   }
 }
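
The HouseholderSequenceType change above only affects the type returned by HessenbergDecomposition::matrixQ(); usage is unchanged. A hedged reconstruction check on an arbitrary random matrix:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      Eigen::HessenbergDecomposition<Eigen::MatrixXd> hd(A);

      // matrixQ() returns a HouseholderSequence expression; it assigns
      // and multiplies like an ordinary matrix
      Eigen::MatrixXd Q = hd.matrixQ();
      Eigen::MatrixXd H = hd.matrixH();

      std::cout << "|A - Q H Q^T| = "
                << (A - Q * H * Q.transpose()).norm() << "\n";
      return 0;
    }
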
 
diff --git a/ThirdParty/eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h b/ThirdParty/eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
index 6af481c75f6..4fec8af0a3e 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
@@ -121,10 +121,11 @@ template<typename Derived>
 inline typename MatrixBase<Derived>::RealScalar
 MatrixBase<Derived>::operatorNorm() const
 {
+  using std::sqrt;
   typename Derived::PlainObject m_eval(derived());
   // FIXME if it is really guaranteed that the eigenvalues are already sorted,
   // then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough.
-  return internal::sqrt((m_eval*m_eval.adjoint())
+  return sqrt((m_eval*m_eval.adjoint())
                  .eval()
 		 .template selfadjointView<Lower>()
 		 .eigenvalues()
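
The operatorNorm() change above only moves the sqrt call from internal:: to std::; the computed quantity is still the L2 operator norm, i.e. the square root of the largest eigenvalue of m*m^H. A hedged cross-check sketch on an arbitrary matrix:

    #include <Eigen/Dense>
    #include <cmath>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);

      double opNorm = m.operatorNorm();

      // the same quantity, computed explicitly from m * m^T
      Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> saes(m * m.transpose());
      double check = std::sqrt(saes.eigenvalues().maxCoeff());

      std::cout << opNorm << " vs " << check << "\n";
      return 0;
    }
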
diff --git a/ThirdParty/eigen3/Eigen/src/Eigenvalues/RealQZ.h b/ThirdParty/eigen3/Eigen/src/Eigenvalues/RealQZ.h
new file mode 100644
index 00000000000..5706eeebe91
--- /dev/null
+++ b/ThirdParty/eigen3/Eigen/src/Eigenvalues/RealQZ.h
@@ -0,0 +1,624 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Alexey Korepanov <kaikaikai@yandex.ru>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REAL_QZ_H
+#define EIGEN_REAL_QZ_H
+
+namespace Eigen {
+
+  /** \eigenvalues_module \ingroup Eigenvalues_Module
+   *
+   *
+   * \class RealQZ
+   *
+   * \brief Performs a real QZ decomposition of a pair of square matrices
+   *
+   * \tparam _MatrixType the type of the matrix of which we are computing the
+   * real QZ decomposition; this is expected to be an instantiation of the
+   * Matrix class template.
+   *
+   * Given real square matrices A and B, this class computes the real QZ
+   * decomposition: \f$ A = Q S Z \f$, \f$ B = Q T Z \f$ where Q and Z are
+   * real orthogonal matrices, T is an upper-triangular matrix, and S is an
+   * upper quasi-triangular matrix. An orthogonal matrix is a matrix whose
+   * inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular
+   * matrix is a block-triangular matrix whose diagonal consists of 1-by-1
+   * blocks and 2-by-2 blocks where further reduction is impossible due to
+   * complex eigenvalues. 
+   *
+   * The eigenvalues of the pencil \f$ A - z B \f$ can be obtained from
+   * 1x1 and 2x2 blocks on the diagonals of S and T.
+   *
+   * Call the function compute() to compute the real QZ decomposition of a
+   * given pair of matrices. Alternatively, you can use the 
+   * RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ)
+   * constructor which computes the real QZ decomposition at construction
+   * time. Once the decomposition is computed, you can use the matrixS(),
+   * matrixT(), matrixQ() and matrixZ() functions to retrieve the matrices
+   * S, T, Q and Z in the decomposition. If computeQZ==false, some time
+   * is saved by not computing matrices Q and Z.
+   *
+   * Example: \include RealQZ_compute.cpp
+   * Output: \verbinclude RealQZ_compute.out
+   *
+   * \note The implementation is based on the algorithm in "Matrix Computations"
+   * by Gene H. Golub and Charles F. Van Loan, and a paper "An algorithm for
+   * generalized eigenvalue problems" by C.B.Moler and G.W.Stewart.
+   *
+   * \sa class RealSchur, class ComplexSchur, class EigenSolver, class ComplexEigenSolver
+   */
+
+  template<typename _MatrixType> class RealQZ
+  {
+    public:
+      typedef _MatrixType MatrixType;
+      enum {
+        RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+        ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+        Options = MatrixType::Options,
+        MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+        MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+      };
+      typedef typename MatrixType::Scalar Scalar;
+      typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+      typedef typename MatrixType::Index Index;
+
+      typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
+      typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+
+      /** \brief Default constructor.
+       *
+       * \param [in] size  Positive integer, size of the matrix whose QZ decomposition will be computed.
+       *
+       * The default constructor is useful in cases in which the user intends to
+       * perform decompositions via compute().  The \p size parameter is only
+       * used as a hint. It is not an error to give a wrong \p size, but it may
+       * impair performance.
+       *
+       * \sa compute() for an example.
+       */
+      RealQZ(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) : 
+        m_S(size, size),
+        m_T(size, size),
+        m_Q(size, size),
+        m_Z(size, size),
+        m_workspace(size*2),
+        m_maxIters(400),
+        m_isInitialized(false)
+        { }
+
+      /** \brief Constructor; computes real QZ decomposition of given matrices
+       * 
+       * \param[in]  A          Matrix A.
+       * \param[in]  B          Matrix B.
+       * \param[in]  computeQZ  If false, Q and Z are not computed.
+       *
+       * This constructor calls compute() to compute the QZ decomposition.
+       */
+      RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ = true) :
+        m_S(A.rows(),A.cols()),
+        m_T(A.rows(),A.cols()),
+        m_Q(A.rows(),A.cols()),
+        m_Z(A.rows(),A.cols()),
+        m_workspace(A.rows()*2),
+        m_maxIters(400),
+        m_isInitialized(false) {
+          compute(A, B, computeQZ);
+        }
+
+      /** \brief Returns matrix Q in the QZ decomposition. 
+       *
+       * \returns A const reference to the matrix Q.
+       */
+      const MatrixType& matrixQ() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
+        return m_Q;
+      }
+
+      /** \brief Returns matrix Z in the QZ decomposition. 
+       *
+       * \returns A const reference to the matrix Z.
+       */
+      const MatrixType& matrixZ() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
+        return m_Z;
+      }
+
+      /** \brief Returns matrix S in the QZ decomposition. 
+       *
+       * \returns A const reference to the matrix S.
+       */
+      const MatrixType& matrixS() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_S;
+      }
+
+      /** \brief Returns matrix T in the QZ decomposition.
+       *
+       * \returns A const reference to the matrix T.
+       */
+      const MatrixType& matrixT() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_T;
+      }
+
+      /** \brief Computes the QZ decomposition of the given matrices.
+       * 
+       * \param[in]  A          Matrix A.
+       * \param[in]  B          Matrix B.
+       * \param[in]  computeQZ  If false, Q and Z are not computed.
+       * \returns    Reference to \c *this
+       */
+      RealQZ& compute(const MatrixType& A, const MatrixType& B, bool computeQZ = true);
+
+      /** \brief Reports whether previous computation was successful.
+       *
+       * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+       */
+      ComputationInfo info() const
+      {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_info;
+      }
+
+      /** \brief Returns the number of QR-like iterations performed.
+      */
+      Index iterations() const
+      {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_global_iter;
+      }
+
+      /** Sets the maximal number of iterations allowed to converge to one eigenvalue
+       * or decouple the problem.
+      */
+      RealQZ& setMaxIterations(Index maxIters)
+      {
+        m_maxIters = maxIters;
+        return *this;
+      }
+
+    private:
+
+      MatrixType m_S, m_T, m_Q, m_Z;
+      Matrix<Scalar,Dynamic,1> m_workspace;
+      ComputationInfo m_info;
+      Index m_maxIters;
+      bool m_isInitialized;
+      bool m_computeQZ;
+      Scalar m_normOfT, m_normOfS;
+      Index m_global_iter;
+
+      typedef Matrix<Scalar,3,1> Vector3s;
+      typedef Matrix<Scalar,2,1> Vector2s;
+      typedef Matrix<Scalar,2,2> Matrix2s;
+      typedef JacobiRotation<Scalar> JRs;
+
+      void hessenbergTriangular();
+      void computeNorms();
+      Index findSmallSubdiagEntry(Index iu);
+      Index findSmallDiagEntry(Index f, Index l);
+      void splitOffTwoRows(Index i);
+      void pushDownZero(Index z, Index f, Index l);
+      void step(Index f, Index l, Index iter);
+
+  }; // RealQZ
+
+  /** \internal Reduces S to upper Hessenberg form and T to upper triangular form */
+  template<typename MatrixType>
+    void RealQZ<MatrixType>::hessenbergTriangular()
+    {
+
+      const Index dim = m_S.cols();
+
+      // perform QR decomposition of T, overwrite T with R, save Q
+      HouseholderQR<MatrixType> qrT(m_T);
+      m_T = qrT.matrixQR();
+      m_T.template triangularView<StrictlyLower>().setZero();
+      m_Q = qrT.householderQ();
+      // overwrite S with Q* S
+      m_S.applyOnTheLeft(m_Q.adjoint());
+      // init Z as Identity
+      if (m_computeQZ)
+        m_Z = MatrixType::Identity(dim,dim);
+      // reduce S to upper Hessenberg with Givens rotations
+      for (Index j=0; j<=dim-3; j++) {
+        for (Index i=dim-1; i>=j+2; i--) {
+          JRs G;
+          // kill S(i,j)
+          if(m_S.coeff(i,j) != 0)
+          {
+            G.makeGivens(m_S.coeff(i-1,j), m_S.coeff(i,j), &m_S.coeffRef(i-1, j));
+            m_S.coeffRef(i,j) = Scalar(0.0);
+            m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint());
+            m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint());
+          }
+          // update Q
+          if (m_computeQZ)
+            m_Q.applyOnTheRight(i-1,i,G);
+          // kill T(i,i-1)
+          if(m_T.coeff(i,i-1)!=Scalar(0))
+          {
+            G.makeGivens(m_T.coeff(i,i), m_T.coeff(i,i-1), &m_T.coeffRef(i,i));
+            m_T.coeffRef(i,i-1) = Scalar(0.0);
+            m_S.applyOnTheRight(i,i-1,G);
+            m_T.topRows(i).applyOnTheRight(i,i-1,G);
+          }
+          // update Z
+          if (m_computeQZ)
+            m_Z.applyOnTheLeft(i,i-1,G.adjoint());
+        }
+      }
+    }
+
+  /** \internal Computes L1 norms of S and T, assuming they are already in Hessenberg-triangular form */
+  template<typename MatrixType>
+    inline void RealQZ<MatrixType>::computeNorms()
+    {
+      const Index size = m_S.cols();
+      m_normOfS = Scalar(0.0);
+      m_normOfT = Scalar(0.0);
+      for (Index j = 0; j < size; ++j)
+      {
+        m_normOfS += m_S.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();
+        m_normOfT += m_T.row(j).segment(j, size - j).cwiseAbs().sum();
+      }
+    }
+
+
+  /** \internal Look for single small sub-diagonal element S(res, res-1) and return res (or 0) */
+  template<typename MatrixType>
+    inline typename MatrixType::Index RealQZ<MatrixType>::findSmallSubdiagEntry(Index iu)
+    {
+      using std::abs;
+      Index res = iu;
+      while (res > 0)
+      {
+        Scalar s = abs(m_S.coeff(res-1,res-1)) + abs(m_S.coeff(res,res));
+        if (s == Scalar(0.0))
+          s = m_normOfS;
+        if (abs(m_S.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
+          break;
+        res--;
+      }
+      return res;
+    }
+
+  /** \internal Look for single small diagonal element T(res, res) for res between f and l, and return res (or f-1)  */
+  template<typename MatrixType>
+    inline typename MatrixType::Index RealQZ<MatrixType>::findSmallDiagEntry(Index f, Index l)
+    {
+      using std::abs;
+      Index res = l;
+      while (res >= f) {
+        if (abs(m_T.coeff(res,res)) <= NumTraits<Scalar>::epsilon() * m_normOfT)
+          break;
+        res--;
+      }
+      return res;
+    }
+
+  /** \internal decouple 2x2 diagonal block in rows i, i+1 if eigenvalues are real */
+  template<typename MatrixType>
+    inline void RealQZ<MatrixType>::splitOffTwoRows(Index i)
+    {
+      using std::abs;
+      using std::sqrt;
+      const Index dim=m_S.cols();
+      if (abs(m_S.coeff(i+1,i)) == Scalar(0))
+        return;
+      Index z = findSmallDiagEntry(i,i+1);
+      if (z==i-1)
+      {
+        // block of (S T^{-1})
+        Matrix2s STi = m_T.template block<2,2>(i,i).template triangularView<Upper>().
+          template solve<OnTheRight>(m_S.template block<2,2>(i,i));
+        Scalar p = Scalar(0.5)*(STi(0,0)-STi(1,1));
+        Scalar q = p*p + STi(1,0)*STi(0,1);
+        if (q>=0) {
+          Scalar z = sqrt(q);
+          // one QR-like iteration for ABi - lambda I
+          // is enough - when we know exact eigenvalue in advance,
+          // convergence is immediate
+          JRs G;
+          if (p>=0)
+            G.makeGivens(p + z, STi(1,0));
+          else
+            G.makeGivens(p - z, STi(1,0));
+          m_S.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());
+          m_T.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());
+          // update Q
+          if (m_computeQZ)
+            m_Q.applyOnTheRight(i,i+1,G);
+
+          G.makeGivens(m_T.coeff(i+1,i+1), m_T.coeff(i+1,i));
+          m_S.topRows(i+2).applyOnTheRight(i+1,i,G);
+          m_T.topRows(i+2).applyOnTheRight(i+1,i,G);
+          // update Z
+          if (m_computeQZ)
+            m_Z.applyOnTheLeft(i+1,i,G.adjoint());
+
+          m_S.coeffRef(i+1,i) = Scalar(0.0);
+          m_T.coeffRef(i+1,i) = Scalar(0.0);
+        }
+      }
+      else
+      {
+        pushDownZero(z,i,i+1);
+      }
+    }
+
+  /** \internal use zero in T(z,z) to zero S(l,l-1), working in block f..l */
+  template<typename MatrixType>
+    inline void RealQZ<MatrixType>::pushDownZero(Index z, Index f, Index l)
+    {
+      JRs G;
+      const Index dim = m_S.cols();
+      for (Index zz=z; zz<l; zz++)
+      {
+        // push 0 down
+        Index firstColS = zz>f ? (zz-1) : zz;
+        G.makeGivens(m_T.coeff(zz, zz+1), m_T.coeff(zz+1, zz+1));
+        m_S.rightCols(dim-firstColS).applyOnTheLeft(zz,zz+1,G.adjoint());
+        m_T.rightCols(dim-zz).applyOnTheLeft(zz,zz+1,G.adjoint());
+        m_T.coeffRef(zz+1,zz+1) = Scalar(0.0);
+        // update Q
+        if (m_computeQZ)
+          m_Q.applyOnTheRight(zz,zz+1,G);
+        // kill S(zz+1, zz-1)
+        if (zz>f)
+        {
+          G.makeGivens(m_S.coeff(zz+1, zz), m_S.coeff(zz+1,zz-1));
+          m_S.topRows(zz+2).applyOnTheRight(zz, zz-1,G);
+          m_T.topRows(zz+1).applyOnTheRight(zz, zz-1,G);
+          m_S.coeffRef(zz+1,zz-1) = Scalar(0.0);
+          // update Z
+          if (m_computeQZ)
+            m_Z.applyOnTheLeft(zz,zz-1,G.adjoint());
+        }
+      }
+      // finally kill S(l,l-1)
+      G.makeGivens(m_S.coeff(l,l), m_S.coeff(l,l-1));
+      m_S.applyOnTheRight(l,l-1,G);
+      m_T.applyOnTheRight(l,l-1,G);
+      m_S.coeffRef(l,l-1)=Scalar(0.0);
+      // update Z
+      if (m_computeQZ)
+        m_Z.applyOnTheLeft(l,l-1,G.adjoint());
+    }
+
+  /** \internal QR-like iterative step for block f..l */
+  template<typename MatrixType>
+    inline void RealQZ<MatrixType>::step(Index f, Index l, Index iter)
+    {
+      using std::abs;
+      const Index dim = m_S.cols();
+
+      // x, y, z
+      Scalar x, y, z;
+      if (iter==10)
+      {
+        // Wilkinson ad hoc shift
+        const Scalar
+          a11=m_S.coeff(f+0,f+0), a12=m_S.coeff(f+0,f+1),
+          a21=m_S.coeff(f+1,f+0), a22=m_S.coeff(f+1,f+1), a32=m_S.coeff(f+2,f+1),
+          b12=m_T.coeff(f+0,f+1),
+          b11i=Scalar(1.0)/m_T.coeff(f+0,f+0),
+          b22i=Scalar(1.0)/m_T.coeff(f+1,f+1),
+          a87=m_S.coeff(l-1,l-2),
+          a98=m_S.coeff(l-0,l-1),
+          b77i=Scalar(1.0)/m_T.coeff(l-2,l-2),
+          b88i=Scalar(1.0)/m_T.coeff(l-1,l-1);
+        Scalar ss = abs(a87*b77i) + abs(a98*b88i),
+               lpl = Scalar(1.5)*ss,
+               ll = ss*ss;
+        x = ll + a11*a11*b11i*b11i - lpl*a11*b11i + a12*a21*b11i*b22i
+          - a11*a21*b12*b11i*b11i*b22i;
+        y = a11*a21*b11i*b11i - lpl*a21*b11i + a21*a22*b11i*b22i 
+          - a21*a21*b12*b11i*b11i*b22i;
+        z = a21*a32*b11i*b22i;
+      }
+      else if (iter==16)
+      {
+        // another exceptional shift
+        x = m_S.coeff(f,f)/m_T.coeff(f,f)-m_S.coeff(l,l)/m_T.coeff(l,l) + m_S.coeff(l,l-1)*m_T.coeff(l-1,l) /
+          (m_T.coeff(l-1,l-1)*m_T.coeff(l,l));
+        y = m_S.coeff(f+1,f)/m_T.coeff(f,f);
+        z = 0;
+      }
+      else if (iter>23 && !(iter%8))
+      {
+        // extremely exceptional shift
+        x = internal::random<Scalar>(-1.0,1.0);
+        y = internal::random<Scalar>(-1.0,1.0);
+        z = internal::random<Scalar>(-1.0,1.0);
+      }
+      else
+      {
+        // Compute the shifts: (x,y,z,0...) = (AB^-1 - l1 I) (AB^-1 - l2 I) e1
+        // where l1 and l2 are the eigenvalues of the 2x2 matrix C = U V^-1 where
+        // U and V are 2x2 bottom right sub matrices of A and B. Thus:
+        //  = AB^-1AB^-1 + l1 l2 I - (l1+l2)(AB^-1)
+        //  = AB^-1AB^-1 + det(M) - tr(M)(AB^-1)
+        // Since we are only interested in having x, y, z with a correct ratio, we have:
+        const Scalar
+          a11 = m_S.coeff(f,f),     a12 = m_S.coeff(f,f+1),
+          a21 = m_S.coeff(f+1,f),   a22 = m_S.coeff(f+1,f+1),
+                                    a32 = m_S.coeff(f+2,f+1),
+
+          a88 = m_S.coeff(l-1,l-1), a89 = m_S.coeff(l-1,l),
+          a98 = m_S.coeff(l,l-1),   a99 = m_S.coeff(l,l),
+
+          b11 = m_T.coeff(f,f),     b12 = m_T.coeff(f,f+1),
+                                    b22 = m_T.coeff(f+1,f+1),
+
+          b88 = m_T.coeff(l-1,l-1), b89 = m_T.coeff(l-1,l),
+                                    b99 = m_T.coeff(l,l);
+
+        x = ( (a88/b88 - a11/b11)*(a99/b99 - a11/b11) - (a89/b99)*(a98/b88) + (a98/b88)*(b89/b99)*(a11/b11) ) * (b11/a21)
+          + a12/b22 - (a11/b11)*(b12/b22);
+        y = (a22/b22-a11/b11) - (a21/b11)*(b12/b22) - (a88/b88-a11/b11) - (a99/b99-a11/b11) + (a98/b88)*(b89/b99);
+        z = a32/b22;
+      }
+
+      JRs G;
+
+      for (Index k=f; k<=l-2; k++)
+      {
+        // variables for Householder reflections
+        Vector2s essential2;
+        Scalar tau, beta;
+
+        Vector3s hr(x,y,z);
+
+        // Q_k to annihilate S(k+1,k-1) and S(k+2,k-1)
+        hr.makeHouseholderInPlace(tau, beta);
+        essential2 = hr.template bottomRows<2>();
+        Index fc=(std::max)(k-1,Index(0));  // first col to update
+        m_S.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());
+        m_T.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());
+        if (m_computeQZ)
+          m_Q.template middleCols<3>(k).applyHouseholderOnTheRight(essential2, tau, m_workspace.data());
+        if (k>f)
+          m_S.coeffRef(k+2,k-1) = m_S.coeffRef(k+1,k-1) = Scalar(0.0);
+
+        // Z_{k1} to annihilate T(k+2,k+1) and T(k+2,k)
+        hr << m_T.coeff(k+2,k+2),m_T.coeff(k+2,k),m_T.coeff(k+2,k+1);
+        hr.makeHouseholderInPlace(tau, beta);
+        essential2 = hr.template bottomRows<2>();
+        {
+          Index lr = (std::min)(k+4,dim); // last row to update
+          Map<Matrix<Scalar,Dynamic,1> > tmp(m_workspace.data(),lr);
+          // S
+          tmp = m_S.template middleCols<2>(k).topRows(lr) * essential2;
+          tmp += m_S.col(k+2).head(lr);
+          m_S.col(k+2).head(lr) -= tau*tmp;
+          m_S.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();
+          // T
+          tmp = m_T.template middleCols<2>(k).topRows(lr) * essential2;
+          tmp += m_T.col(k+2).head(lr);
+          m_T.col(k+2).head(lr) -= tau*tmp;
+          m_T.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();
+        }
+        if (m_computeQZ)
+        {
+          // Z
+          Map<Matrix<Scalar,1,Dynamic> > tmp(m_workspace.data(),dim);
+          tmp = essential2.adjoint()*(m_Z.template middleRows<2>(k));
+          tmp += m_Z.row(k+2);
+          m_Z.row(k+2) -= tau*tmp;
+          m_Z.template middleRows<2>(k) -= essential2 * (tau*tmp);
+        }
+        m_T.coeffRef(k+2,k) = m_T.coeffRef(k+2,k+1) = Scalar(0.0);
+
+        // Z_{k2} to annihilate T(k+1,k)
+        G.makeGivens(m_T.coeff(k+1,k+1), m_T.coeff(k+1,k));
+        m_S.applyOnTheRight(k+1,k,G);
+        m_T.applyOnTheRight(k+1,k,G);
+        // update Z
+        if (m_computeQZ)
+          m_Z.applyOnTheLeft(k+1,k,G.adjoint());
+        m_T.coeffRef(k+1,k) = Scalar(0.0);
+
+        // update x,y,z
+        x = m_S.coeff(k+1,k);
+        y = m_S.coeff(k+2,k);
+        if (k < l-2)
+          z = m_S.coeff(k+3,k);
+      } // loop over k
+
+      // Q_{n-1} to annihilate y = S(l,l-2)
+      G.makeGivens(x,y);
+      m_S.applyOnTheLeft(l-1,l,G.adjoint());
+      m_T.applyOnTheLeft(l-1,l,G.adjoint());
+      if (m_computeQZ)
+        m_Q.applyOnTheRight(l-1,l,G);
+      m_S.coeffRef(l,l-2) = Scalar(0.0);
+
+      // Z_{n-1} to annihilate T(l,l-1)
+      G.makeGivens(m_T.coeff(l,l),m_T.coeff(l,l-1));
+      m_S.applyOnTheRight(l,l-1,G);
+      m_T.applyOnTheRight(l,l-1,G);
+      if (m_computeQZ)
+        m_Z.applyOnTheLeft(l,l-1,G.adjoint());
+      m_T.coeffRef(l,l-1) = Scalar(0.0);
+    }
+
+
+  template<typename MatrixType>
+    RealQZ<MatrixType>& RealQZ<MatrixType>::compute(const MatrixType& A_in, const MatrixType& B_in, bool computeQZ)
+    {
+
+      const Index dim = A_in.cols();
+
+      eigen_assert (A_in.rows()==dim && A_in.cols()==dim 
+          && B_in.rows()==dim && B_in.cols()==dim 
+          && "Need square matrices of the same dimension");
+
+      m_isInitialized = true;
+      m_computeQZ = computeQZ;
+      m_S = A_in; m_T = B_in;
+      m_workspace.resize(dim*2);
+      m_global_iter = 0;
+
+      // entrance point: hessenberg triangular decomposition
+      hessenbergTriangular();
+      // compute L1 vector norms of T, S into m_normOfS, m_normOfT
+      computeNorms();
+
+      Index l = dim-1, 
+            f, 
+            local_iter = 0;
+
+      while (l>0 && local_iter<m_maxIters)
+      {
+        f = findSmallSubdiagEntry(l);
+        // now rows and columns f..l (including) decouple from the rest of the problem
+        if (f>0) m_S.coeffRef(f,f-1) = Scalar(0.0);
+        if (f == l) // One root found
+        {
+          l--;
+          local_iter = 0;
+        }
+        else if (f == l-1) // Two roots found
+        {
+          splitOffTwoRows(f);
+          l -= 2;
+          local_iter = 0;
+        }
+        else // No convergence yet
+        {
+          // if there's zero on diagonal of T, we can isolate an eigenvalue with Givens rotations
+          Index z = findSmallDiagEntry(f,l);
+          if (z>=f)
+          {
+            // zero found
+            pushDownZero(z,f,l);
+          }
+          else
+          {
+            // We are sure now that S.block(f,f, l-f+1,l-f+1) is unreduced upper-Hessenberg
+            // and T.block(f,f, l-f+1,l-f+1) is invertible upper-triangular, which allows us to
+            // apply a QR-like iteration to rows and columns f..l.
+            step(f,l, local_iter);
+            local_iter++;
+            m_global_iter++;
+          }
+        }
+      }
+      // check if we converged before reaching iterations limit
+      m_info = (local_iter<m_maxIters) ? Success : NoConvergence;
+      return *this;
+    } // end compute
+
+} // end namespace Eigen
+
+#endif // EIGEN_REAL_QZ_H
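
A hedged usage sketch for the new RealQZ class, checking the documented factorizations A = Q S Z and B = Q T Z on arbitrary random input; the reconstruction errors should come out at round-off level:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 4);

      Eigen::RealQZ<Eigen::MatrixXd> qz(A, B);  // computes S, T, Q and Z
      if (qz.info() != Eigen::Success)
        return 1;

      std::cout << "|A - Q S Z| = "
                << (A - qz.matrixQ() * qz.matrixS() * qz.matrixZ()).norm() << "\n"
                << "|B - Q T Z| = "
                << (B - qz.matrixQ() * qz.matrixT() * qz.matrixZ()).norm() << "\n"
                << "QR-like iterations: " << qz.iterations() << "\n";
      return 0;
    }
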
diff --git a/ThirdParty/eigen3/Eigen/src/Eigenvalues/RealSchur.h b/ThirdParty/eigen3/Eigen/src/Eigenvalues/RealSchur.h
index d1949b83cde..64d13634141 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigenvalues/RealSchur.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigenvalues/RealSchur.h
@@ -2,7 +2,7 @@
 // for linear algebra.
 //
 // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
 //
 // This Source Code Form is subject to the terms of the Mozilla
 // Public License v. 2.0. If a copy of the MPL was not distributed
@@ -86,7 +86,8 @@ template<typename _MatrixType> class RealSchur
               m_workspaceVector(size),
               m_hess(size),
               m_isInitialized(false),
-              m_matUisUptodate(false)
+              m_matUisUptodate(false),
+              m_maxIters(-1)
     { }
 
     /** \brief Constructor; computes real Schur decomposition of given matrix. 
@@ -105,7 +106,8 @@ template<typename _MatrixType> class RealSchur
               m_workspaceVector(matrix.rows()),
               m_hess(matrix.rows()),
               m_isInitialized(false),
-              m_matUisUptodate(false)
+              m_matUisUptodate(false),
+              m_maxIters(-1)
     {
       compute(matrix, computeU);
     }
@@ -160,9 +162,30 @@ template<typename _MatrixType> class RealSchur
       *
       * Example: \include RealSchur_compute.cpp
       * Output: \verbinclude RealSchur_compute.out
+      *
+      * \sa compute(const MatrixType&, bool, Index)
       */
     RealSchur& compute(const MatrixType& matrix, bool computeU = true);
 
+    /** \brief Computes the Schur decomposition of a Hessenberg matrix H = Z T Z^T
+     *  \param[in] matrixH Matrix in Hessenberg form H
+     *  \param[in] matrixQ Orthogonal matrix Q that transforms a matrix A to H : A = Q H Q^T
+     *  \param computeU Computes the matrix U of the Schur vectors
+     * \return Reference to \c *this
+     * 
+     *  This routine assumes that the matrix has already been reduced to the Hessenberg
+     *  form \p matrixH, using either the class HessenbergDecomposition or some other means.
+     *  It computes the upper quasi-triangular matrix T of the Schur decomposition of H.
+     *  When computeU is true, this routine computes the matrix U such that
+     *  A = U T U^T = (QZ) T (QZ)^T = Q H Q^T, where A is the initial matrix.
+     * 
+     * NOTE: Q is referenced if computeU is true; so, if the initial orthogonal matrix
+     * is not available, the user should supply an identity matrix (Q.setIdentity()).
+     * 
+     * \sa compute(const MatrixType&, bool)
+     */
+    template<typename HessMatrixType, typename OrthMatrixType>
+    RealSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU);
     /** \brief Reports whether previous computation was successful.
       *
       * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
@@ -173,11 +196,29 @@ template<typename _MatrixType> class RealSchur
       return m_info;
     }
 
-    /** \brief Maximum number of iterations.
+    /** \brief Sets the maximum number of iterations allowed. 
+      *
+      * If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size
+      * of the matrix.
+      */
+    RealSchur& setMaxIterations(Index maxIters)
+    {
+      m_maxIters = maxIters;
+      return *this;
+    }
+
+    /** \brief Returns the maximum number of iterations. */
+    Index getMaxIterations()
+    {
+      return m_maxIters;
+    }
+
+    /** \brief Maximum number of iterations per row.
       *
-      * Maximum number of iterations allowed for an eigenvalue to converge. 
+      * If not otherwise specified, the maximum number of iterations is this number times the size of the
+      * matrix. It is currently set to 40.
       */
-    static const int m_maxIterations = 40;
+    static const int m_maxIterationsPerRow = 40;
 
   private:
     
@@ -188,12 +229,13 @@ template<typename _MatrixType> class RealSchur
     ComputationInfo m_info;
     bool m_isInitialized;
     bool m_matUisUptodate;
+    Index m_maxIters;
 
     typedef Matrix<Scalar,3,1> Vector3s;
 
     Scalar computeNormOfT();
-    Index findSmallSubdiagEntry(Index iu, Scalar norm);
-    void splitOffTwoRows(Index iu, bool computeU, Scalar exshift);
+    Index findSmallSubdiagEntry(Index iu, const Scalar& norm);
+    void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
     void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
     void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
     void performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace);
@@ -203,15 +245,30 @@ template<typename _MatrixType> class RealSchur
 template<typename MatrixType>
 RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
 {
-  assert(matrix.cols() == matrix.rows());
+  eigen_assert(matrix.cols() == matrix.rows());
+  Index maxIters = m_maxIters;
+  if (maxIters == -1)
+    maxIters = m_maxIterationsPerRow * matrix.rows();
 
   // Step 1. Reduce to Hessenberg form
   m_hess.compute(matrix);
-  m_matT = m_hess.matrixH();
-  if (computeU)
-    m_matU = m_hess.matrixQ();
 
   // Step 2. Reduce to real Schur form  
+  computeFromHessenberg(m_hess.matrixH(), m_hess.matrixQ(), computeU);
+  
+  return *this;
+}
+template<typename MatrixType>
+template<typename HessMatrixType, typename OrthMatrixType>
+RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU)
+{  
+  m_matT = matrixH; 
+  if(computeU)
+    m_matU = matrixQ;
+  
+  Index maxIters = m_maxIters;
+  if (maxIters == -1)
+    maxIters = m_maxIterationsPerRow * matrixH.rows();
   m_workspaceVector.resize(m_matT.cols());
   Scalar* workspace = &m_workspaceVector.coeffRef(0);
 
@@ -253,14 +310,14 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix,
         computeShift(iu, iter, exshift, shiftInfo);
         iter = iter + 1;
         totalIter = totalIter + 1;
-        if (totalIter > m_maxIterations * matrix.cols()) break;
+        if (totalIter > maxIters) break;
         Index im;
         initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector);
         performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace);
       }
     }
   }
-  if(totalIter <= m_maxIterations * matrix.cols()) 
+  if(totalIter <= maxIters)
     m_info = Success;
   else
     m_info = NoConvergence;
@@ -280,21 +337,22 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
   //               + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
   Scalar norm(0);
   for (Index j = 0; j < size; ++j)
-    norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
+    norm += m_matT.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();
   return norm;
 }
 
 /** \internal Look for single small sub-diagonal element and returns its index */
 template<typename MatrixType>
-inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, Scalar norm)
+inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, const Scalar& norm)
 {
+  using std::abs;
   Index res = iu;
   while (res > 0)
   {
-    Scalar s = internal::abs(m_matT.coeff(res-1,res-1)) + internal::abs(m_matT.coeff(res,res));
+    Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));
     if (s == 0.0)
       s = norm;
-    if (internal::abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
+    if (abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
       break;
     res--;
   }
@@ -303,8 +361,10 @@ inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(I
 
 /** \internal Update T given that rows iu-1 and iu decouple from the rest. */
 template<typename MatrixType>
-inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, Scalar exshift)
+inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift)
 {
+  using std::sqrt;
+  using std::abs;
   const Index size = m_matT.cols();
 
   // The eigenvalues of the 2x2 matrix [a b; c d] are 
@@ -316,7 +376,7 @@ inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, Scal
 
   if (q >= Scalar(0)) // Two real eigenvalues
   {
-    Scalar z = internal::sqrt(internal::abs(q));
+    Scalar z = sqrt(abs(q));
     JacobiRotation<Scalar> rot;
     if (p >= Scalar(0))
       rot.makeGivens(p + z, m_matT.coeff(iu, iu-1));
@@ -338,6 +398,8 @@ inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, Scal
 template<typename MatrixType>
 inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo)
 {
+  using std::sqrt;
+  using std::abs;
   shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu);
   shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1);
   shiftInfo.coeffRef(2) = m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu);
@@ -348,7 +410,7 @@ inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& ex
     exshift += shiftInfo.coeff(0);
     for (Index i = 0; i <= iu; ++i)
       m_matT.coeffRef(i,i) -= shiftInfo.coeff(0);
-    Scalar s = internal::abs(m_matT.coeff(iu,iu-1)) + internal::abs(m_matT.coeff(iu-1,iu-2));
+    Scalar s = abs(m_matT.coeff(iu,iu-1)) + abs(m_matT.coeff(iu-1,iu-2));
     shiftInfo.coeffRef(0) = Scalar(0.75) * s;
     shiftInfo.coeffRef(1) = Scalar(0.75) * s;
     shiftInfo.coeffRef(2) = Scalar(-0.4375) * s * s;
@@ -361,7 +423,7 @@ inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& ex
     s = s * s + shiftInfo.coeff(2);
     if (s > Scalar(0))
     {
-      s = internal::sqrt(s);
+      s = sqrt(s);
       if (shiftInfo.coeff(1) < shiftInfo.coeff(0))
         s = -s;
       s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
@@ -378,6 +440,7 @@ inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& ex
 template<typename MatrixType>
 inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector)
 {
+  using std::abs;
   Vector3s& v = firstHouseholderVector; // alias to save typing
 
   for (im = iu-2; im >= il; --im)
@@ -391,9 +454,9 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const V
     if (im == il) {
       break;
     }
-    const Scalar lhs = m_matT.coeff(im,im-1) * (internal::abs(v.coeff(1)) + internal::abs(v.coeff(2)));
-    const Scalar rhs = v.coeff(0) * (internal::abs(m_matT.coeff(im-1,im-1)) + internal::abs(Tmm) + internal::abs(m_matT.coeff(im+1,im+1)));
-    if (internal::abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
+    const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2)));
+    const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1)));
+    if (abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
     {
       break;
     }
@@ -404,8 +467,8 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const V
 template<typename MatrixType>
 inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace)
 {
-  assert(im >= il);
-  assert(im <= iu-2);
+  eigen_assert(im >= il);
+  eigen_assert(im <= iu-2);
 
   const Index size = m_matT.cols();
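
[Editorial note] The recurring edit in these hunks — dropping internal::abs/internal::sqrt in favor of a local "using std::abs;" plus an unqualified call — lets argument-dependent lookup find overloads for user-defined scalar types, while built-in types still resolve to std::. A minimal self-contained sketch of the idiom (the Fixed type and generic_abs are illustrative names, not part of this patch):

    #include <cmath>
    #include <iostream>

    namespace mylib {
      struct Fixed { long v; };                    // hypothetical custom scalar type
      Fixed abs(Fixed x) { if (x.v < 0) x.v = -x.v; return x; }
    }

    template<typename Scalar>
    Scalar generic_abs(const Scalar& x)
    {
      using std::abs;  // built-in types resolve to std::abs...
      return abs(x);   // ...while ADL picks mylib::abs for mylib::Fixed
    }

    int main()
    {
      mylib::Fixed f; f.v = -3;
      std::cout << generic_abs(-2.5) << " " << generic_abs(f).v << "\n"; // 2.5 3
    }
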
 
diff --git a/ThirdParty/eigen3/Eigen/src/Eigenvalues/RealSchur_MKL.h b/ThirdParty/eigen3/Eigen/src/Eigenvalues/RealSchur_MKL.h
index 960ec3c764a..ad973646028 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigenvalues/RealSchur_MKL.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigenvalues/RealSchur_MKL.h
@@ -48,7 +48,7 @@ RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<E
   typedef MatrixType::Scalar Scalar; \
   typedef MatrixType::RealScalar RealScalar; \
 \
-  assert(matrix.cols() == matrix.rows()); \
+  eigen_assert(matrix.cols() == matrix.rows()); \
 \
   lapack_int n = matrix.cols(), sdim, info; \
   lapack_int lda = matrix.outerStride(); \
diff --git a/ThirdParty/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/ThirdParty/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
index 24c78b4b2f1..3993046a88e 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -384,6 +384,7 @@ template<typename MatrixType>
 SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
 ::compute(const MatrixType& matrix, int options)
 {
+  using std::abs;
   eigen_assert(matrix.cols() == matrix.rows());
   eigen_assert((options&~(EigVecMask|GenEigMask))==0
           && (options&EigVecMask)!=EigVecMask
@@ -394,7 +395,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
 
   if(n==1)
   {
-    m_eivalues.coeffRef(0,0) = internal::real(matrix.coeff(0,0));
+    m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0));
     if(computeEigenvectors)
       m_eivec.setOnes(n,n);
     m_info = Success;
@@ -408,9 +409,10 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
   MatrixType& mat = m_eivec;
 
   // map the matrix coefficients to [-1:1] to avoid over- and underflow.
-  RealScalar scale = matrix.cwiseAbs().maxCoeff();
+  mat = matrix.template triangularView<Lower>();
+  RealScalar scale = mat.cwiseAbs().maxCoeff();
   if(scale==RealScalar(0)) scale = RealScalar(1);
-  mat = matrix / scale;
+  mat.template triangularView<Lower>() /= scale;
   m_subdiag.resize(n-1);
   internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors);
   
@@ -421,7 +423,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
   while (end>0)
   {
     for (Index i = start; i<end; ++i)
-      if (internal::isMuchSmallerThan(internal::abs(m_subdiag[i]),(internal::abs(diag[i])+internal::abs(diag[i+1]))))
+      if (internal::isMuchSmallerThan(abs(m_subdiag[i]),(abs(diag[i])+abs(diag[i+1]))))
         m_subdiag[i] = 0;
 
     // find the largest unreduced block
@@ -667,7 +669,7 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
   static inline void computeRoots(const MatrixType& m, VectorType& roots)
   {
     using std::sqrt;
-    const Scalar t0 = Scalar(0.5) * sqrt( abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0));
+    const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0));
     const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
     roots(0) = t1 - t0;
     roots(1) = t1 + t0;
@@ -675,6 +677,7 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
   
   static inline void run(SolverType& solver, const MatrixType& mat, int options)
   {
+    using std::sqrt;
     eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
     eigen_assert((options&~(EigVecMask|GenEigMask))==0
             && (options&EigVecMask)!=EigVecMask
@@ -696,9 +699,9 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
     if(computeEigenvectors)
     {
       scaledMat.diagonal().array () -= eivals(1);
-      Scalar a2 = abs2(scaledMat(0,0));
-      Scalar c2 = abs2(scaledMat(1,1));
-      Scalar b2 = abs2(scaledMat(1,0));
+      Scalar a2 = numext::abs2(scaledMat(0,0));
+      Scalar c2 = numext::abs2(scaledMat(1,1));
+      Scalar b2 = numext::abs2(scaledMat(1,0));
       if(a2>c2)
       {
         eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
@@ -736,11 +739,12 @@ namespace internal {
 template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
 static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
 {
+  using std::abs;
   RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
   RealScalar e = subdiag[end-1];
   // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
   // underflow thus leading to inf/NaN values when using the following commented code:
-//   RealScalar e2 = abs2(subdiag[end-1]);
+//   RealScalar e2 = numext::abs2(subdiag[end-1]);
 //   RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
   // This explain the following, somewhat more complicated, version:
   RealScalar mu = diag[end];
@@ -748,8 +752,8 @@ static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index sta
     mu -= abs(e);
   else
   {
-    RealScalar e2 = abs2(subdiag[end-1]);
-    RealScalar h = hypot(td,e);
+    RealScalar e2 = numext::abs2(subdiag[end-1]);
+    RealScalar h = numext::hypot(td,e);
     if(e2==0)  mu -= (e / (td + (td>0 ? 1 : -1))) * (e / h);
     else       mu -= e2 / (td + (td>0 ? h : -h));
   }
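
[Editorial note] The comment block above spells out why the textbook shift formula can underflow once the matrix has been scaled into [-1:1]. A stand-alone sketch of the robust variant used in tridiagonal_qr_step (the free function name and signature are this note's, not Eigen's; std::hypot assumed available, C99/C++11):

    #include <cmath>
    #include <iostream>

    double wilkinson_shift(double dPrev, double dLast, double e)
    {
      double td = (dPrev - dLast) * 0.5;
      double mu = dLast;
      if (td == 0.0)
        mu -= std::abs(e);
      else
      {
        double e2 = e * e;               // may underflow to 0 even when e != 0
        double h  = std::hypot(td, e);   // sqrt(td*td + e*e) without overflow
        if (e2 == 0.0) mu -= (e / (td + (td > 0 ? 1 : -1))) * (e / h);
        else           mu -= e2 / (td + (td > 0 ? h : -h));
      }
      return mu;
    }

    int main()
    {
      std::cout << wilkinson_shift(2.0, 1.0, 1e-200) << "\n"; // finite, ~1.0
    }
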
diff --git a/ThirdParty/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h b/ThirdParty/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h
index 5de5f15d641..17c0dadd23e 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h
@@ -56,7 +56,7 @@ SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(c
 \
   if(n==1) \
   { \
-    m_eivalues.coeffRef(0,0) = internal::real(matrix.coeff(0,0)); \
+    m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0)); \
     if(computeEigenvectors) m_eivec.setOnes(n,n); \
     m_info = Success; \
     m_isInitialized = true; \
diff --git a/ThirdParty/eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h b/ThirdParty/eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h
index c34b7b3b801..192278d685d 100644
--- a/ThirdParty/eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h
+++ b/ThirdParty/eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h
@@ -96,7 +96,7 @@ template<typename _MatrixType> class Tridiagonalization
             >::type SubDiagonalReturnType;
 
     /** \brief Return type of matrixQ() */
-    typedef typename HouseholderSequence<MatrixType,CoeffVectorType>::ConjugateReturnType HouseholderSequenceType;
+    typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;
 
     /** \brief Default constructor.
       *
@@ -345,6 +345,7 @@ namespace internal {
 template<typename MatrixType, typename CoeffVectorType>
 void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
 {
+  using numext::conj;
   typedef typename MatrixType::Index Index;
   typedef typename MatrixType::Scalar Scalar;
   typedef typename MatrixType::RealScalar RealScalar;
@@ -425,8 +426,6 @@ struct tridiagonalization_inplace_selector;
 template<typename MatrixType, typename DiagonalType, typename SubDiagonalType>
 void tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
 {
-  typedef typename MatrixType::Index Index;
-  //Index n = mat.rows();
   eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1);
   tridiagonalization_inplace_selector<MatrixType>::run(mat, diag, subdiag, extractQ);
 }
@@ -467,8 +466,9 @@ struct tridiagonalization_inplace_selector<MatrixType,3,false>
   template<typename DiagonalType, typename SubDiagonalType>
   static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
   {
+    using std::sqrt;
     diag[0] = mat(0,0);
-    RealScalar v1norm2 = abs2(mat(2,0));
+    RealScalar v1norm2 = numext::abs2(mat(2,0));
     if(v1norm2 == RealScalar(0))
     {
       diag[1] = mat(1,1);
@@ -480,7 +480,7 @@ struct tridiagonalization_inplace_selector<MatrixType,3,false>
     }
     else
     {
-      RealScalar beta = sqrt(abs2(mat(1,0)) + v1norm2);
+      RealScalar beta = sqrt(numext::abs2(mat(1,0)) + v1norm2);
       RealScalar invBeta = RealScalar(1)/beta;
       Scalar m01 = mat(1,0) * invBeta;
       Scalar m02 = mat(2,0) * invBeta;
@@ -510,7 +510,7 @@ struct tridiagonalization_inplace_selector<MatrixType,1,IsComplex>
   template<typename DiagonalType, typename SubDiagonalType>
   static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ)
   {
-    diag(0,0) = real(mat(0,0));
+    diag(0,0) = numext::real(mat(0,0));
     if(extractQ)
       mat(0,0) = Scalar(1);
   }
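
[Editorial note] For context, tridiagonalization_inplace (and the Tridiagonalization class wrapping it) factors a selfadjoint A as Q T Q^T with T tridiagonal; a small usage sketch:

    #include <Eigen/Eigenvalues>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd R = Eigen::MatrixXd::Random(4, 4);
      Eigen::MatrixXd A = R + R.transpose();         // selfadjoint test matrix
      Eigen::Tridiagonalization<Eigen::MatrixXd> tri(A);
      Eigen::MatrixXd Q = tri.matrixQ();             // product of Householder reflectors
      Eigen::MatrixXd T = tri.matrixT();             // real symmetric tridiagonal
      std::cout << (Q * T * Q.transpose() - A).norm() << "\n";  // ~0
    }
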
diff --git a/ThirdParty/eigen3/Eigen/src/Geometry/EulerAngles.h b/ThirdParty/eigen3/Eigen/src/Geometry/EulerAngles.h
index e424d240604..97984d590e3 100644
--- a/ThirdParty/eigen3/Eigen/src/Geometry/EulerAngles.h
+++ b/ThirdParty/eigen3/Eigen/src/Geometry/EulerAngles.h
@@ -27,55 +27,75 @@ namespace Eigen {
   *      * AngleAxisf(ea[1], Vector3f::UnitX())
   *      * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode
   * This corresponds to the right-multiply conventions (with right hand side frames).
+  * 
+  * The returned angles are in the ranges [0:pi]x[-pi:pi]x[-pi:pi].
+  * 
+  * \sa class AngleAxis
   */
 template<typename Derived>
 inline Matrix<typename MatrixBase<Derived>::Scalar,3,1>
 MatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const
 {
+  using std::atan2;
+  using std::sin;
+  using std::cos;
   /* Implemented from Graphics Gems IV */
   EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3)
 
   Matrix<Scalar,3,1> res;
   typedef Matrix<typename Derived::Scalar,2,1> Vector2;
-  const Scalar epsilon = NumTraits<Scalar>::dummy_precision();
 
   const Index odd = ((a0+1)%3 == a1) ? 0 : 1;
   const Index i = a0;
   const Index j = (a0 + 1 + odd)%3;
   const Index k = (a0 + 2 - odd)%3;
-
+  
   if (a0==a2)
   {
-    Scalar s = Vector2(coeff(j,i) , coeff(k,i)).norm();
-    res[1] = internal::atan2(s, coeff(i,i));
-    if (s > epsilon)
+    res[0] = atan2(coeff(j,i), coeff(k,i));
+    if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0)))
     {
-      res[0] = internal::atan2(coeff(j,i), coeff(k,i));
-      res[2] = internal::atan2(coeff(i,j),-coeff(i,k));
+      res[0] = (res[0] > Scalar(0)) ? res[0] - Scalar(M_PI) : res[0] + Scalar(M_PI);
+      Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();
+      res[1] = -atan2(s2, coeff(i,i));
     }
     else
     {
-      res[0] = Scalar(0);
-      res[2] = (coeff(i,i)>0?1:-1)*internal::atan2(-coeff(k,j), coeff(j,j));
+      Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();
+      res[1] = atan2(s2, coeff(i,i));
     }
-  }
+    
+    // With a=(0,1,0), we have i=0; j=1; k=2, and after computing the first two angles,
+    // we can compute their respective rotation, and apply its inverse to M. Since the result must
+    // be a rotation around x, we have:
+    //
+    //  c2  s1.s2 c1.s2                   1  0   0 
+    //  0   c1    -s1       *    M    =   0  c3  s3
+    //  -s2 s1.c2 c1.c2                   0 -s3  c3
+    //
+    //  Thus:  m11.c1 - m21.s1 = c3  &   m12.c1 - m22.s1 = s3
+    
+    Scalar s1 = sin(res[0]);
+    Scalar c1 = cos(res[0]);
+    res[2] = atan2(c1*coeff(j,k)-s1*coeff(k,k), c1*coeff(j,j) - s1 * coeff(k,j));
+  } 
   else
   {
-    Scalar c = Vector2(coeff(i,i) , coeff(i,j)).norm();
-    res[1] = internal::atan2(-coeff(i,k), c);
-    if (c > epsilon)
-    {
-      res[0] = internal::atan2(coeff(j,k), coeff(k,k));
-      res[2] = internal::atan2(coeff(i,j), coeff(i,i));
+    res[0] = atan2(coeff(j,k), coeff(k,k));
+    Scalar c2 = Vector2(coeff(i,i), coeff(i,j)).norm();
+    if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0))) {
+      res[0] = (res[0] > Scalar(0)) ? res[0] - Scalar(M_PI) : res[0] + Scalar(M_PI);
+      res[1] = atan2(-coeff(i,k), -c2);
     }
     else
-    {
-      res[0] = Scalar(0);
-      res[2] = (coeff(i,k)>0?1:-1)*internal::atan2(-coeff(k,j), coeff(j,j));
-    }
+      res[1] = atan2(-coeff(i,k), c2);
+    Scalar s1 = sin(res[0]);
+    Scalar c1 = cos(res[0]);
+    res[2] = atan2(s1*coeff(k,i)-c1*coeff(j,i), c1*coeff(j,j) - s1 * coeff(k,j));
   }
   if (!odd)
     res = -res;
+  
   return res;
 }
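
[Editorial note] As the new documentation above states, re-composing the returned angles with the same axis order recovers the rotation; a round-trip sketch:

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      using namespace Eigen;
      Matrix3f R = (AngleAxisf(0.3f, Vector3f::UnitZ())
                  * AngleAxisf(1.1f, Vector3f::UnitY())
                  * AngleAxisf(-0.4f, Vector3f::UnitZ())).toRotationMatrix();
      Vector3f ea = R.eulerAngles(2, 1, 2);          // Z-Y-Z convention
      Matrix3f R2 = (AngleAxisf(ea[0], Vector3f::UnitZ())
                   * AngleAxisf(ea[1], Vector3f::UnitY())
                   * AngleAxisf(ea[2], Vector3f::UnitZ())).toRotationMatrix();
      std::cout << (R - R2).norm() << "\n";          // ~0: same rotation recovered
    }
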
 
diff --git a/ThirdParty/eigen3/Eigen/src/Geometry/Homogeneous.h b/ThirdParty/eigen3/Eigen/src/Geometry/Homogeneous.h
index df03feb55c6..00e71d190c3 100644
--- a/ThirdParty/eigen3/Eigen/src/Geometry/Homogeneous.h
+++ b/ThirdParty/eigen3/Eigen/src/Geometry/Homogeneous.h
@@ -59,7 +59,7 @@ template<typename MatrixType,typename Rhs> struct homogeneous_right_product_impl
 } // end namespace internal
 
 template<typename MatrixType,int _Direction> class Homogeneous
-  : public MatrixBase<Homogeneous<MatrixType,_Direction> >
+  : internal::no_assignment_operator, public MatrixBase<Homogeneous<MatrixType,_Direction> >
 {
   public:
 
diff --git a/ThirdParty/eigen3/Eigen/src/Geometry/Hyperplane.h b/ThirdParty/eigen3/Eigen/src/Geometry/Hyperplane.h
index 1b7c7c78c80..aeff43fefa6 100644
--- a/ThirdParty/eigen3/Eigen/src/Geometry/Hyperplane.h
+++ b/ThirdParty/eigen3/Eigen/src/Geometry/Hyperplane.h
@@ -50,7 +50,7 @@ public:
   typedef const Block<const Coefficients,AmbientDimAtCompileTime,1> ConstNormalReturnType;
 
   /** Default constructor without initialization */
-  inline explicit Hyperplane() {}
+  inline Hyperplane() {}
   
   template<int OtherOptions>
   Hyperplane(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
@@ -75,7 +75,7 @@ public:
     * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$.
     * \warning the vector normal is assumed to be normalized.
     */
-  inline Hyperplane(const VectorType& n, Scalar d)
+  inline Hyperplane(const VectorType& n, const Scalar& d)
     : m_coeffs(n.size()+1)
   {
     normal() = n;
@@ -135,7 +135,7 @@ public:
   /** \returns the absolute distance between the plane \c *this and a point \a p.
     * \sa signedDistance()
     */
-  inline Scalar absDistance(const VectorType& p) const { return internal::abs(signedDistance(p)); }
+  inline Scalar absDistance(const VectorType& p) const { using std::abs; return abs(signedDistance(p)); }
 
   /** \returns the projection of a point \a p onto the plane \c *this.
     */
@@ -178,13 +178,14 @@ public:
     */
   VectorType intersection(const Hyperplane& other) const
   {
+    using std::abs;
     EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
     Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);
     // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests
     // whether the two lines are approximately parallel.
     if(internal::isMuchSmallerThan(det, Scalar(1)))
     {   // special case where the two lines are approximately parallel. Pick any point on the first line.
-        if(internal::abs(coeffs().coeff(1))>internal::abs(coeffs().coeff(0)))
+        if(abs(coeffs().coeff(1))>abs(coeffs().coeff(0)))
             return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0));
         else
             return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0));
@@ -256,7 +257,7 @@ public:
     *
     * \sa MatrixBase::isApprox() */
   template<int OtherOptions>
-  bool isApprox(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+  bool isApprox(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
   { return m_coeffs.isApprox(other.m_coeffs, prec); }
 
 protected:
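
[Editorial note] A brief usage sketch of the 2-D line case exercised by intersection() and absDistance() above (points chosen arbitrarily):

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      using namespace Eigen;
      typedef Hyperplane<double,2> Line2;
      Line2 l1 = Line2::Through(Vector2d(0, 0), Vector2d(1, 1));  // y = x
      Line2 l2 = Line2::Through(Vector2d(0, 2), Vector2d(1, 1));  // y = 2 - x
      std::cout << l1.intersection(l2).transpose() << "\n";       // (1, 1)
      std::cout << l1.absDistance(Vector2d(0, 1)) << "\n";        // 1/sqrt(2)
    }
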
diff --git a/ThirdParty/eigen3/Eigen/src/Geometry/OrthoMethods.h b/ThirdParty/eigen3/Eigen/src/Geometry/OrthoMethods.h
index 11ad5829cda..556bc81604e 100644
--- a/ThirdParty/eigen3/Eigen/src/Geometry/OrthoMethods.h
+++ b/ThirdParty/eigen3/Eigen/src/Geometry/OrthoMethods.h
@@ -33,9 +33,9 @@ MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const
   typename internal::nested<Derived,2>::type lhs(derived());
   typename internal::nested<OtherDerived,2>::type rhs(other.derived());
   return typename cross_product_return_type<OtherDerived>::type(
-    internal::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
-    internal::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
-    internal::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0))
+    numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
+    numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
+    numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0))
   );
 }
 
@@ -49,9 +49,9 @@ struct cross3_impl {
   run(const VectorLhs& lhs, const VectorRhs& rhs)
   {
     return typename internal::plain_matrix_type<VectorLhs>::type(
-      internal::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
-      internal::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
-      internal::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)),
+      numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
+      numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
+      numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)),
       0
     );
   }
@@ -78,8 +78,8 @@ MatrixBase<Derived>::cross3(const MatrixBase<OtherDerived>& other) const
 
   typedef typename internal::nested<Derived,2>::type DerivedNested;
   typedef typename internal::nested<OtherDerived,2>::type OtherDerivedNested;
-  const DerivedNested lhs(derived());
-  const OtherDerivedNested rhs(other.derived());
+  DerivedNested lhs(derived());
+  OtherDerivedNested rhs(other.derived());
 
   return internal::cross3_impl<Architecture::Target,
                         typename internal::remove_all<DerivedNested>::type,
@@ -141,8 +141,8 @@ struct unitOrthogonal_selector
     if (maxi==0)
       sndi = 1;
     RealScalar invnm = RealScalar(1)/(Vector2() << src.coeff(sndi),src.coeff(maxi)).finished().norm();
-    perp.coeffRef(maxi) = -conj(src.coeff(sndi)) * invnm;
-    perp.coeffRef(sndi) =  conj(src.coeff(maxi)) * invnm;
+    perp.coeffRef(maxi) = -numext::conj(src.coeff(sndi)) * invnm;
+    perp.coeffRef(sndi) =  numext::conj(src.coeff(maxi)) * invnm;
 
     return perp;
    }
@@ -168,8 +168,8 @@ struct unitOrthogonal_selector<Derived,3>
     || (!isMuchSmallerThan(src.y(), src.z())))
     {
       RealScalar invnm = RealScalar(1)/src.template head<2>().norm();
-      perp.coeffRef(0) = -conj(src.y())*invnm;
-      perp.coeffRef(1) = conj(src.x())*invnm;
+      perp.coeffRef(0) = -numext::conj(src.y())*invnm;
+      perp.coeffRef(1) = numext::conj(src.x())*invnm;
       perp.coeffRef(2) = 0;
     }
     /* if both x and y are close to zero, then the vector is close
@@ -180,8 +180,8 @@ struct unitOrthogonal_selector<Derived,3>
     {
       RealScalar invnm = RealScalar(1)/src.template tail<2>().norm();
       perp.coeffRef(0) = 0;
-      perp.coeffRef(1) = -conj(src.z())*invnm;
-      perp.coeffRef(2) = conj(src.y())*invnm;
+      perp.coeffRef(1) = -numext::conj(src.z())*invnm;
+      perp.coeffRef(2) = numext::conj(src.y())*invnm;
     }
 
     return perp;
@@ -193,7 +193,7 @@ struct unitOrthogonal_selector<Derived,2>
 {
   typedef typename plain_matrix_type<Derived>::type VectorType;
   static inline VectorType run(const Derived& src)
-  { return VectorType(-conj(src.y()), conj(src.x())).normalized(); }
+  { return VectorType(-numext::conj(src.y()), numext::conj(src.x())).normalized(); }
 };
 
 } // end namespace internal
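
[Editorial note] The conjugations above only matter for complex scalars; for real vectors cross() and unitOrthogonal() behave as before, e.g.:

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      Eigen::Vector3d v(1.0, 2.0, 3.0);
      Eigen::Vector3d u = v.unitOrthogonal();   // unit length, orthogonal to v
      Eigen::Vector3d w = v.cross(u);           // orthogonal to both
      std::cout << u.norm() << " " << u.dot(v) << " " << w.dot(v) << "\n"; // 1 ~0 ~0
    }
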
diff --git a/ThirdParty/eigen3/Eigen/src/Geometry/ParametrizedLine.h b/ThirdParty/eigen3/Eigen/src/Geometry/ParametrizedLine.h
index 719a904413d..77fa228e6a5 100644
--- a/ThirdParty/eigen3/Eigen/src/Geometry/ParametrizedLine.h
+++ b/ThirdParty/eigen3/Eigen/src/Geometry/ParametrizedLine.h
@@ -41,7 +41,7 @@ public:
   typedef Matrix<Scalar,AmbientDimAtCompileTime,1,Options> VectorType;
 
   /** Default constructor without initialization */
-  inline explicit ParametrizedLine() {}
+  inline ParametrizedLine() {}
   
   template<int OtherOptions>
   ParametrizedLine(const ParametrizedLine<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
@@ -87,13 +87,13 @@ public:
   /** \returns the distance of a point \a p to its projection onto the line \c *this.
     * \sa squaredDistance()
     */
-  RealScalar distance(const VectorType& p) const { return internal::sqrt(squaredDistance(p)); }
+  RealScalar distance(const VectorType& p) const { using std::sqrt; return sqrt(squaredDistance(p)); }
 
   /** \returns the projection of a point \a p onto the line \c *this. */
   VectorType projection(const VectorType& p) const
   { return origin() + direction().dot(p-origin()) * direction(); }
 
-  VectorType pointAt( Scalar t ) const;
+  VectorType pointAt(const Scalar& t) const;
   
   template <int OtherOptions>
   Scalar intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
@@ -154,7 +154,7 @@ inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const H
   */
 template <typename _Scalar, int _AmbientDim, int _Options>
 inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType
-ParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt( _Scalar t ) const
+ParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt(const _Scalar& t) const
 {
   return origin() + (direction()*t); 
 }
diff --git a/ThirdParty/eigen3/Eigen/src/Geometry/Quaternion.h b/ThirdParty/eigen3/Eigen/src/Geometry/Quaternion.h
index 973ceb84fb8..e135f2b6637 100644
--- a/ThirdParty/eigen3/Eigen/src/Geometry/Quaternion.h
+++ b/ThirdParty/eigen3/Eigen/src/Geometry/Quaternion.h
@@ -154,14 +154,14 @@ public:
     * \a t in [0;1]
     * see http://en.wikipedia.org/wiki/Slerp
     */
-  template<class OtherDerived> Quaternion<Scalar> slerp(Scalar t, const QuaternionBase<OtherDerived>& other) const;
+  template<class OtherDerived> Quaternion<Scalar> slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const;
 
   /** \returns \c true if \c *this is approximately equal to \a other, within the precision
     * determined by \a prec.
     *
     * \sa MatrixBase::isApprox() */
   template<class OtherDerived>
-  bool isApprox(const QuaternionBase<OtherDerived>& other, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+  bool isApprox(const QuaternionBase<OtherDerived>& other, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
   { return coeffs().isApprox(other.coeffs(), prec); }
 
 	/** return the result vector of \a v through the rotation*/
@@ -249,7 +249,7 @@ public:
     * while internally the coefficients are stored in the following order:
     * [\c x, \c y, \c z, \c w]
     */
-  inline Quaternion(Scalar w, Scalar x, Scalar y, Scalar z) : m_coeffs(x, y, z, w){}
+  inline Quaternion(const Scalar& w, const Scalar& x, const Scalar& y, const Scalar& z) : m_coeffs(x, y, z, w){}
 
   /** Constructs and initialize a quaternion from the array data */
   inline Quaternion(const Scalar* data) : m_coeffs(data) {}
@@ -495,9 +495,11 @@ EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const Quaternion
 template<class Derived>
 EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const AngleAxisType& aa)
 {
+  using std::cos;
+  using std::sin;
   Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings
-  this->w() = internal::cos(ha);
-  this->vec() = internal::sin(ha) * aa.axis();
+  this->w() = cos(ha);
+  this->vec() = sin(ha) * aa.axis();
   return derived();
 }
 
@@ -571,6 +573,7 @@ template<typename Derived1, typename Derived2>
 inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
 {
   using std::max;
+  using std::sqrt;
   Vector3 v0 = a.normalized();
   Vector3 v1 = b.normalized();
   Scalar c = v1.dot(v0);
@@ -591,12 +594,12 @@ inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Deri
     Vector3 axis = svd.matrixV().col(2);
 
     Scalar w2 = (Scalar(1)+c)*Scalar(0.5);
-    this->w() = internal::sqrt(w2);
-    this->vec() = axis * internal::sqrt(Scalar(1) - w2);
+    this->w() = sqrt(w2);
+    this->vec() = axis * sqrt(Scalar(1) - w2);
     return derived();
   }
   Vector3 axis = v0.cross(v1);
-  Scalar s = internal::sqrt((Scalar(1)+c)*Scalar(2));
+  Scalar s = sqrt((Scalar(1)+c)*Scalar(2));
   Scalar invs = Scalar(1)/s;
   this->vec() = axis * invs;
   this->w() = s * Scalar(0.5);
@@ -667,7 +670,8 @@ inline typename internal::traits<Derived>::Scalar
 QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
 {
   using std::acos;
-  double d = internal::abs(this->dot(other));
+  using std::abs;
+  double d = abs(this->dot(other));
   if (d>=1.0)
     return Scalar(0);
   return static_cast<Scalar>(2 * acos(d));
@@ -679,12 +683,14 @@ QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& oth
 template <class Derived>
 template <class OtherDerived>
 Quaternion<typename internal::traits<Derived>::Scalar>
-QuaternionBase<Derived>::slerp(Scalar t, const QuaternionBase<OtherDerived>& other) const
+QuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const
 {
   using std::acos;
+  using std::sin;
+  using std::abs;
   static const Scalar one = Scalar(1) - NumTraits<Scalar>::epsilon();
   Scalar d = this->dot(other);
-  Scalar absD = internal::abs(d);
+  Scalar absD = abs(d);
 
   Scalar scale0;
   Scalar scale1;
@@ -698,10 +704,10 @@ QuaternionBase<Derived>::slerp(Scalar t, const QuaternionBase<OtherDerived>& oth
   {
     // theta is the angle between the 2 quaternions
     Scalar theta = acos(absD);
-    Scalar sinTheta = internal::sin(theta);
+    Scalar sinTheta = sin(theta);
 
-    scale0 = internal::sin( ( Scalar(1) - t ) * theta) / sinTheta;
-    scale1 = internal::sin( ( t * theta) ) / sinTheta;
+    scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta;
+    scale1 = sin( ( t * theta) ) / sinTheta;
   }
   if(d<0) scale1 = -scale1;
 
@@ -718,6 +724,7 @@ struct quaternionbase_assign_impl<Other,3,3>
   typedef DenseIndex Index;
   template<class Derived> static inline void run(QuaternionBase<Derived>& q, const Other& mat)
   {
+    using std::sqrt;
     // This algorithm comes from  "Quaternion Calculus and Fast Animation",
     // Ken Shoemake, 1987 SIGGRAPH course notes
     Scalar t = mat.trace();
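
[Editorial note] A short usage sketch covering the slerp() and setFromTwoVectors() code paths touched above (values chosen arbitrarily):

    #include <Eigen/Geometry>
    #include <cmath>
    #include <iostream>

    int main()
    {
      using namespace Eigen;
      Quaterniond qa(AngleAxisd(0.0, Vector3d::UnitZ()));
      Quaterniond qb(AngleAxisd(1.0, Vector3d::UnitZ()));
      Quaterniond qm = qa.slerp(0.5, qb);            // halfway along the arc
      std::cout << 2.0 * std::acos(qm.w()) << "\n";  // ~0.5 rad

      Quaterniond q;                                 // shortest rotation x -> y
      q.setFromTwoVectors(Vector3d::UnitX(), Vector3d::UnitY());
      std::cout << (q * Vector3d::UnitX()).transpose() << "\n";  // ~(0 1 0)
    }
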
diff --git a/ThirdParty/eigen3/Eigen/src/Geometry/Rotation2D.h b/ThirdParty/eigen3/Eigen/src/Geometry/Rotation2D.h
index 868e2ef312f..1cac343a5ee 100644
--- a/ThirdParty/eigen3/Eigen/src/Geometry/Rotation2D.h
+++ b/ThirdParty/eigen3/Eigen/src/Geometry/Rotation2D.h
@@ -59,7 +59,7 @@ protected:
 public:
 
   /** Construct a 2D counter clock wise rotation from the angle \a a in radian. */
-  inline Rotation2D(Scalar a) : m_angle(a) {}
+  inline Rotation2D(const Scalar& a) : m_angle(a) {}
 
   /** \returns the rotation angle */
   inline Scalar angle() const { return m_angle; }
@@ -89,7 +89,7 @@ public:
   /** \returns the spherical interpolation between \c *this and \a other using
     * parameter \a t. It is in fact equivalent to a linear interpolation.
     */
-  inline Rotation2D slerp(Scalar t, const Rotation2D& other) const
+  inline Rotation2D slerp(const Scalar& t, const Rotation2D& other) const
   { return m_angle * (1-t) + other.angle() * t; }
 
   /** \returns \c *this with scalar type casted to \a NewScalarType
@@ -114,7 +114,7 @@ public:
     * determined by \a prec.
     *
     * \sa MatrixBase::isApprox() */
-  bool isApprox(const Rotation2D& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+  bool isApprox(const Rotation2D& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
   { return internal::isApprox(m_angle,other.m_angle, prec); }
 };
 
@@ -133,8 +133,9 @@ template<typename Scalar>
 template<typename Derived>
 Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
 {
+  using std::atan2;
   EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
-  m_angle = internal::atan2(mat.coeff(1,0), mat.coeff(0,0));
+  m_angle = atan2(mat.coeff(1,0), mat.coeff(0,0));
   return *this;
 }
 
@@ -144,8 +145,10 @@ template<typename Scalar>
 typename Rotation2D<Scalar>::Matrix2
 Rotation2D<Scalar>::toRotationMatrix(void) const
 {
-  Scalar sinA = internal::sin(m_angle);
-  Scalar cosA = internal::cos(m_angle);
+  using std::sin;
+  using std::cos;
+  Scalar sinA = sin(m_angle);
+  Scalar cosA = cos(m_angle);
   return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
 }
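
[Editorial note] Since fromRotationMatrix() recovers the angle via atan2 of the first column, a Rotation2D survives the round trip through its matrix form:

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      Eigen::Rotation2Dd r(0.25);           // 0.25 rad, counter-clockwise
      Eigen::Matrix2d M = r.toRotationMatrix();
      Eigen::Rotation2Dd r2(0.0);
      r2.fromRotationMatrix(M);
      std::cout << r2.angle() << "\n";      // 0.25
    }
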
 
diff --git a/ThirdParty/eigen3/Eigen/src/Geometry/Scaling.h b/ThirdParty/eigen3/Eigen/src/Geometry/Scaling.h
index 8edcac31c74..1c25f36fe62 100644
--- a/ThirdParty/eigen3/Eigen/src/Geometry/Scaling.h
+++ b/ThirdParty/eigen3/Eigen/src/Geometry/Scaling.h
@@ -99,7 +99,7 @@ public:
     * determined by \a prec.
     *
     * \sa MatrixBase::isApprox() */
-  bool isApprox(const UniformScaling& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+  bool isApprox(const UniformScaling& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
   { return internal::isApprox(m_factor, other.factor(), prec); }
 
 };
@@ -122,11 +122,11 @@ static inline UniformScaling<std::complex<RealScalar> > Scaling(const std::compl
 
 /** Constructs a 2D axis aligned scaling */
 template<typename Scalar>
-static inline DiagonalMatrix<Scalar,2> Scaling(Scalar sx, Scalar sy)
+static inline DiagonalMatrix<Scalar,2> Scaling(const Scalar& sx, const Scalar& sy)
 { return DiagonalMatrix<Scalar,2>(sx, sy); }
 /** Constructs a 3D axis aligned scaling */
 template<typename Scalar>
-static inline DiagonalMatrix<Scalar,3> Scaling(Scalar sx, Scalar sy, Scalar sz)
+static inline DiagonalMatrix<Scalar,3> Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz)
 { return DiagonalMatrix<Scalar,3>(sx, sy, sz); }
 
 /** Constructs an axis aligned scaling expression from vector expression \a coeffs
diff --git a/ThirdParty/eigen3/Eigen/src/Geometry/Transform.h b/ThirdParty/eigen3/Eigen/src/Geometry/Transform.h
index 4c1ef8eaade..887e718d677 100644
--- a/ThirdParty/eigen3/Eigen/src/Geometry/Transform.h
+++ b/ThirdParty/eigen3/Eigen/src/Geometry/Transform.h
@@ -506,8 +506,8 @@ public:
   template<typename OtherDerived>
   inline Transform& prescale(const MatrixBase<OtherDerived> &other);
 
-  inline Transform& scale(Scalar s);
-  inline Transform& prescale(Scalar s);
+  inline Transform& scale(const Scalar& s);
+  inline Transform& prescale(const Scalar& s);
 
   template<typename OtherDerived>
   inline Transform& translate(const MatrixBase<OtherDerived> &other);
@@ -521,8 +521,8 @@ public:
   template<typename RotationType>
   inline Transform& prerotate(const RotationType& rotation);
 
-  Transform& shear(Scalar sx, Scalar sy);
-  Transform& preshear(Scalar sx, Scalar sy);
+  Transform& shear(const Scalar& sx, const Scalar& sy);
+  Transform& preshear(const Scalar& sx, const Scalar& sy);
 
   inline Transform& operator=(const TranslationType& t);
   inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); }
@@ -584,7 +584,7 @@ public:
     * determined by \a prec.
     *
     * \sa MatrixBase::isApprox() */
-  bool isApprox(const Transform& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+  bool isApprox(const Transform& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
   { return m_matrix.isApprox(other.m_matrix, prec); }
 
   /** Sets the last row to [0 ... 0 1]
@@ -794,7 +794,7 @@ Transform<Scalar,Dim,Mode,Options>::scale(const MatrixBase<OtherDerived> &other)
   * \sa prescale(Scalar)
   */
 template<typename Scalar, int Dim, int Mode, int Options>
-inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::scale(Scalar s)
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::scale(const Scalar& s)
 {
   EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
   linearExt() *= s;
@@ -821,7 +821,7 @@ Transform<Scalar,Dim,Mode,Options>::prescale(const MatrixBase<OtherDerived> &oth
   * \sa scale(Scalar)
   */
 template<typename Scalar, int Dim, int Mode, int Options>
-inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::prescale(Scalar s)
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::prescale(const Scalar& s)
 {
   EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
   m_matrix.template topRows<Dim>() *= s;
@@ -909,7 +909,7 @@ Transform<Scalar,Dim,Mode,Options>::prerotate(const RotationType& rotation)
   */
 template<typename Scalar, int Dim, int Mode, int Options>
 Transform<Scalar,Dim,Mode,Options>&
-Transform<Scalar,Dim,Mode,Options>::shear(Scalar sx, Scalar sy)
+Transform<Scalar,Dim,Mode,Options>::shear(const Scalar& sx, const Scalar& sy)
 {
   EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
   EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
@@ -925,7 +925,7 @@ Transform<Scalar,Dim,Mode,Options>::shear(Scalar sx, Scalar sy)
   */
 template<typename Scalar, int Dim, int Mode, int Options>
 Transform<Scalar,Dim,Mode,Options>&
-Transform<Scalar,Dim,Mode,Options>::preshear(Scalar sx, Scalar sy)
+Transform<Scalar,Dim,Mode,Options>::preshear(const Scalar& sx, const Scalar& sy)
 {
   EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
   EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
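
[Editorial note] The blanket change from pass-by-value Scalar to const Scalar& in this file (scale, prescale, shear, preshear, isApprox) is an API-compatible micro-optimization that avoids copying heavyweight custom scalar types such as multiprecision numbers; call sites are unchanged. A plain usage sketch of the affected methods:

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      using namespace Eigen;
      Affine2d t = Affine2d::Identity();
      t.translate(Vector2d(1.0, 2.0))
       .rotate(0.5)                  // 2-D: plain angle in radians
       .scale(2.0)                   // scalar now taken by const reference
       .shear(0.1, 0.0);
      std::cout << (t * Vector2d(1.0, 0.0)).transpose() << "\n";
    }
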
diff --git a/ThirdParty/eigen3/Eigen/src/Geometry/Umeyama.h b/ThirdParty/eigen3/Eigen/src/Geometry/Umeyama.h
index ac0939cde52..345b47e0c37 100644
--- a/ThirdParty/eigen3/Eigen/src/Geometry/Umeyama.h
+++ b/ThirdParty/eigen3/Eigen/src/Geometry/Umeyama.h
@@ -153,16 +153,21 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
     Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
   }
 
-  // Eq. (42)
-  const Scalar c = 1/src_var * svd.singularValues().dot(S);
-
-  // Eq. (41)
-  // Note that we first assign dst_mean to the destination so that there no need
-  // for a temporary.
-  Rt.col(m).head(m) = dst_mean;
-  Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean;
-
-  if (with_scaling) Rt.block(0,0,m,m) *= c;
+  if (with_scaling)
+  {
+    // Eq. (42)
+    const Scalar c = 1/src_var * svd.singularValues().dot(S);
+
+    // Eq. (41)
+    Rt.col(m).head(m) = dst_mean;
+    Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean;
+    Rt.block(0,0,m,m) *= c;
+  }
+  else
+  {
+    Rt.col(m).head(m) = dst_mean;
+    Rt.col(m).head(m).noalias() -= Rt.topLeftCorner(m,m)*src_mean;
+  }
 
   return Rt;
 }
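
[Editorial note] The restructuring above computes the Eq. (42) scale c only when with_scaling is requested, so the translation of the rigid (unscaled) estimate is no longer multiplied by c. A usage sketch recovering a known similarity transform (the test transform is arbitrary):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      using namespace Eigen;
      Matrix<double,2,Dynamic> src = Matrix<double,2,Dynamic>::Random(2, 20);
      Affine2d T = Translation2d(0.5, -1.0) * Rotation2Dd(0.3) * Scaling(2.0);
      Matrix<double,2,Dynamic> dst = T * src;        // transform the point cloud
      Matrix3d Rt = umeyama(src, dst, true);         // estimate rotation+scale+shift
      std::cout << (Rt - T.matrix()).norm() << "\n"; // ~0
    }
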
diff --git a/ThirdParty/eigen3/Eigen/src/Householder/Householder.h b/ThirdParty/eigen3/Eigen/src/Householder/Householder.h
index 3f64b7dde2f..32112af9bfd 100644
--- a/ThirdParty/eigen3/Eigen/src/Householder/Householder.h
+++ b/ThirdParty/eigen3/Eigen/src/Householder/Householder.h
@@ -67,25 +67,28 @@ void MatrixBase<Derived>::makeHouseholder(
   Scalar& tau,
   RealScalar& beta) const
 {
+  using std::sqrt;
+  using numext::conj;
+  
   EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart)
   VectorBlock<const Derived, EssentialPart::SizeAtCompileTime> tail(derived(), 1, size()-1);
   
   RealScalar tailSqNorm = size()==1 ? RealScalar(0) : tail.squaredNorm();
   Scalar c0 = coeff(0);
 
-  if(tailSqNorm == RealScalar(0) && internal::imag(c0)==RealScalar(0))
+  if(tailSqNorm == RealScalar(0) && numext::imag(c0)==RealScalar(0))
   {
     tau = RealScalar(0);
-    beta = internal::real(c0);
+    beta = numext::real(c0);
     essential.setZero();
   }
   else
   {
-    beta = internal::sqrt(internal::abs2(c0) + tailSqNorm);
-    if (internal::real(c0)>=RealScalar(0))
+    beta = sqrt(numext::abs2(c0) + tailSqNorm);
+    if (numext::real(c0)>=RealScalar(0))
       beta = -beta;
     essential = tail / (c0 - beta);
-    tau = internal::conj((beta - c0) / beta);
+    tau = conj((beta - c0) / beta);
   }
 }
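
[Editorial note] makeHouseholder() returns (essential, tau, beta) defining H = I - tau*w*w^* with w = [1; essential], chosen so that H applied to the input vector yields beta*e1; a quick numerical check:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::VectorXd v = Eigen::VectorXd::Random(5);
      Eigen::VectorXd essential(4);
      double tau, beta;
      v.makeHouseholder(essential, tau, beta);       // defines H with H*v = beta*e1

      Eigen::VectorXd w = v;
      double workspace;                              // 1 column => 1 scalar suffices
      w.applyHouseholderOnTheLeft(essential, tau, &workspace);
      std::cout << w.transpose() << "\n";            // ~(beta 0 0 0 0)
    }
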
 
diff --git a/ThirdParty/eigen3/Eigen/src/Householder/HouseholderSequence.h b/ThirdParty/eigen3/Eigen/src/Householder/HouseholderSequence.h
index 1e71e16a785..d800ca1fa42 100644
--- a/ThirdParty/eigen3/Eigen/src/Householder/HouseholderSequence.h
+++ b/ThirdParty/eigen3/Eigen/src/Householder/HouseholderSequence.h
@@ -112,6 +112,9 @@ template<typename OtherScalarType, typename MatrixType> struct matrix_type_times
 template<typename VectorsType, typename CoeffsType, int Side> class HouseholderSequence
   : public EigenBase<HouseholderSequence<VectorsType,CoeffsType,Side> >
 {
+    typedef typename internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType EssentialVectorType;
+  
+  public:
     enum {
       RowsAtCompileTime = internal::traits<HouseholderSequence>::RowsAtCompileTime,
       ColsAtCompileTime = internal::traits<HouseholderSequence>::ColsAtCompileTime,
@@ -121,13 +124,10 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
     typedef typename internal::traits<HouseholderSequence>::Scalar Scalar;
     typedef typename VectorsType::Index Index;
 
-    typedef typename internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType
-            EssentialVectorType;
-
-  public:
-
     typedef HouseholderSequence<
-      VectorsType,
+      typename internal::conditional<NumTraits<Scalar>::IsComplex,
+        typename internal::remove_all<typename VectorsType::ConjugateReturnType>::type,
+        VectorsType>::type,
       typename internal::conditional<NumTraits<Scalar>::IsComplex,
         typename internal::remove_all<typename CoeffsType::ConjugateReturnType>::type,
         CoeffsType>::type,
@@ -208,7 +208,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
     /** \brief Complex conjugate of the Householder sequence. */
     ConjugateReturnType conjugate() const
     {
-      return ConjugateReturnType(m_vectors, m_coeffs.conjugate())
+      return ConjugateReturnType(m_vectors.conjugate(), m_coeffs.conjugate())
              .setTrans(m_trans)
              .setLength(m_length)
              .setShift(m_shift);
diff --git a/ThirdParty/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/ThirdParty/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
index 224304f0eb8..b55afc13636 100644
--- a/ThirdParty/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
+++ b/ThirdParty/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
@@ -10,36 +10,88 @@
 #ifndef EIGEN_INCOMPLETE_LUT_H
 #define EIGEN_INCOMPLETE_LUT_H
 
+
 namespace Eigen { 
 
-/**
- * \brief Incomplete LU factorization with dual-threshold strategy
- * During the numerical factorization, two dropping rules are used :
- *  1) any element whose magnitude is less than some tolerance is dropped.
- *    This tolerance is obtained by multiplying the input tolerance @p droptol 
- *    by the average magnitude of all the original elements in the current row.
- *  2) After the elimination of the row, only the @p fill largest elements in 
- *    the L part and the @p fill largest elements in the U part are kept 
- *    (in addition to the diagonal element ). Note that @p fill is computed from 
- *    the input parameter @p fillfactor which is used the ratio to control the fill_in 
- *    relatively to the initial number of nonzero elements.
- * 
- * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
- * and when @p fill=n/2 with @p droptol being different to zero. 
- * 
- * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization, 
- *              Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
- * 
- * NOTE : The following implementation is derived from the ILUT implementation
- * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota 
- *  released under the terms of the GNU LGPL: 
- *    http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README
- * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.
- * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:
- *   http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html
- * alternatively, on GMANE:
- *   http://comments.gmane.org/gmane.comp.lib.eigen/3302
- */
+namespace internal {
+    
+/** \internal
+  * Compute a quick-sort split of a vector.
+  * On output, the vector row is permuted such that its elements satisfy
+  * abs(row(i)) >= abs(row(ncut)) if i<ncut
+  * abs(row(i)) <= abs(row(ncut)) if i>ncut 
+  * \param row The vector of values
+  * \param ind The array of indices of the elements in @p row
+  * \param ncut  The number of largest elements to keep
+  **/ 
+template <typename VectorV, typename VectorI, typename Index>
+Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
+{
+  typedef typename VectorV::RealScalar RealScalar;
+  using std::swap;
+  using std::abs;
+  Index mid;
+  Index n = row.size(); /* length of the vector */
+  Index first, last ;
+  
+  ncut--; /* to fit the zero-based indices */
+  first = 0; 
+  last = n-1; 
+  if (ncut < first || ncut > last ) return 0;
+  
+  do {
+    mid = first; 
+    RealScalar abskey = abs(row(mid)); 
+    for (Index j = first + 1; j <= last; j++) {
+      if ( abs(row(j)) > abskey) {
+        ++mid;
+        swap(row(mid), row(j));
+        swap(ind(mid), ind(j));
+      }
+    }
+    /* Interchange for the pivot element */
+    swap(row(mid), row(first));
+    swap(ind(mid), ind(first));
+    
+    if (mid > ncut) last = mid - 1;
+    else if (mid < ncut ) first = mid + 1; 
+  } while (mid != ncut );
+  
+  return 0; /* mid is equal to ncut */ 
+}
+
+}// end namespace internal
+
+/** \ingroup IterativeLinearSolvers_Module
+  * \class IncompleteLUT
+  * \brief Incomplete LU factorization with dual-threshold strategy
+  *
+  * During the numerical factorization, two dropping rules are used:
+  *  1) any element whose magnitude is less than some tolerance is dropped.
+  *    This tolerance is obtained by multiplying the input tolerance @p droptol
+  *    by the average magnitude of all the original elements in the current row.
+  *  2) After the elimination of the row, only the @p fill largest elements in
+  *    the L part and the @p fill largest elements in the U part are kept
+  *    (in addition to the diagonal element). Note that @p fill is computed from
+  *    the input parameter @p fillfactor, which is used as a ratio to control the
+  *    fill-in relative to the initial number of nonzero elements.
+  * 
+  * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
+  * and when @p fill=n/2 with @p droptol different from zero. 
+  * 
+  * References: Yousef Saad, ILUT: A dual threshold incomplete LU factorization, 
+  *             Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
+  * 
+  * NOTE: The following implementation is derived from the ILUT implementation
+  * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota, 
+  * released under the terms of the GNU LGPL: 
+  *    http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README
+  * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.
+  * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:
+  *   http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html
+  * alternatively, on GMANE:
+  *   http://comments.gmane.org/gmane.comp.lib.eigen/3302
+  */
 template <typename _Scalar>
 class IncompleteLUT : internal::noncopyable
 {
@@ -59,7 +111,7 @@ class IncompleteLUT : internal::noncopyable
     {}
     
     template<typename MatrixType>
-    IncompleteLUT(const MatrixType& mat, RealScalar droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
+    IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
       : m_droptol(droptol),m_fillfactor(fillfactor),
         m_analysisIsOk(false),m_factorizationIsOk(false),m_isInitialized(false)
     {
@@ -98,12 +150,11 @@ class IncompleteLUT : internal::noncopyable
     {
       analyzePattern(amat); 
       factorize(amat);
-      eigen_assert(m_factorizationIsOk == true); 
-      m_isInitialized = true;
+      m_isInitialized = m_factorizationIsOk;
       return *this;
     }
 
-    void setDroptol(RealScalar droptol); 
+    void setDroptol(const RealScalar& droptol); 
     void setFillfactor(int fillfactor); 
     
     template<typename Rhs, typename Dest>
@@ -126,10 +177,6 @@ class IncompleteLUT : internal::noncopyable
 
 protected:
 
-    template <typename VectorV, typename VectorI>
-    int QuickSplit(VectorV &row, VectorI &ind, int ncut);
-
-
     /** keeps off-diagonal entries; drops diagonal entries */
     struct keep_diag {
       inline bool operator() (const Index& row, const Index& col, const Scalar&) const
@@ -156,7 +203,7 @@ protected:
  *  \param droptol   Drop any element whose magnitude is less than this tolerance 
  **/ 
 template<typename Scalar>
-void IncompleteLUT<Scalar>::setDroptol(RealScalar droptol)
+void IncompleteLUT<Scalar>::setDroptol(const RealScalar& droptol)
 {
   this->m_droptol = droptol;   
 }
@@ -171,51 +218,6 @@ void IncompleteLUT<Scalar>::setFillfactor(int fillfactor)
   this->m_fillfactor = fillfactor;   
 }
 
-
-/**
- * Compute a quick-sort split of a vector 
- * On output, the vector row is permuted such that its elements satisfy
- * abs(row(i)) >= abs(row(ncut)) if i<ncut
- * abs(row(i)) <= abs(row(ncut)) if i>ncut 
- * \param row The vector of values
- * \param ind The array of index for the elements in @p row
- * \param ncut  The number of largest elements to keep
- **/ 
-template <typename Scalar>
-template <typename VectorV, typename VectorI>
-int IncompleteLUT<Scalar>::QuickSplit(VectorV &row, VectorI &ind, int ncut)
-{
-  using std::swap;
-  int mid;
-  int n = row.size(); /* length of the vector */
-  int first, last ; 
-  
-  ncut--; /* to fit the zero-based indices */
-  first = 0; 
-  last = n-1; 
-  if (ncut < first || ncut > last ) return 0;
-  
-  do {
-    mid = first; 
-    RealScalar abskey = std::abs(row(mid)); 
-    for (int j = first + 1; j <= last; j++) {
-      if ( std::abs(row(j)) > abskey) {
-        ++mid;
-        swap(row(mid), row(j));
-        swap(ind(mid), ind(j));
-      }
-    }
-    /* Interchange for the pivot element */
-    swap(row(mid), row(first));
-    swap(ind(mid), ind(first));
-    
-    if (mid > ncut) last = mid - 1;
-    else if (mid < ncut ) first = mid + 1; 
-  } while (mid != ncut );
-  
-  return 0; /* mid is equal to ncut */ 
-}
-
 template <typename Scalar>
 template<typename _MatrixType>
 void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
@@ -244,7 +246,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
   using std::abs;
 
   eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
-  int n = amat.cols();  // Size of the matrix
+  Index n = amat.cols();  // Size of the matrix
   m_lu.resize(n,n);
   // Declare Working vectors and variables
   Vector u(n) ;     // real values of the row -- maximum size is n --
@@ -262,21 +264,21 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
   u.fill(0);
 
   // number of largest elements to keep in each row:
-  int fill_in =   static_cast<int> (amat.nonZeros()*m_fillfactor)/n+1;
+  Index fill_in =   static_cast<Index> (amat.nonZeros()*m_fillfactor)/n+1;
   if (fill_in > n) fill_in = n;
 
   // number of largest nonzero elements to keep in the L and the U part of the current row:
-  int nnzL = fill_in/2;
-  int nnzU = nnzL;
+  Index nnzL = fill_in/2;
+  Index nnzU = nnzL;
   m_lu.reserve(n * (nnzL + nnzU + 1));
 
   // global loop over the rows of the sparse matrix
-  for (int ii = 0; ii < n; ii++)
+  for (Index ii = 0; ii < n; ii++)
   {
     // 1 - copy the lower and the upper part of the row i of mat in the working vector u
 
-    int sizeu = 1; // number of nonzero elements in the upper part of the current row
-    int sizel = 0; // number of nonzero elements in the lower part of the current row
+    Index sizeu = 1; // number of nonzero elements in the upper part of the current row
+    Index sizel = 0; // number of nonzero elements in the lower part of the current row
     ju(ii)    = ii;
     u(ii)     = 0;
     jr(ii)    = ii;
@@ -285,7 +287,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
     typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii
     for (; j_it; ++j_it)
     {
-      int k = j_it.index();
+      Index k = j_it.index();
       if (k < ii)
       {
         // copy the lower part
@@ -301,13 +303,13 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
       else
       {
         // copy the upper part
-        int jpos = ii + sizeu;
+        Index jpos = ii + sizeu;
         ju(jpos) = k;
         u(jpos) = j_it.value();
         jr(k) = jpos;
         ++sizeu;
       }
-      rownorm += internal::abs2(j_it.value());
+      rownorm += numext::abs2(j_it.value());
     }
 
     // 2 - detect possible zero row
@@ -320,19 +322,19 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
     rownorm = sqrt(rownorm);
 
     // 3 - eliminate the previous nonzero rows
-    int jj = 0;
-    int len = 0;
+    Index jj = 0;
+    Index len = 0;
     while (jj < sizel)
     {
       // In order to eliminate in the correct order,
       // we must select first the smallest column index among  ju(jj:sizel)
-      int k;
-      int minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
+      Index k;
+      Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
       k += jj;
       if (minrow != ju(jj))
       {
         // swap the two locations
-        int j = ju(jj);
+        Index j = ju(jj);
         swap(ju(jj), ju(k));
         jr(minrow) = jj;   jr(j) = k;
         swap(u(jj), u(k));
@@ -358,11 +360,11 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
       for (; ki_it; ++ki_it)
       {
         Scalar prod = fact * ki_it.value();
-        int j       = ki_it.index();
-        int jpos    = jr(j);
+        Index j       = ki_it.index();
+        Index jpos    = jr(j);
         if (jpos == -1) // fill-in element
         {
-          int newpos;
+          Index newpos;
           if (j >= ii) // dealing with the upper part
           {
             newpos = ii + sizeu;
@@ -391,7 +393,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
     } // end of the elimination on the row ii
 
     // reset the upper part of the pointer jr to zero
-    for(int k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;
+    for(Index k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;
 
     // 4 - partially sort and insert the elements in the m_lu matrix
 
@@ -400,11 +402,11 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
     len = (std::min)(sizel, nnzL);
     typename Vector::SegmentReturnType ul(u.segment(0, sizel));
     typename VectorXi::SegmentReturnType jul(ju.segment(0, sizel));
-    QuickSplit(ul, jul, len);
+    internal::QuickSplit(ul, jul, len);
 
     // store the largest m_fill elements of the L part
     m_lu.startVec(ii);
-    for(int k = 0; k < len; k++)
+    for(Index k = 0; k < len; k++)
       m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
 
     // store the diagonal element
@@ -416,7 +418,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
     // sort the U-part of the row
     // apply the dropping rule first
     len = 0;
-    for(int k = 1; k < sizeu; k++)
+    for(Index k = 1; k < sizeu; k++)
     {
       if(abs(u(ii+k)) > m_droptol * rownorm )
       {
@@ -429,10 +431,10 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
     len = (std::min)(sizeu, nnzU);
     typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1));
     typename VectorXi::SegmentReturnType juu(ju.segment(ii+1, sizeu-1));
-    QuickSplit(uu, juu, len);
+    internal::QuickSplit(uu, juu, len);
 
     // store the largest elements of the U part
-    for(int k = ii + 1; k < ii + len; k++)
+    for(Index k = ii + 1; k < ii + len; k++)
       m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
   }
 
@@ -463,4 +465,3 @@ struct solve_retval<IncompleteLUT<_MatrixType>, Rhs>
 } // end namespace Eigen
 
 #endif // EIGEN_INCOMPLETE_LUT_H
-
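
[Editorial note] In practice IncompleteLUT serves as a preconditioner for the Krylov solvers of this module; a sketch wiring it into BiCGSTAB on a toy 1-D Laplacian (the problem setup is illustrative only):

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      const int n = 100;
      SpMat A(n, n);
      for (int i = 0; i < n; ++i) {                  // toy 1-D Laplacian stencil
        A.insert(i, i) = 2.0;
        if (i > 0)     A.insert(i, i - 1) = -1.0;
        if (i < n - 1) A.insert(i, i + 1) = -1.0;
      }
      A.makeCompressed();
      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

      Eigen::BiCGSTAB<SpMat, Eigen::IncompleteLUT<double> > solver;
      solver.preconditioner().setDroptol(1e-4);      // rule 1: magnitude threshold
      solver.preconditioner().setFillfactor(10);     // rule 2: kept fill per row
      solver.compute(A);
      Eigen::VectorXd x = solver.solve(b);
      std::cout << solver.iterations() << " iterations, estimated error "
                << solver.error() << "\n";
    }
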
diff --git a/ThirdParty/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/ThirdParty/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
index 11706cebabd..2036922d69c 100644
--- a/ThirdParty/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
+++ b/ThirdParty/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
@@ -120,7 +120,7 @@ public:
   RealScalar tolerance() const { return m_tolerance; }
   
   /** Sets the tolerance threshold used by the stopping criteria */
-  Derived& setTolerance(RealScalar tolerance)
+  Derived& setTolerance(const RealScalar& tolerance)
   {
     m_tolerance = tolerance;
     return derived();
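
[Editorial note] setTolerance() bounds the relative residual used by the stopping criterion, and setMaxIterations() caps the iteration count; a minimal configuration sketch with ConjugateGradient:

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(2, 2);
      A.insert(0,0) = 4.0; A.insert(0,1) = 1.0;
      A.insert(1,0) = 1.0; A.insert(1,1) = 3.0;    // symmetric positive definite
      Eigen::VectorXd b(2); b << 1.0, 2.0;

      Eigen::ConjugateGradient<SpMat> cg;
      cg.setTolerance(1e-12);      // relative residual threshold for Success
      cg.setMaxIterations(50);     // exceeding it yields info() == NoConvergence
      cg.compute(A);
      Eigen::VectorXd x = cg.solve(b);
      std::cout << (A*x - b).norm() << "\n";        // ~0
    }
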
diff --git a/ThirdParty/eigen3/Eigen/src/Jacobi/Jacobi.h b/ThirdParty/eigen3/Eigen/src/Jacobi/Jacobi.h
index a9c17dcdf19..956f72d5701 100644
--- a/ThirdParty/eigen3/Eigen/src/Jacobi/Jacobi.h
+++ b/ThirdParty/eigen3/Eigen/src/Jacobi/Jacobi.h
@@ -50,19 +50,20 @@ template<typename Scalar> class JacobiRotation
     /** Concatenates two planar rotation */
     JacobiRotation operator*(const JacobiRotation& other)
     {
-      return JacobiRotation(m_c * other.m_c - internal::conj(m_s) * other.m_s,
-                            internal::conj(m_c * internal::conj(other.m_s) + internal::conj(m_s) * internal::conj(other.m_c)));
+      using numext::conj;
+      return JacobiRotation(m_c * other.m_c - conj(m_s) * other.m_s,
+                            conj(m_c * conj(other.m_s) + conj(m_s) * conj(other.m_c)));
     }
 
     /** Returns the transposed transformation */
-    JacobiRotation transpose() const { return JacobiRotation(m_c, -internal::conj(m_s)); }
+    JacobiRotation transpose() const { using numext::conj; return JacobiRotation(m_c, -conj(m_s)); }
 
     /** Returns the adjoint transformation */
-    JacobiRotation adjoint() const { return JacobiRotation(internal::conj(m_c), -m_s); }
+    JacobiRotation adjoint() const { using numext::conj; return JacobiRotation(conj(m_c), -m_s); }
 
     template<typename Derived>
     bool makeJacobi(const MatrixBase<Derived>&, typename Derived::Index p, typename Derived::Index q);
-    bool makeJacobi(RealScalar x, Scalar y, RealScalar z);
+    bool makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z);
 
     void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);
 
@@ -79,8 +80,10 @@ template<typename Scalar> class JacobiRotation
   * \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
   */
 template<typename Scalar>
-bool JacobiRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z)
+bool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z)
 {
+  using std::sqrt;
+  using std::abs;
   typedef typename NumTraits<Scalar>::Real RealScalar;
   if(y == Scalar(0))
   {
@@ -90,8 +93,8 @@ bool JacobiRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z)
   }
   else
   {
-    RealScalar tau = (x-z)/(RealScalar(2)*internal::abs(y));
-    RealScalar w = internal::sqrt(internal::abs2(tau) + RealScalar(1));
+    RealScalar tau = (x-z)/(RealScalar(2)*abs(y));
+    RealScalar w = sqrt(numext::abs2(tau) + RealScalar(1));
     RealScalar t;
     if(tau>RealScalar(0))
     {
@@ -102,8 +105,8 @@ bool JacobiRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z)
       t = RealScalar(1) / (tau - w);
     }
     RealScalar sign_t = t > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
-    RealScalar n = RealScalar(1) / internal::sqrt(internal::abs2(t)+RealScalar(1));
-    m_s = - sign_t * (internal::conj(y) / internal::abs(y)) * internal::abs(t) * n;
+    RealScalar n = RealScalar(1) / sqrt(numext::abs2(t)+RealScalar(1));
+    m_s = - sign_t * (numext::conj(y) / abs(y)) * abs(t) * n;
     m_c = n;
     return true;
   }
@@ -122,7 +125,7 @@ template<typename Scalar>
 template<typename Derived>
 inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, typename Derived::Index p, typename Derived::Index q)
 {
-  return makeJacobi(internal::real(m.coeff(p,p)), m.coeff(p,q), internal::real(m.coeff(q,q)));
+  return makeJacobi(numext::real(m.coeff(p,p)), m.coeff(p,q), numext::real(m.coeff(q,q)));
 }
 
 /** Makes \c *this as a Givens rotation \c G such that applying \f$ G^* \f$ to the left of the vector
@@ -152,52 +155,56 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
 template<typename Scalar>
 void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type)
 {
+  using std::sqrt;
+  using std::abs;
+  using numext::conj;
+  
   if(q==Scalar(0))
   {
-    m_c = internal::real(p)<0 ? Scalar(-1) : Scalar(1);
+    m_c = numext::real(p)<0 ? Scalar(-1) : Scalar(1);
     m_s = 0;
     if(r) *r = m_c * p;
   }
   else if(p==Scalar(0))
   {
     m_c = 0;
-    m_s = -q/internal::abs(q);
-    if(r) *r = internal::abs(q);
+    m_s = -q/abs(q);
+    if(r) *r = abs(q);
   }
   else
   {
-    RealScalar p1 = internal::norm1(p);
-    RealScalar q1 = internal::norm1(q);
+    RealScalar p1 = numext::norm1(p);
+    RealScalar q1 = numext::norm1(q);
     if(p1>=q1)
     {
       Scalar ps = p / p1;
-      RealScalar p2 = internal::abs2(ps);
+      RealScalar p2 = numext::abs2(ps);
       Scalar qs = q / p1;
-      RealScalar q2 = internal::abs2(qs);
+      RealScalar q2 = numext::abs2(qs);
 
-      RealScalar u = internal::sqrt(RealScalar(1) + q2/p2);
-      if(internal::real(p)<RealScalar(0))
+      RealScalar u = sqrt(RealScalar(1) + q2/p2);
+      if(numext::real(p)<RealScalar(0))
         u = -u;
 
       m_c = Scalar(1)/u;
-      m_s = -qs*internal::conj(ps)*(m_c/p2);
+      m_s = -qs*conj(ps)*(m_c/p2);
       if(r) *r = p * u;
     }
     else
     {
       Scalar ps = p / q1;
-      RealScalar p2 = internal::abs2(ps);
+      RealScalar p2 = numext::abs2(ps);
       Scalar qs = q / q1;
-      RealScalar q2 = internal::abs2(qs);
+      RealScalar q2 = numext::abs2(qs);
 
-      RealScalar u = q1 * internal::sqrt(p2 + q2);
-      if(internal::real(p)<RealScalar(0))
+      RealScalar u = q1 * sqrt(p2 + q2);
+      if(numext::real(p)<RealScalar(0))
         u = -u;
 
-      p1 = internal::abs(p);
+      p1 = abs(p);
       ps = p/p1;
       m_c = p1/u;
-      m_s = -internal::conj(ps) * (q/u);
+      m_s = -conj(ps) * (q/u);
       if(r) *r = ps * u;
     }
   }
@@ -207,23 +214,24 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
 template<typename Scalar>
 void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type)
 {
-
+  using std::sqrt;
+  using std::abs;
   if(q==Scalar(0))
   {
     m_c = p<Scalar(0) ? Scalar(-1) : Scalar(1);
     m_s = Scalar(0);
-    if(r) *r = internal::abs(p);
+    if(r) *r = abs(p);
   }
   else if(p==Scalar(0))
   {
     m_c = Scalar(0);
     m_s = q<Scalar(0) ? Scalar(1) : Scalar(-1);
-    if(r) *r = internal::abs(q);
+    if(r) *r = abs(q);
   }
-  else if(internal::abs(p) > internal::abs(q))
+  else if(abs(p) > abs(q))
   {
     Scalar t = q/p;
-    Scalar u = internal::sqrt(Scalar(1) + internal::abs2(t));
+    Scalar u = sqrt(Scalar(1) + numext::abs2(t));
     if(p<Scalar(0))
       u = -u;
     m_c = Scalar(1)/u;
@@ -233,7 +241,7 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
   else
   {
     Scalar t = p/q;
-    Scalar u = internal::sqrt(Scalar(1) + internal::abs2(t));
+    Scalar u = sqrt(Scalar(1) + numext::abs2(t));
     if(q<Scalar(0))
       u = -u;
     m_s = -Scalar(1)/u;
@@ -303,6 +311,11 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y,
 
   Scalar* EIGEN_RESTRICT x = &_x.coeffRef(0);
   Scalar* EIGEN_RESTRICT y = &_y.coeffRef(0);
+  
+  OtherScalar c = j.c();
+  OtherScalar s = j.s();
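+  // nothing to do for the identity rotation (c == 1, s == 0)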
+  if (c==OtherScalar(1) && s==OtherScalar(0))
+    return;
 
   /*** dynamic-size vectorized paths ***/
 
@@ -316,16 +329,16 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y,
     Index alignedStart = internal::first_aligned(y, size);
     Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;
 
-    const Packet pc = pset1<Packet>(j.c());
-    const Packet ps = pset1<Packet>(j.s());
+    const Packet pc = pset1<Packet>(c);
+    const Packet ps = pset1<Packet>(s);
     conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
 
     for(Index i=0; i<alignedStart; ++i)
     {
       Scalar xi = x[i];
       Scalar yi = y[i];
-      x[i] =  j.c() * xi + conj(j.s()) * yi;
-      y[i] = -j.s() * xi + conj(j.c()) * yi;
+      x[i] =  c * xi + numext::conj(s) * yi;
+      y[i] = -s * xi + numext::conj(c) * yi;
     }
 
     Scalar* EIGEN_RESTRICT px = x + alignedStart;
@@ -372,8 +385,8 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y,
     {
       Scalar xi = x[i];
       Scalar yi = y[i];
-      x[i] =  j.c() * xi + conj(j.s()) * yi;
-      y[i] = -j.s() * xi + conj(j.c()) * yi;
+      x[i] =  c * xi + numext::conj(s) * yi;
+      y[i] = -s * xi + numext::conj(c) * yi;
     }
   }
 
@@ -382,8 +395,8 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y,
           (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
           (VectorX::Flags & VectorY::Flags & AlignedBit))
   {
-    const Packet pc = pset1<Packet>(j.c());
-    const Packet ps = pset1<Packet>(j.s());
+    const Packet pc = pset1<Packet>(c);
+    const Packet ps = pset1<Packet>(s);
     conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
     Scalar* EIGEN_RESTRICT px = x;
     Scalar* EIGEN_RESTRICT py = y;
@@ -405,8 +418,8 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y,
     {
       Scalar xi = *x;
       Scalar yi = *y;
-      *x =  j.c() * xi + conj(j.s()) * yi;
-      *y = -j.s() * xi + conj(j.c()) * yi;
+      *x =  c * xi + numext::conj(s) * yi;
+      *y = -s * xi + numext::conj(c) * yi;
       x += incrx;
       y += incry;
     }
diff --git a/ThirdParty/eigen3/Eigen/src/LU/Determinant.h b/ThirdParty/eigen3/Eigen/src/LU/Determinant.h
index d862c5d7784..bb8e78a8a8a 100644
--- a/ThirdParty/eigen3/Eigen/src/LU/Determinant.h
+++ b/ThirdParty/eigen3/Eigen/src/LU/Determinant.h
@@ -91,7 +91,7 @@ template<typename Derived> struct determinant_impl<Derived, 4>
 template<typename Derived>
 inline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
 {
-  assert(rows() == cols());
+  eigen_assert(rows() == cols());
   typedef typename internal::nested<Derived,Base::RowsAtCompileTime>::type Nested;
   return internal::determinant_impl<typename internal::remove_all<Nested>::type>::run(derived());
 }
diff --git a/ThirdParty/eigen3/Eigen/src/LU/FullPivLU.h b/ThirdParty/eigen3/Eigen/src/LU/FullPivLU.h
index 5a2841a86d8..dfe25f424d7 100644
--- a/ThirdParty/eigen3/Eigen/src/LU/FullPivLU.h
+++ b/ThirdParty/eigen3/Eigen/src/LU/FullPivLU.h
@@ -293,11 +293,12 @@ template<typename _MatrixType> class FullPivLU
       */
     inline Index rank() const
     {
+      using std::abs;
       eigen_assert(m_isInitialized && "LU is not initialized.");
-      RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold();
+      RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();
       Index result = 0;
       for(Index i = 0; i < m_nonzero_pivots; ++i)
-        result += (internal::abs(m_lu.coeff(i,i)) > premultiplied_threshold);
+        result += (abs(m_lu.coeff(i,i)) > premultiplied_threshold);
       return result;
     }
 
@@ -416,6 +417,9 @@ FullPivLU<MatrixType>::FullPivLU(const MatrixType& matrix)
 template<typename MatrixType>
 FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
 {
+  // the permutations are stored as int indices, so just to be sure:
+  eigen_assert(matrix.rows()<=NumTraits<int>::highest() && matrix.cols()<=NumTraits<int>::highest());
+  
   m_isInitialized = true;
   m_lu = matrix;
 
@@ -547,6 +551,7 @@ struct kernel_retval<FullPivLU<_MatrixType> >
 
   template<typename Dest> void evalTo(Dest& dst) const
   {
+    using std::abs;
     const Index cols = dec().matrixLU().cols(), dimker = cols - rank();
     if(dimker == 0)
     {
@@ -577,7 +582,7 @@ struct kernel_retval<FullPivLU<_MatrixType> >
     RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
     Index p = 0;
     for(Index i = 0; i < dec().nonzeroPivots(); ++i)
-      if(internal::abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
+      if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
         pivots.coeffRef(p++) = i;
     eigen_internal_assert(p == rank());
 
@@ -632,6 +637,7 @@ struct image_retval<FullPivLU<_MatrixType> >
 
   template<typename Dest> void evalTo(Dest& dst) const
   {
+    using std::abs;
     if(rank() == 0)
     {
       // The Image is just {0}, so it doesn't have a basis properly speaking, but let's
@@ -645,7 +651,7 @@ struct image_retval<FullPivLU<_MatrixType> >
     RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
     Index p = 0;
     for(Index i = 0; i < dec().nonzeroPivots(); ++i)
-      if(internal::abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
+      if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
         pivots.coeffRef(p++) = i;
     eigen_internal_assert(p == rank());
 
diff --git a/ThirdParty/eigen3/Eigen/src/LU/Inverse.h b/ThirdParty/eigen3/Eigen/src/LU/Inverse.h
index 39b8cdbc8dc..3cf8871932f 100644
--- a/ThirdParty/eigen3/Eigen/src/LU/Inverse.h
+++ b/ThirdParty/eigen3/Eigen/src/LU/Inverse.h
@@ -55,6 +55,7 @@ struct compute_inverse_and_det_with_check<MatrixType, ResultType, 1>
     bool& invertible
   )
   {
+    using std::abs;
     determinant = matrix.coeff(0,0);
     invertible = abs(determinant) > absDeterminantThreshold;
     if(invertible) result.coeffRef(0,0) = typename ResultType::Scalar(1) / determinant;
@@ -98,6 +99,7 @@ struct compute_inverse_and_det_with_check<MatrixType, ResultType, 2>
     bool& invertible
   )
   {
+    using std::abs;
     typedef typename ResultType::Scalar Scalar;
     determinant = matrix.determinant();
     invertible = abs(determinant) > absDeterminantThreshold;
@@ -167,6 +169,7 @@ struct compute_inverse_and_det_with_check<MatrixType, ResultType, 3>
     bool& invertible
   )
   {
+    using std::abs;
     typedef typename ResultType::Scalar Scalar;
     Matrix<Scalar,3,1> cofactors_col0;
     cofactors_col0.coeffRef(0) =  cofactor_3x3<MatrixType,0,0>(matrix);
@@ -251,6 +254,7 @@ struct compute_inverse_and_det_with_check<MatrixType, ResultType, 4>
     bool& invertible
   )
   {
+    using std::abs;
     determinant = matrix.determinant();
     invertible = abs(determinant) > absDeterminantThreshold;
     if(invertible) compute_inverse<MatrixType, ResultType>::run(matrix, inverse);
@@ -327,7 +331,7 @@ inline const internal::inverse_impl<Derived> MatrixBase<Derived>::inverse() cons
   * This is only for fixed-size square matrices of size up to 4x4.
   *
   * \param inverse Reference to the matrix in which to store the inverse.
-  * \param determinant Reference to the variable in which to store the inverse.
+  * \param determinant Reference to the variable in which to store the determinant.
   * \param invertible Reference to the bool variable in which to store whether the matrix is invertible.
   * \param absDeterminantThreshold Optional parameter controlling the invertibility check.
   *                                The matrix will be declared invertible if the absolute value of its
diff --git a/ThirdParty/eigen3/Eigen/src/LU/PartialPivLU.h b/ThirdParty/eigen3/Eigen/src/LU/PartialPivLU.h
index c9ff9dd5a36..740ee694c45 100644
--- a/ThirdParty/eigen3/Eigen/src/LU/PartialPivLU.h
+++ b/ThirdParty/eigen3/Eigen/src/LU/PartialPivLU.h
@@ -242,7 +242,7 @@ struct partial_lu_impl
     const Index cols = lu.cols();
     const Index size = (std::min)(rows,cols);
     nb_transpositions = 0;
-    int first_zero_pivot = -1;
+    Index first_zero_pivot = -1;
     for(Index k = 0; k < size; ++k)
     {
       Index rrows = rows-k-1;
@@ -253,7 +253,7 @@ struct partial_lu_impl
         = lu.col(k).tail(rows-k).cwiseAbs().maxCoeff(&row_of_biggest_in_col);
       row_of_biggest_in_col += k;
 
-      row_transpositions[k] = row_of_biggest_in_col;
+      row_transpositions[k] = PivIndex(row_of_biggest_in_col);
 
       if(biggest_in_corner != RealScalar(0))
       {
@@ -318,7 +318,7 @@ struct partial_lu_impl
     }
 
     nb_transpositions = 0;
-    int first_zero_pivot = -1;
+    Index first_zero_pivot = -1;
     for(Index k = 0; k < size; k+=blockSize)
     {
       Index bs = (std::min)(size-k,blockSize); // actual size of the block
@@ -386,6 +386,9 @@ void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, t
 template<typename MatrixType>
 PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& matrix)
 {
+  // the row permutation is stored as int indices, so just to be sure:
+  eigen_assert(matrix.rows()<NumTraits<int>::highest());
+  
   m_lu = matrix;
 
   eigen_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices");
diff --git a/ThirdParty/eigen3/Eigen/src/OrderingMethods/Eigen_Colamd.h b/ThirdParty/eigen3/Eigen/src/OrderingMethods/Eigen_Colamd.h
new file mode 100644
index 00000000000..44548f6607c
--- /dev/null
+++ b/ThirdParty/eigen3/Eigen/src/OrderingMethods/Eigen_Colamd.h
@@ -0,0 +1,1850 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Desire Nuentsa Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// This file is modified from the colamd/symamd library. The copyright is below
+
+//   The authors of the code itself are Stefan I. Larimore and Timothy A.
+//   Davis (davis@cise.ufl.edu), University of Florida.  The algorithm was
+//   developed in collaboration with John Gilbert, Xerox PARC, and Esmond
+//   Ng, Oak Ridge National Laboratory.
+// 
+//     Date:
+// 
+//   September 8, 2003.  Version 2.3.
+// 
+//     Acknowledgements:
+// 
+//   This work was supported by the National Science Foundation, under
+//   grants DMS-9504974 and DMS-9803599.
+// 
+//     Notice:
+// 
+//   Copyright (c) 1998-2003 by the University of Florida.
+//   All Rights Reserved.
+// 
+//   THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+//   EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+// 
+//   Permission is hereby granted to use, copy, modify, and/or distribute
+//   this program, provided that the Copyright, this License, and the
+//   Availability of the original version is retained on all copies and made
+//   accessible to the end-user of any code or package that includes COLAMD
+//   or any modified version of COLAMD. 
+// 
+//     Availability:
+// 
+//   The colamd/symamd library is available at
+// 
+//       http://www.cise.ufl.edu/research/sparse/colamd/
+
+//   This is the http://www.cise.ufl.edu/research/sparse/colamd/colamd.h
+//   file.  It is required by the colamd.c, colamdmex.c, and symamdmex.c
+//   files, and by any C code that calls the routines whose prototypes are
+//   listed below, or that uses the colamd/symamd definitions listed below.
+  
+#ifndef EIGEN_COLAMD_H
+#define EIGEN_COLAMD_H
+
+namespace internal {
+/* Ensure that debugging is turned off: */
+#ifndef COLAMD_NDEBUG
+#define COLAMD_NDEBUG
+#endif /* COLAMD_NDEBUG */
+/* ========================================================================== */
+/* === Knob and statistics definitions ====================================== */
+/* ========================================================================== */
+
+/* size of the knobs [ ] array.  Only knobs [0..1] are currently used. */
+#define COLAMD_KNOBS 20
+
+/* number of output statistics.  Only stats [0..6] are currently used. */
+#define COLAMD_STATS 20 
+
+/* knobs [0] and stats [0]: dense row knob and output statistic. */
+#define COLAMD_DENSE_ROW 0
+
+/* knobs [1] and stats [1]: dense column knob and output statistic. */
+#define COLAMD_DENSE_COL 1
+
+/* stats [2]: memory defragmentation count output statistic */
+#define COLAMD_DEFRAG_COUNT 2
+
+/* stats [3]: colamd status:  zero OK, > 0 warning or notice, < 0 error */
+#define COLAMD_STATUS 3
+
+/* stats [4..6]: error info, or info on jumbled columns */ 
+#define COLAMD_INFO1 4
+#define COLAMD_INFO2 5
+#define COLAMD_INFO3 6
+
+/* error codes returned in stats [3]: */
+#define COLAMD_OK       (0)
+#define COLAMD_OK_BUT_JUMBLED     (1)
+#define COLAMD_ERROR_A_not_present    (-1)
+#define COLAMD_ERROR_p_not_present    (-2)
+#define COLAMD_ERROR_nrow_negative    (-3)
+#define COLAMD_ERROR_ncol_negative    (-4)
+#define COLAMD_ERROR_nnz_negative   (-5)
+#define COLAMD_ERROR_p0_nonzero     (-6)
+#define COLAMD_ERROR_A_too_small    (-7)
+#define COLAMD_ERROR_col_length_negative  (-8)
+#define COLAMD_ERROR_row_index_out_of_bounds  (-9)
+#define COLAMD_ERROR_out_of_memory    (-10)
+#define COLAMD_ERROR_internal_error   (-999)
+
+/* ========================================================================== */
+/* === Definitions ========================================================== */
+/* ========================================================================== */
+
+#define COLAMD_MAX(a,b) (((a) > (b)) ? (a) : (b))
+#define COLAMD_MIN(a,b) (((a) < (b)) ? (a) : (b))
+
+#define ONES_COMPLEMENT(r) (-(r)-1)
+
+/* -------------------------------------------------------------------------- */
+
+#define COLAMD_EMPTY (-1)
+
+/* Row and column status */
+#define ALIVE (0)
+#define DEAD  (-1)
+
+/* Column status */
+#define DEAD_PRINCIPAL    (-1)
+#define DEAD_NON_PRINCIPAL  (-2)
+
+/* Macros for row and column status update and checking. */
+#define ROW_IS_DEAD(r)      ROW_IS_MARKED_DEAD (Row[r].shared2.mark)
+#define ROW_IS_MARKED_DEAD(row_mark)  (row_mark < ALIVE)
+#define ROW_IS_ALIVE(r)     (Row [r].shared2.mark >= ALIVE)
+#define COL_IS_DEAD(c)      (Col [c].start < ALIVE)
+#define COL_IS_ALIVE(c)     (Col [c].start >= ALIVE)
+#define COL_IS_DEAD_PRINCIPAL(c)  (Col [c].start == DEAD_PRINCIPAL)
+#define KILL_ROW(r)     { Row [r].shared2.mark = DEAD ; }
+#define KILL_PRINCIPAL_COL(c)   { Col [c].start = DEAD_PRINCIPAL ; }
+#define KILL_NON_PRINCIPAL_COL(c) { Col [c].start = DEAD_NON_PRINCIPAL ; }
+
+/* ========================================================================== */
+/* === Colamd reporting mechanism =========================================== */
+/* ========================================================================== */
+
+// == Row and Column structures ==
+template <typename Index>
+struct colamd_col
+{
+  Index start ;   /* index for A of first row in this column, or DEAD */
+  /* if column is dead */
+  Index length ;  /* number of rows in this column */
+  union
+  {
+    Index thickness ; /* number of original columns represented by this */
+    /* col, if the column is alive */
+    Index parent ;  /* parent in parent tree super-column structure, if */
+    /* the column is dead */
+  } shared1 ;
+  union
+  {
+    Index score ; /* the score used to maintain heap, if col is alive */
+    Index order ; /* pivot ordering of this column, if col is dead */
+  } shared2 ;
+  union
+  {
+    Index headhash ;  /* head of a hash bucket, if col is at the head of */
+    /* a degree list */
+    Index hash ;  /* hash value, if col is not in a degree list */
+    Index prev ;  /* previous column in degree list, if col is in a */
+    /* degree list (but not at the head of a degree list) */
+  } shared3 ;
+  union
+  {
+    Index degree_next ; /* next column, if col is in a degree list */
+    Index hash_next ;   /* next column, if col is in a hash list */
+  } shared4 ;
+  
+};
+ 
+template <typename Index>
+struct Colamd_Row
+{
+  Index start ;   /* index for A of first col in this row */
+  Index length ;  /* number of principal columns in this row */
+  union
+  {
+    Index degree ;  /* number of principal & non-principal columns in row */
+    Index p ;   /* used as a row pointer in init_rows_cols () */
+  } shared1 ;
+  union
+  {
+    Index mark ;  /* for computing set differences and marking dead rows*/
+    Index first_column ;/* first column in row (used in garbage collection) */
+  } shared2 ;
+  
+};
+ 
+/* ========================================================================== */
+/* === Colamd recommended memory size ======================================= */
+/* ========================================================================== */
+ 
+/*
+  The recommended length Alen of the array A passed to colamd is given by
+  the COLAMD_RECOMMENDED (nnz, n_row, n_col) macro.  It returns -1 if any
+  argument is negative.  2*nnz space is required for the row and column
+  indices of the matrix. colamd_c (n_col) + colamd_r (n_row) space is
+  required for the Col and Row arrays, respectively, which are internal to
+  colamd.  An additional n_col space is the minimal amount of "elbow room",
+  and nnz/5 more space is recommended for run time efficiency.
+  
+  This macro is not needed when using symamd.
+  
+  Explicit typecast to Index added Sept. 23, 2002, COLAMD version 2.2, to avoid
+  gcc -pedantic warning messages.
+*/
+template <typename Index>
+inline Index colamd_c(Index n_col) 
+{ return Index( ((n_col) + 1) * sizeof (colamd_col<Index>) / sizeof (Index) ) ; }
+
+template <typename Index>
+inline Index  colamd_r(Index n_row)
+{ return Index(((n_row) + 1) * sizeof (Colamd_Row<Index>) / sizeof (Index)); }
+
+// Prototypes of non-user callable routines
+template <typename Index>
+static Index init_rows_cols (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> col [], Index A [], Index p [], Index stats[COLAMD_STATS] ); 
+
+template <typename Index>
+static void init_scoring (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index head [], double knobs[COLAMD_KNOBS], Index *p_n_row2, Index *p_n_col2, Index *p_max_deg);
+
+template <typename Index>
+static Index find_ordering (Index n_row, Index n_col, Index Alen, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index head [], Index n_col2, Index max_deg, Index pfree);
+
+template <typename Index>
+static void order_children (Index n_col, colamd_col<Index> Col [], Index p []);
+
+template <typename Index>
+static void detect_super_cols (colamd_col<Index> Col [], Index A [], Index head [], Index row_start, Index row_length ) ;
+
+template <typename Index>
+static Index garbage_collection (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index *pfree) ;
+
+template <typename Index>
+static inline  Index clear_mark (Index n_row, Colamd_Row<Index> Row [] ) ;
+
+/* === No debugging ========================================================= */
+
+#define COLAMD_DEBUG0(params) ;
+#define COLAMD_DEBUG1(params) ;
+#define COLAMD_DEBUG2(params) ;
+#define COLAMD_DEBUG3(params) ;
+#define COLAMD_DEBUG4(params) ;
+
+#define COLAMD_ASSERT(expression) ((void) 0)
+
+
+/**
+ * \brief Returns the recommended value of Alen 
+ * 
+ * Returns recommended value of Alen for use by colamd.  
+ * Returns -1 if any input argument is negative.  
+ * The use of this routine or macro is optional.  
+ * Note that the macro uses its arguments more than once,
+ * so be careful of side effects if you pass expressions as arguments to COLAMD_RECOMMENDED.
+ * 
+ * \param nnz nonzeros in A
+ * \param n_row number of rows in A
+ * \param n_col number of columns in A
+ * \return recommended value of Alen for use by colamd
+ */
+template <typename Index>
+inline Index colamd_recommended ( Index nnz, Index n_row, Index n_col)
+{
+  if ((nnz) < 0 || (n_row) < 0 || (n_col) < 0)
+    return (-1);
+  else
+    return (2 * (nnz) + colamd_c (n_col) + colamd_r (n_row) + (n_col) + ((nnz) / 5)); 
+}
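+
+// Worked example (illustrative, not part of the original library): with
+// Index = int, nnz = 8 and n_row = n_col = 4, the routine above returns
+//   2*8 + colamd_c(4) + colamd_r(4) + 4 + 8/5
+// i.e. twice the entry count (row and column forms), the Col/Row workspaces,
+// one column of "elbow room", and nnz/5 of slack:
+//
+//   int Alen = Eigen::internal::colamd_recommended (8, 4, 4) ;
+//   if (Alen < 0) { /* some input argument was negative */ }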
+
+/**
+ * \brief Set default parameters. The use of this routine is optional.
+ * 
+ * Colamd: rows with more than (knobs [COLAMD_DENSE_ROW] * n_col)
+ * entries are removed prior to ordering.  Columns with more than
+ * (knobs [COLAMD_DENSE_COL] * n_row) entries are removed prior to
+ * ordering, and placed last in the output column ordering. 
+ *
+ * COLAMD_DENSE_ROW and COLAMD_DENSE_COL are defined as 0 and 1,
+ * respectively, in colamd.h.  Default values of these two knobs
+ * are both 0.5.  Currently, only knobs [0] and knobs [1] are
+ * used, but future versions may use more knobs.  If so, they will
+ * be properly set to their defaults by the future version of
+ * colamd_set_defaults, so that the code that calls colamd will
+ * not need to change, assuming that you either use
+ * colamd_set_defaults, or pass a (double *) NULL pointer as the
+ * knobs array to colamd or symamd.
+ * 
+ * \param knobs parameter settings for colamd
+ */
+
+static inline void colamd_set_defaults(double knobs[COLAMD_KNOBS])
+{
+  /* === Local variables ================================================== */
+  
+  int i ;
+
+  if (!knobs)
+  {
+    return ;      /* no knobs to initialize */
+  }
+  for (i = 0 ; i < COLAMD_KNOBS ; i++)
+  {
+    knobs [i] = 0 ;
+  }
+  knobs [COLAMD_DENSE_ROW] = 0.5 ;  /* ignore rows over 50% dense */
+  knobs [COLAMD_DENSE_COL] = 0.5 ;  /* ignore columns over 50% dense */
+}
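+
+// Minimal usage sketch (illustrative): tighten both density thresholds
+// before ordering; every name below is defined in this file.
+//
+//   double knobs [COLAMD_KNOBS] ;
+//   Eigen::internal::colamd_set_defaults (knobs) ;
+//   knobs [COLAMD_DENSE_ROW] = 0.3 ;  /* ignore rows over 30% dense    */
+//   knobs [COLAMD_DENSE_COL] = 0.3 ;  /* ignore columns over 30% dense */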
+
+/** 
+ * \brief Computes a column ordering using the column approximate minimum degree algorithm
+ * 
+ * Computes a column ordering (Q) of A such that P(AQ)=LU or
+ * (AQ)'AQ=LL' have less fill-in and require fewer floating point
+ * operations than factorizing the unpermuted matrix A or A'A,
+ * respectively.
+ * 
+ * 
+ * \param n_row number of rows in A
+ * \param n_col number of columns in A
+ * \param Alen size of the array A
+ * \param A row indices of the matrix, of size Alen
+ * \param p column pointers of A, of size n_col+1
+ * \param knobs parameter settings for colamd
+ * \param stats colamd output statistics and error codes
+ */
+template <typename Index>
+static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, double knobs[COLAMD_KNOBS], Index stats[COLAMD_STATS])
+{
+  /* === Local variables ================================================== */
+  
+  Index i ;     /* loop index */
+  Index nnz ;     /* nonzeros in A */
+  Index Row_size ;    /* size of Row [], in integers */
+  Index Col_size ;    /* size of Col [], in integers */
+  Index need ;      /* minimum required length of A */
+  Colamd_Row<Index> *Row ;   /* pointer into A of Row [0..n_row] array */
+  colamd_col<Index> *Col ;   /* pointer into A of Col [0..n_col] array */
+  Index n_col2 ;    /* number of non-dense, non-empty columns */
+  Index n_row2 ;    /* number of non-dense, non-empty rows */
+  Index ngarbage ;    /* number of garbage collections performed */
+  Index max_deg ;   /* maximum row degree */
+  double default_knobs [COLAMD_KNOBS] ; /* default knobs array */
+  
+  
+  /* === Check the input arguments ======================================== */
+  
+  if (!stats)
+  {
+    COLAMD_DEBUG0 (("colamd: stats not present\n")) ;
+    return (false) ;
+  }
+  for (i = 0 ; i < COLAMD_STATS ; i++)
+  {
+    stats [i] = 0 ;
+  }
+  stats [COLAMD_STATUS] = COLAMD_OK ;
+  stats [COLAMD_INFO1] = -1 ;
+  stats [COLAMD_INFO2] = -1 ;
+  
+  if (!A)   /* A is not present */
+  {
+    stats [COLAMD_STATUS] = COLAMD_ERROR_A_not_present ;
+    COLAMD_DEBUG0 (("colamd: A not present\n")) ;
+    return (false) ;
+  }
+  
+  if (!p)   /* p is not present */
+  {
+    stats [COLAMD_STATUS] = COLAMD_ERROR_p_not_present ;
+    COLAMD_DEBUG0 (("colamd: p not present\n")) ;
+    return (false) ;
+  }
+  
+  if (n_row < 0)  /* n_row must be >= 0 */
+  {
+    stats [COLAMD_STATUS] = COLAMD_ERROR_nrow_negative ;
+    stats [COLAMD_INFO1] = n_row ;
+    COLAMD_DEBUG0 (("colamd: nrow negative %d\n", n_row)) ;
+    return (false) ;
+  }
+  
+  if (n_col < 0)  /* n_col must be >= 0 */
+  {
+    stats [COLAMD_STATUS] = COLAMD_ERROR_ncol_negative ;
+    stats [COLAMD_INFO1] = n_col ;
+    COLAMD_DEBUG0 (("colamd: ncol negative %d\n", n_col)) ;
+    return (false) ;
+  }
+  
+  nnz = p [n_col] ;
+  if (nnz < 0)  /* nnz must be >= 0 */
+  {
+    stats [COLAMD_STATUS] = COLAMD_ERROR_nnz_negative ;
+    stats [COLAMD_INFO1] = nnz ;
+    COLAMD_DEBUG0 (("colamd: number of entries negative %d\n", nnz)) ;
+    return (false) ;
+  }
+  
+  if (p [0] != 0)
+  {
+    stats [COLAMD_STATUS] = COLAMD_ERROR_p0_nonzero ;
+    stats [COLAMD_INFO1] = p [0] ;
+    COLAMD_DEBUG0 (("colamd: p[0] not zero %d\n", p [0])) ;
+    return (false) ;
+  }
+  
+  /* === If no knobs, set default knobs =================================== */
+  
+  if (!knobs)
+  {
+    colamd_set_defaults (default_knobs) ;
+    knobs = default_knobs ;
+  }
+  
+  /* === Allocate the Row and Col arrays from array A ===================== */
+  
+  Col_size = colamd_c (n_col) ;
+  Row_size = colamd_r (n_row) ;
+  need = 2*nnz + n_col + Col_size + Row_size ;
+  
+  if (need > Alen)
+  {
+    /* not enough space in array A to perform the ordering */
+    stats [COLAMD_STATUS] = COLAMD_ERROR_A_too_small ;
+    stats [COLAMD_INFO1] = need ;
+    stats [COLAMD_INFO2] = Alen ;
+    COLAMD_DEBUG0 (("colamd: Need Alen >= %d, given only Alen = %d\n", need,Alen));
+    return (false) ;
+  }
+  
+  Alen -= Col_size + Row_size ;
+  Col = (colamd_col<Index> *) &A [Alen] ;
+  Row = (Colamd_Row<Index> *) &A [Alen + Col_size] ;
+
+  /* === Construct the row and column data structures ===================== */
+  
+  if (!Eigen::internal::init_rows_cols (n_row, n_col, Row, Col, A, p, stats))
+  {
+    /* input matrix is invalid */
+    COLAMD_DEBUG0 (("colamd: Matrix invalid\n")) ;
+    return (false) ;
+  }
+  
+  /* === Initialize scores, kill dense rows/columns ======================= */
+
+  Eigen::internal::init_scoring (n_row, n_col, Row, Col, A, p, knobs,
+		&n_row2, &n_col2, &max_deg) ;
+  
+  /* === Order the supercolumns =========================================== */
+  
+  ngarbage = Eigen::internal::find_ordering (n_row, n_col, Alen, Row, Col, A, p,
+			    n_col2, max_deg, 2*nnz) ;
+  
+  /* === Order the non-principal columns ================================== */
+  
+  Eigen::internal::order_children (n_col, Col, p) ;
+  
+  /* === Return statistics in stats ======================================= */
+  
+  stats [COLAMD_DENSE_ROW] = n_row - n_row2 ;
+  stats [COLAMD_DENSE_COL] = n_col - n_col2 ;
+  stats [COLAMD_DEFRAG_COUNT] = ngarbage ;
+  COLAMD_DEBUG0 (("colamd: done.\n")) ; 
+  return (true) ;
+}
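+
+// End-to-end sketch (illustrative; the matrix data is hypothetical): order a
+// 3x3 matrix with 6 nonzeros given in compressed-column form. A is used as
+// in/out workspace and its contents are destroyed; on success the column
+// permutation is returned in p [0..n_col-1]. Passing a NULL knobs pointer
+// selects the defaults, as handled above.
+//
+//   int p [] = {0, 2, 4, 6} ;           /* column pointers, p[0] must be 0 */
+//   int stats [COLAMD_STATS] ;
+//   int Alen = Eigen::internal::colamd_recommended (6, 3, 3) ;
+//   /* allocate A of length Alen and copy the 6 row indices into A[0..5]  */
+//   bool ok = Eigen::internal::colamd (3, 3, Alen, A, p, (double *) 0, stats) ;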
+
+/* ========================================================================== */
+/* === NON-USER-CALLABLE ROUTINES: ========================================== */
+/* ========================================================================== */
+
+/* There are no user-callable routines beyond this point in the file */
+
+
+/* ========================================================================== */
+/* === init_rows_cols ======================================================= */
+/* ========================================================================== */
+
+/*
+  Takes the column form of the matrix in A and creates the row form of the
+  matrix.  Also, row and column attributes are stored in the Col and Row
+  structs.  If the columns are unsorted or contain duplicate row indices,
+  this routine will also sort and remove duplicate row indices from the
+  column form of the matrix.  Returns false if the matrix is invalid,
+  true otherwise.  Not user-callable.
+*/
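+// For illustration (not from the original sources): a 3x3 matrix with
+// nonzeros at (0,0), (2,0), (1,1), (0,2) and (2,2) arrives in column form as
+//   p = {0, 2, 3, 5}     /* column c occupies A [p [c] ... p [c+1]-1] */
+//   A = {0, 2, 1, 0, 2}  /* row indices, sorted within each column    */
+// and this routine appends the row form (column indices per row) after it.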
+template <typename Index>
+static Index init_rows_cols  /* returns true if OK, or false otherwise */
+  (
+    /* === Parameters ======================================================= */
+
+    Index n_row,      /* number of rows of A */
+    Index n_col,      /* number of columns of A */
+    Colamd_Row<Index> Row [],    /* of size n_row+1 */
+    colamd_col<Index> Col [],    /* of size n_col+1 */
+    Index A [],     /* row indices of A, of size Alen */
+    Index p [],     /* pointers to columns in A, of size n_col+1 */
+    Index stats [COLAMD_STATS]  /* colamd statistics */ 
+    )
+{
+  /* === Local variables ================================================== */
+
+  Index col ;     /* a column index */
+  Index row ;     /* a row index */
+  Index *cp ;     /* a column pointer */
+  Index *cp_end ;   /* a pointer to the end of a column */
+  Index *rp ;     /* a row pointer */
+  Index *rp_end ;   /* a pointer to the end of a row */
+  Index last_row ;    /* previous row */
+
+  /* === Initialize columns, and check column pointers ==================== */
+
+  for (col = 0 ; col < n_col ; col++)
+  {
+    Col [col].start = p [col] ;
+    Col [col].length = p [col+1] - p [col] ;
+
+    if (Col [col].length < 0)
+    {
+      /* column pointers must be non-decreasing */
+      stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ;
+      stats [COLAMD_INFO1] = col ;
+      stats [COLAMD_INFO2] = Col [col].length ;
+      COLAMD_DEBUG0 (("colamd: col %d length %d < 0\n", col, Col [col].length)) ;
+      return (false) ;
+    }
+
+    Col [col].shared1.thickness = 1 ;
+    Col [col].shared2.score = 0 ;
+    Col [col].shared3.prev = COLAMD_EMPTY ;
+    Col [col].shared4.degree_next = COLAMD_EMPTY ;
+  }
+
+  /* p [0..n_col] no longer needed, used as "head" in subsequent routines */
+
+  /* === Scan columns, compute row degrees, and check row indices ========= */
+
+  stats [COLAMD_INFO3] = 0 ;  /* number of duplicate or unsorted row indices*/
+
+  for (row = 0 ; row < n_row ; row++)
+  {
+    Row [row].length = 0 ;
+    Row [row].shared2.mark = -1 ;
+  }
+
+  for (col = 0 ; col < n_col ; col++)
+  {
+    last_row = -1 ;
+
+    cp = &A [p [col]] ;
+    cp_end = &A [p [col+1]] ;
+
+    while (cp < cp_end)
+    {
+      row = *cp++ ;
+
+      /* make sure row indices within range */
+      if (row < 0 || row >= n_row)
+      {
+	stats [COLAMD_STATUS] = COLAMD_ERROR_row_index_out_of_bounds ;
+	stats [COLAMD_INFO1] = col ;
+	stats [COLAMD_INFO2] = row ;
+	stats [COLAMD_INFO3] = n_row ;
+	COLAMD_DEBUG0 (("colamd: row %d col %d out of bounds\n", row, col)) ;
+	return (false) ;
+      }
+
+      if (row <= last_row || Row [row].shared2.mark == col)
+      {
+	/* row indices are unsorted or repeated (or both), thus col */
+	/* is jumbled.  This is a notice, not an error condition. */
+	stats [COLAMD_STATUS] = COLAMD_OK_BUT_JUMBLED ;
+	stats [COLAMD_INFO1] = col ;
+	stats [COLAMD_INFO2] = row ;
+	(stats [COLAMD_INFO3]) ++ ;
+	COLAMD_DEBUG1 (("colamd: row %d col %d unsorted/duplicate\n",row,col));
+      }
+
+      if (Row [row].shared2.mark != col)
+      {
+	Row [row].length++ ;
+      }
+      else
+      {
+	/* this is a repeated entry in the column, */
+	/* it will be removed */
+	Col [col].length-- ;
+      }
+
+      /* mark the row as having been seen in this column */
+      Row [row].shared2.mark = col ;
+
+      last_row = row ;
+    }
+  }
+
+  /* === Compute row pointers ============================================= */
+
+  /* row form of the matrix starts directly after the column */
+  /* form of matrix in A */
+  Row [0].start = p [n_col] ;
+  Row [0].shared1.p = Row [0].start ;
+  Row [0].shared2.mark = -1 ;
+  for (row = 1 ; row < n_row ; row++)
+  {
+    Row [row].start = Row [row-1].start + Row [row-1].length ;
+    Row [row].shared1.p = Row [row].start ;
+    Row [row].shared2.mark = -1 ;
+  }
+
+  /* === Create row form ================================================== */
+
+  if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)
+  {
+    /* if cols jumbled, watch for repeated row indices */
+    for (col = 0 ; col < n_col ; col++)
+    {
+      cp = &A [p [col]] ;
+      cp_end = &A [p [col+1]] ;
+      while (cp < cp_end)
+      {
+	row = *cp++ ;
+	if (Row [row].shared2.mark != col)
+	{
+	  A [(Row [row].shared1.p)++] = col ;
+	  Row [row].shared2.mark = col ;
+	}
+      }
+    }
+  }
+  else
+  {
+    /* if cols not jumbled, we don't need the mark (this is faster) */
+    for (col = 0 ; col < n_col ; col++)
+    {
+      cp = &A [p [col]] ;
+      cp_end = &A [p [col+1]] ;
+      while (cp < cp_end)
+      {
+	A [(Row [*cp++].shared1.p)++] = col ;
+      }
+    }
+  }
+
+  /* === Clear the row marks and set row degrees ========================== */
+
+  for (row = 0 ; row < n_row ; row++)
+  {
+    Row [row].shared2.mark = 0 ;
+    Row [row].shared1.degree = Row [row].length ;
+  }
+
+  /* === See if we need to re-create columns ============================== */
+
+  if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)
+  {
+    COLAMD_DEBUG0 (("colamd: reconstructing column form, matrix jumbled\n")) ;
+
+
+    /* === Compute col pointers ========================================= */
+
+    /* col form of the matrix starts at A [0]. */
+    /* Note, we may have a gap between the col form and the row */
+    /* form if there were duplicate entries, if so, it will be */
+    /* removed upon the first garbage collection */
+    Col [0].start = 0 ;
+    p [0] = Col [0].start ;
+    for (col = 1 ; col < n_col ; col++)
+    {
+      /* note that the lengths here are for pruned columns, i.e. */
+      /* no duplicate row indices will exist for these columns */
+      Col [col].start = Col [col-1].start + Col [col-1].length ;
+      p [col] = Col [col].start ;
+    }
+
+    /* === Re-create col form =========================================== */
+
+    for (row = 0 ; row < n_row ; row++)
+    {
+      rp = &A [Row [row].start] ;
+      rp_end = rp + Row [row].length ;
+      while (rp < rp_end)
+      {
+	A [(p [*rp++])++] = row ;
+      }
+    }
+  }
+
+  /* === Done.  Matrix is not (or no longer) jumbled ====================== */
+
+  return (true) ;
+}
+
+
+/* ========================================================================== */
+/* === init_scoring ========================================================= */
+/* ========================================================================== */
+
+/*
+  Kills dense or empty columns and rows, calculates an initial score for
+  each column, and places all columns in the degree lists.  Not user-callable.
+*/
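+// In the scoring loop below, the initial score of a live column c is
+//   score (c) = sum over its live rows r of (degree (r) - 1),
+// clamped to n_col to guard against integer overflow; this approximates the
+// external degree of c and is used to seed the degree lists.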
+template <typename Index>
+static void init_scoring
+  (
+    /* === Parameters ======================================================= */
+
+    Index n_row,      /* number of rows of A */
+    Index n_col,      /* number of columns of A */
+    Colamd_Row<Index> Row [],    /* of size n_row+1 */
+    colamd_col<Index> Col [],    /* of size n_col+1 */
+    Index A [],     /* column form and row form of A */
+    Index head [],    /* of size n_col+1 */
+    double knobs [COLAMD_KNOBS],/* parameters */
+    Index *p_n_row2,    /* number of non-dense, non-empty rows */
+    Index *p_n_col2,    /* number of non-dense, non-empty columns */
+    Index *p_max_deg    /* maximum row degree */
+    )
+{
+  /* === Local variables ================================================== */
+
+  Index c ;     /* a column index */
+  Index r, row ;    /* a row index */
+  Index *cp ;     /* a column pointer */
+  Index deg ;     /* degree of a row or column */
+  Index *cp_end ;   /* a pointer to the end of a column */
+  Index *new_cp ;   /* new column pointer */
+  Index col_length ;    /* length of pruned column */
+  Index score ;     /* current column score */
+  Index n_col2 ;    /* number of non-dense, non-empty columns */
+  Index n_row2 ;    /* number of non-dense, non-empty rows */
+  Index dense_row_count ; /* remove rows with more entries than this */
+  Index dense_col_count ; /* remove cols with more entries than this */
+  Index min_score ;   /* smallest column score */
+  Index max_deg ;   /* maximum row degree */
+  Index next_col ;    /* Used to add to degree list.*/
+
+
+  /* === Extract knobs ==================================================== */
+
+  dense_row_count = COLAMD_MAX (0, COLAMD_MIN (knobs [COLAMD_DENSE_ROW] * n_col, n_col)) ;
+  dense_col_count = COLAMD_MAX (0, COLAMD_MIN (knobs [COLAMD_DENSE_COL] * n_row, n_row)) ;
+  COLAMD_DEBUG1 (("colamd: densecount: %d %d\n", dense_row_count, dense_col_count)) ;
+  max_deg = 0 ;
+  n_col2 = n_col ;
+  n_row2 = n_row ;
+
+  /* === Kill empty columns =============================================== */
+
+  /* Put the empty columns at the end in their natural order, so that LU */
+  /* factorization can proceed as far as possible. */
+  for (c = n_col-1 ; c >= 0 ; c--)
+  {
+    deg = Col [c].length ;
+    if (deg == 0)
+    {
+      /* this is an empty column, kill and order it last */
+      Col [c].shared2.order = --n_col2 ;
+      KILL_PRINCIPAL_COL (c) ;
+    }
+  }
+  COLAMD_DEBUG1 (("colamd: null columns killed: %d\n", n_col - n_col2)) ;
+
+  /* === Kill dense columns =============================================== */
+
+  /* Put the dense columns at the end, in their natural order */
+  for (c = n_col-1 ; c >= 0 ; c--)
+  {
+    /* skip any dead columns */
+    if (COL_IS_DEAD (c))
+    {
+      continue ;
+    }
+    deg = Col [c].length ;
+    if (deg > dense_col_count)
+    {
+      /* this is a dense column, kill and order it last */
+      Col [c].shared2.order = --n_col2 ;
+      /* decrement the row degrees */
+      cp = &A [Col [c].start] ;
+      cp_end = cp + Col [c].length ;
+      while (cp < cp_end)
+      {
+	Row [*cp++].shared1.degree-- ;
+      }
+      KILL_PRINCIPAL_COL (c) ;
+    }
+  }
+  COLAMD_DEBUG1 (("colamd: Dense and null columns killed: %d\n", n_col - n_col2)) ;
+
+  /* === Kill dense and empty rows ======================================== */
+
+  for (r = 0 ; r < n_row ; r++)
+  {
+    deg = Row [r].shared1.degree ;
+    COLAMD_ASSERT (deg >= 0 && deg <= n_col) ;
+    if (deg > dense_row_count || deg == 0)
+    {
+      /* kill a dense or empty row */
+      KILL_ROW (r) ;
+      --n_row2 ;
+    }
+    else
+    {
+      /* keep track of max degree of remaining rows */
+      max_deg = COLAMD_MAX (max_deg, deg) ;
+    }
+  }
+  COLAMD_DEBUG1 (("colamd: Dense and null rows killed: %d\n", n_row - n_row2)) ;
+
+  /* === Compute initial column scores ==================================== */
+
+  /* At this point the row degrees are accurate.  They reflect the number */
+  /* of "live" (non-dense) columns in each row.  No empty rows exist. */
+  /* Some "live" columns may contain only dead rows, however.  These are */
+  /* pruned in the code below. */
+
+  /* now find the initial matlab score for each column */
+  for (c = n_col-1 ; c >= 0 ; c--)
+  {
+    /* skip dead column */
+    if (COL_IS_DEAD (c))
+    {
+      continue ;
+    }
+    score = 0 ;
+    cp = &A [Col [c].start] ;
+    new_cp = cp ;
+    cp_end = cp + Col [c].length ;
+    while (cp < cp_end)
+    {
+      /* get a row */
+      row = *cp++ ;
+      /* skip if dead */
+      if (ROW_IS_DEAD (row))
+      {
+	continue ;
+      }
+      /* compact the column */
+      *new_cp++ = row ;
+      /* add row's external degree */
+      score += Row [row].shared1.degree - 1 ;
+      /* guard against integer overflow */
+      score = COLAMD_MIN (score, n_col) ;
+    }
+    /* determine pruned column length */
+    col_length = (Index) (new_cp - &A [Col [c].start]) ;
+    if (col_length == 0)
+    {
+      /* a newly-made null column (all rows in this col are "dense" */
+      /* and have already been killed) */
+      COLAMD_DEBUG2 (("Newly null killed: %d\n", c)) ;
+      Col [c].shared2.order = --n_col2 ;
+      KILL_PRINCIPAL_COL (c) ;
+    }
+    else
+    {
+      /* set column length and set score */
+      COLAMD_ASSERT (score >= 0) ;
+      COLAMD_ASSERT (score <= n_col) ;
+      Col [c].length = col_length ;
+      Col [c].shared2.score = score ;
+    }
+  }
+  COLAMD_DEBUG1 (("colamd: Dense, null, and newly-null columns killed: %d\n",
+		  n_col-n_col2)) ;
+
+  /* At this point, all empty rows and columns are dead.  All live columns */
+  /* are "clean" (containing no dead rows) and simplicial (no supercolumns */
+  /* yet).  Rows may contain dead columns, but all live rows contain at */
+  /* least one live column. */
+
+  /* === Initialize degree lists ========================================== */
+
+
+  /* clear the hash buckets */
+  for (c = 0 ; c <= n_col ; c++)
+  {
+    head [c] = COLAMD_EMPTY ;
+  }
+  min_score = n_col ;
+  /* place in reverse order, so low column indices are at the front */
+  /* of the lists.  This is to encourage natural tie-breaking */
+  for (c = n_col-1 ; c >= 0 ; c--)
+  {
+    /* only add principal columns to degree lists */
+    if (COL_IS_ALIVE (c))
+    {
+      COLAMD_DEBUG4 (("place %d score %d minscore %d ncol %d\n",
+		      c, Col [c].shared2.score, min_score, n_col)) ;
+
+      /* === Add columns score to DList =============================== */
+
+      score = Col [c].shared2.score ;
+
+      COLAMD_ASSERT (min_score >= 0) ;
+      COLAMD_ASSERT (min_score <= n_col) ;
+      COLAMD_ASSERT (score >= 0) ;
+      COLAMD_ASSERT (score <= n_col) ;
+      COLAMD_ASSERT (head [score] >= COLAMD_EMPTY) ;
+
+      /* now add this column to dList at proper score location */
+      next_col = head [score] ;
+      Col [c].shared3.prev = COLAMD_EMPTY ;
+      Col [c].shared4.degree_next = next_col ;
+
+      /* if there already was a column with the same score, set its */
+      /* previous pointer to this new column */
+      if (next_col != COLAMD_EMPTY)
+      {
+	Col [next_col].shared3.prev = c ;
+      }
+      head [score] = c ;
+
+      /* see if this score is less than current min */
+      min_score = COLAMD_MIN (min_score, score) ;
+
+
+    }
+  }
+
+
+  /* === Return number of remaining columns, and max row degree =========== */
+
+  *p_n_col2 = n_col2 ;
+  *p_n_row2 = n_row2 ;
+  *p_max_deg = max_deg ;
+}
+
+
+/* ========================================================================== */
+/* === find_ordering ======================================================== */
+/* ========================================================================== */
+
+/*
+  Order the principal columns of the supercolumn form of the matrix
+  (no supercolumns on input).  Uses an approximate column minimum
+  degree ordering method.  Not user-callable.
+*/
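+// Outline of the loop below (paraphrasing its section headers): repeatedly
+// (1) pop the column of minimum score from the degree lists, (2) garbage-
+// collect A when the merged pivot row might not fit, (3) form the pivot row
+// as the union of the live rows of the pivot column, and (4) update the
+// scores of the remaining columns via set differences against that row.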
+template <typename Index>
+static Index find_ordering /* return the number of garbage collections */
+  (
+    /* === Parameters ======================================================= */
+
+    Index n_row,      /* number of rows of A */
+    Index n_col,      /* number of columns of A */
+    Index Alen,     /* size of A, 2*nnz + n_col or larger */
+    Colamd_Row<Index> Row [],    /* of size n_row+1 */
+    colamd_col<Index> Col [],    /* of size n_col+1 */
+    Index A [],     /* column form and row form of A */
+    Index head [],    /* of size n_col+1 */
+    Index n_col2,     /* Remaining columns to order */
+    Index max_deg,    /* Maximum row degree */
+    Index pfree     /* index of first free slot (2*nnz on entry) */
+    )
+{
+  /* === Local variables ================================================== */
+
+  Index k ;     /* current pivot ordering step */
+  Index pivot_col ;   /* current pivot column */
+  Index *cp ;     /* a column pointer */
+  Index *rp ;     /* a row pointer */
+  Index pivot_row ;   /* current pivot row */
+  Index *new_cp ;   /* modified column pointer */
+  Index *new_rp ;   /* modified row pointer */
+  Index pivot_row_start ; /* pointer to start of pivot row */
+  Index pivot_row_degree ;  /* number of columns in pivot row */
+  Index pivot_row_length ;  /* number of supercolumns in pivot row */
+  Index pivot_col_score ; /* score of pivot column */
+  Index needed_memory ;   /* free space needed for pivot row */
+  Index *cp_end ;   /* pointer to the end of a column */
+  Index *rp_end ;   /* pointer to the end of a row */
+  Index row ;     /* a row index */
+  Index col ;     /* a column index */
+  Index max_score ;   /* maximum possible score */
+  Index cur_score ;   /* score of current column */
+  unsigned int hash ;   /* hash value for supernode detection */
+  Index head_column ;   /* head of hash bucket */
+  Index first_col ;   /* first column in hash bucket */
+  Index tag_mark ;    /* marker value for mark array */
+  Index row_mark ;    /* Row [row].shared2.mark */
+  Index set_difference ;  /* set difference size of row with pivot row */
+  Index min_score ;   /* smallest column score */
+  Index col_thickness ;   /* "thickness" (no. of columns in a supercol) */
+  Index max_mark ;    /* maximum value of tag_mark */
+  Index pivot_col_thickness ; /* number of columns represented by pivot col */
+  Index prev_col ;    /* Used by Dlist operations. */
+  Index next_col ;    /* Used by Dlist operations. */
+  Index ngarbage ;    /* number of garbage collections performed */
+
+
+  /* === Initialization and clear mark ==================================== */
+
+  max_mark = INT_MAX - n_col ;  /* INT_MAX defined in <limits.h> */
+  tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
+  min_score = 0 ;
+  ngarbage = 0 ;
+  COLAMD_DEBUG1 (("colamd: Ordering, n_col2=%d\n", n_col2)) ;
+
+  /* === Order the columns ================================================ */
+
+  for (k = 0 ; k < n_col2 ; /* 'k' is incremented below */)
+  {
+
+    /* === Select pivot column, and order it ============================ */
+
+    /* make sure degree list isn't empty */
+    COLAMD_ASSERT (min_score >= 0) ;
+    COLAMD_ASSERT (min_score <= n_col) ;
+    COLAMD_ASSERT (head [min_score] >= COLAMD_EMPTY) ;
+
+    /* get pivot column from head of minimum degree list */
+    while (head [min_score] == COLAMD_EMPTY && min_score < n_col)
+    {
+      min_score++ ;
+    }
+    pivot_col = head [min_score] ;
+    COLAMD_ASSERT (pivot_col >= 0 && pivot_col <= n_col) ;
+    next_col = Col [pivot_col].shared4.degree_next ;
+    head [min_score] = next_col ;
+    if (next_col != COLAMD_EMPTY)
+    {
+      Col [next_col].shared3.prev = COLAMD_EMPTY ;
+    }
+
+    COLAMD_ASSERT (COL_IS_ALIVE (pivot_col)) ;
+    COLAMD_DEBUG3 (("Pivot col: %d\n", pivot_col)) ;
+
+    /* remember score for defrag check */
+    pivot_col_score = Col [pivot_col].shared2.score ;
+
+    /* the pivot column is the kth column in the pivot order */
+    Col [pivot_col].shared2.order = k ;
+
+    /* increment order count by column thickness */
+    pivot_col_thickness = Col [pivot_col].shared1.thickness ;
+    k += pivot_col_thickness ;
+    COLAMD_ASSERT (pivot_col_thickness > 0) ;
+
+    /* === Garbage_collection, if necessary ============================= */
+
+    needed_memory = COLAMD_MIN (pivot_col_score, n_col - k) ;
+    if (pfree + needed_memory >= Alen)
+    {
+      pfree = Eigen::internal::garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ;
+      ngarbage++ ;
+      /* after garbage collection we will have enough */
+      COLAMD_ASSERT (pfree + needed_memory < Alen) ;
+      /* garbage collection has wiped out the Row[].shared2.mark array */
+      tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
+
+    }
+
+    /* === Compute pivot row pattern ==================================== */
+
+    /* get starting location for this new merged row */
+    pivot_row_start = pfree ;
+
+    /* initialize new row counts to zero */
+    pivot_row_degree = 0 ;
+
+    /* tag pivot column as having been visited so it isn't included */
+    /* in merged pivot row */
+    Col [pivot_col].shared1.thickness = -pivot_col_thickness ;
+
+    /* pivot row is the union of all rows in the pivot column pattern */
+    cp = &A [Col [pivot_col].start] ;
+    cp_end = cp + Col [pivot_col].length ;
+    while (cp < cp_end)
+    {
+      /* get a row */
+      row = *cp++ ;
+      COLAMD_DEBUG4 (("Pivot col pattern %d %d\n", ROW_IS_ALIVE (row), row)) ;
+      /* skip if row is dead */
+      if (ROW_IS_DEAD (row))
+      {
+	continue ;
+      }
+      rp = &A [Row [row].start] ;
+      rp_end = rp + Row [row].length ;
+      while (rp < rp_end)
+      {
+	/* get a column */
+	col = *rp++ ;
+	/* add the column, if alive and untagged */
+	col_thickness = Col [col].shared1.thickness ;
+	if (col_thickness > 0 && COL_IS_ALIVE (col))
+	{
+	  /* tag column in pivot row */
+	  Col [col].shared1.thickness = -col_thickness ;
+	  COLAMD_ASSERT (pfree < Alen) ;
+	  /* place column in pivot row */
+	  A [pfree++] = col ;
+	  pivot_row_degree += col_thickness ;
+	}
+      }
+    }
+
+    /* clear tag on pivot column */
+    Col [pivot_col].shared1.thickness = pivot_col_thickness ;
+    max_deg = COLAMD_MAX (max_deg, pivot_row_degree) ;
+
+
+    /* === Kill all rows used to construct pivot row ==================== */
+
+    /* also kill pivot row, temporarily */
+    cp = &A [Col [pivot_col].start] ;
+    cp_end = cp + Col [pivot_col].length ;
+    while (cp < cp_end)
+    {
+      /* may be killing an already dead row */
+      row = *cp++ ;
+      COLAMD_DEBUG3 (("Kill row in pivot col: %d\n", row)) ;
+      KILL_ROW (row) ;
+    }
+
+    /* === Select a row index to use as the new pivot row =============== */
+
+    pivot_row_length = pfree - pivot_row_start ;
+    if (pivot_row_length > 0)
+    {
+      /* pick the "pivot" row arbitrarily (first row in col) */
+      pivot_row = A [Col [pivot_col].start] ;
+      COLAMD_DEBUG3 (("Pivotal row is %d\n", pivot_row)) ;
+    }
+    else
+    {
+      /* there is no pivot row, since it is of zero length */
+      pivot_row = COLAMD_EMPTY ;
+      COLAMD_ASSERT (pivot_row_length == 0) ;
+    }
+    COLAMD_ASSERT (Col [pivot_col].length > 0 || pivot_row_length == 0) ;
+
+    /* === Approximate degree computation =============================== */
+
+    /* Here begins the computation of the approximate degree.  The column */
+    /* score is the sum of the pivot row "length", plus the size of the */
+    /* set differences of each row in the column minus the pattern of the */
+    /* pivot row itself.  The column ("thickness") itself is also */
+    /* excluded from the column score (we thus use an approximate */
+    /* external degree). */
+
+    /* The time taken by the following code (compute set differences, and */
+    /* add them up) is proportional to the size of the data structure */
+    /* being scanned - that is, the sum of the sizes of each column in */
+    /* the pivot row.  Thus, the amortized time to compute a column score */
+    /* is proportional to the size of that column (where size, in this */
+    /* context, is the column "length", or the number of row indices */
+    /* in that column).  The number of row indices in a column is */
+    /* monotonically non-decreasing, from the length of the original */
+    /* column on input to colamd. */
+
+    /* === Compute set differences ====================================== */
+
+    COLAMD_DEBUG3 (("** Computing set differences phase. **\n")) ;
+
+    /* pivot row is currently dead - it will be revived later. */
+
+    COLAMD_DEBUG3 (("Pivot row: ")) ;
+    /* for each column in pivot row */
+    rp = &A [pivot_row_start] ;
+    rp_end = rp + pivot_row_length ;
+    while (rp < rp_end)
+    {
+      col = *rp++ ;
+      COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;
+      COLAMD_DEBUG3 (("Col: %d\n", col)) ;
+
+      /* clear tags used to construct pivot row pattern */
+      col_thickness = -Col [col].shared1.thickness ;
+      COLAMD_ASSERT (col_thickness > 0) ;
+      Col [col].shared1.thickness = col_thickness ;
+
+      /* === Remove column from degree list =========================== */
+
+      cur_score = Col [col].shared2.score ;
+      prev_col = Col [col].shared3.prev ;
+      next_col = Col [col].shared4.degree_next ;
+      COLAMD_ASSERT (cur_score >= 0) ;
+      COLAMD_ASSERT (cur_score <= n_col) ;
+      COLAMD_ASSERT (cur_score >= COLAMD_EMPTY) ;
+      if (prev_col == COLAMD_EMPTY)
+      {
+	head [cur_score] = next_col ;
+      }
+      else
+      {
+	Col [prev_col].shared4.degree_next = next_col ;
+      }
+      if (next_col != COLAMD_EMPTY)
+      {
+	Col [next_col].shared3.prev = prev_col ;
+      }
+
+      /* === Scan the column ========================================== */
+
+      cp = &A [Col [col].start] ;
+      cp_end = cp + Col [col].length ;
+      while (cp < cp_end)
+      {
+	/* get a row */
+	row = *cp++ ;
+	row_mark = Row [row].shared2.mark ;
+	/* skip if dead */
+	if (ROW_IS_MARKED_DEAD (row_mark))
+	{
+	  continue ;
+	}
+	COLAMD_ASSERT (row != pivot_row) ;
+	set_difference = row_mark - tag_mark ;
+	/* check if the row has been seen yet */
+	if (set_difference < 0)
+	{
+	  COLAMD_ASSERT (Row [row].shared1.degree <= max_deg) ;
+	  set_difference = Row [row].shared1.degree ;
+	}
+	/* subtract column thickness from this row's set difference */
+	set_difference -= col_thickness ;
+	COLAMD_ASSERT (set_difference >= 0) ;
+	/* absorb this row if the set difference becomes zero */
+	if (set_difference == 0)
+	{
+	  COLAMD_DEBUG3 (("aggressive absorption. Row: %d\n", row)) ;
+	  KILL_ROW (row) ;
+	}
+	else
+	{
+	  /* save the new mark */
+	  Row [row].shared2.mark = set_difference + tag_mark ;
+	}
+      }
+    }
+
+
+    /* === Add up set differences for each column ======================= */
+
+    COLAMD_DEBUG3 (("** Adding set differences phase. **\n")) ;
+
+    /* for each column in pivot row */
+    rp = &A [pivot_row_start] ;
+    rp_end = rp + pivot_row_length ;
+    while (rp < rp_end)
+    {
+      /* get a column */
+      col = *rp++ ;
+      COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;
+      hash = 0 ;
+      cur_score = 0 ;
+      cp = &A [Col [col].start] ;
+      /* compact the column */
+      new_cp = cp ;
+      cp_end = cp + Col [col].length ;
+
+      COLAMD_DEBUG4 (("Adding set diffs for Col: %d.\n", col)) ;
+
+      while (cp < cp_end)
+      {
+	/* get a row */
+	row = *cp++ ;
+	COLAMD_ASSERT(row >= 0 && row < n_row) ;
+	row_mark = Row [row].shared2.mark ;
+	/* skip if dead */
+	if (ROW_IS_MARKED_DEAD (row_mark))
+	{
+	  continue ;
+	}
+	COLAMD_ASSERT (row_mark > tag_mark) ;
+	/* compact the column */
+	*new_cp++ = row ;
+	/* compute hash function */
+	hash += row ;
+	/* add set difference */
+	cur_score += row_mark - tag_mark ;
+	/* clamp to guard against integer overflow */
+	cur_score = COLAMD_MIN (cur_score, n_col) ;
+      }
+
+      /* recompute the column's length */
+      Col [col].length = (Index) (new_cp - &A [Col [col].start]) ;
+
+      /* === Further mass elimination ================================= */
+
+      if (Col [col].length == 0)
+      {
+	COLAMD_DEBUG4 (("further mass elimination. Col: %d\n", col)) ;
+	/* nothing left but the pivot row in this column */
+	KILL_PRINCIPAL_COL (col) ;
+	pivot_row_degree -= Col [col].shared1.thickness ;
+	COLAMD_ASSERT (pivot_row_degree >= 0) ;
+	/* order it */
+	Col [col].shared2.order = k ;
+	/* increment order count by column thickness */
+	k += Col [col].shared1.thickness ;
+      }
+      else
+      {
+	/* === Prepare for supercolumn detection ==================== */
+
+	COLAMD_DEBUG4 (("Preparing supercol detection for Col: %d.\n", col)) ;
+
+	/* save score so far */
+	Col [col].shared2.score = cur_score ;
+
+	/* add column to hash table, for supercolumn detection */
+	hash %= n_col + 1 ;
+
+	COLAMD_DEBUG4 ((" Hash = %d, n_col = %d.\n", hash, n_col)) ;
+	COLAMD_ASSERT (hash <= n_col) ;
+
+	head_column = head [hash] ;
+	if (head_column > COLAMD_EMPTY)
+	{
+	  /* degree list "hash" is non-empty, use prev (shared3) of */
+	  /* first column in degree list as head of hash bucket */
+	  first_col = Col [head_column].shared3.headhash ;
+	  Col [head_column].shared3.headhash = col ;
+	}
+	else
+	{
+	  /* degree list "hash" is empty, use head as hash bucket */
+	  first_col = - (head_column + 2) ;
+	  head [hash] = - (col + 2) ;
+	}
+	Col [col].shared4.hash_next = first_col ;
+
+	/* save hash function in Col [col].shared3.hash */
+	Col [col].shared3.hash = (Index) hash ;
+	COLAMD_ASSERT (COL_IS_ALIVE (col)) ;
+      }
+    }
+
+    /* The approximate external column degree is now computed.  */
+
+    /* === Supercolumn detection ======================================== */
+
+    COLAMD_DEBUG3 (("** Supercolumn detection phase. **\n")) ;
+
+    Eigen::internal::detect_super_cols (Col, A, head, pivot_row_start, pivot_row_length) ;
+
+    /* === Kill the pivotal column ====================================== */
+
+    KILL_PRINCIPAL_COL (pivot_col) ;
+
+    /* === Clear mark =================================================== */
+
+    tag_mark += (max_deg + 1) ;
+    if (tag_mark >= max_mark)
+    {
+      COLAMD_DEBUG2 (("clearing tag_mark\n")) ;
+      tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
+    }
+
+    /* === Finalize the new pivot row, and column scores ================ */
+
+    COLAMD_DEBUG3 (("** Finalize scores phase. **\n")) ;
+
+    /* for each column in pivot row */
+    rp = &A [pivot_row_start] ;
+    /* compact the pivot row */
+    new_rp = rp ;
+    rp_end = rp + pivot_row_length ;
+    while (rp < rp_end)
+    {
+      col = *rp++ ;
+      /* skip dead columns */
+      if (COL_IS_DEAD (col))
+      {
+	continue ;
+      }
+      *new_rp++ = col ;
+      /* add new pivot row to column */
+      A [Col [col].start + (Col [col].length++)] = pivot_row ;
+
+      /* retrieve score so far and add on pivot row's degree. */
+      /* (we wait until here for this in case the pivot */
+      /* row's degree was reduced due to mass elimination). */
+      cur_score = Col [col].shared2.score + pivot_row_degree ;
+
+      /* calculate the max possible score as the number of */
+      /* external columns minus the 'k' value minus the */
+      /* column's thickness */
+      max_score = n_col - k - Col [col].shared1.thickness ;
+
+      /* make the score the external degree of the union-of-rows */
+      cur_score -= Col [col].shared1.thickness ;
+
+      /* make sure the score is less than or equal to the max score */
+      cur_score = COLAMD_MIN (cur_score, max_score) ;
+      COLAMD_ASSERT (cur_score >= 0) ;
+
+      /* store updated score */
+      Col [col].shared2.score = cur_score ;
+
+      /* === Place column back in degree list ========================= */
+
+      COLAMD_ASSERT (min_score >= 0) ;
+      COLAMD_ASSERT (min_score <= n_col) ;
+      COLAMD_ASSERT (cur_score >= 0) ;
+      COLAMD_ASSERT (cur_score <= n_col) ;
+      COLAMD_ASSERT (head [cur_score] >= COLAMD_EMPTY) ;
+      next_col = head [cur_score] ;
+      Col [col].shared4.degree_next = next_col ;
+      Col [col].shared3.prev = COLAMD_EMPTY ;
+      if (next_col != COLAMD_EMPTY)
+      {
+	Col [next_col].shared3.prev = col ;
+      }
+      head [cur_score] = col ;
+
+      /* see if this score is less than current min */
+      min_score = COLAMD_MIN (min_score, cur_score) ;
+
+    }
+
+    /* === Resurrect the new pivot row ================================== */
+
+    if (pivot_row_degree > 0)
+    {
+      /* update pivot row length to reflect any cols that were killed */
+      /* during super-col detection and mass elimination */
+      Row [pivot_row].start  = pivot_row_start ;
+      Row [pivot_row].length = (Index) (new_rp - &A[pivot_row_start]) ;
+      Row [pivot_row].shared1.degree = pivot_row_degree ;
+      Row [pivot_row].shared2.mark = 0 ;
+      /* pivot row is no longer dead */
+    }
+  }
+
+  /* === All principal columns have now been ordered ====================== */
+
+  return (ngarbage) ;
+}
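
Editor's note on the mark mechanism used throughout the routine above: rather than resetting every Row [].shared2.mark between pivots, find_ordering advances tag_mark and treats any mark below the current stamp as unseen; clear_mark is only needed when the stamp would overflow max_mark. A minimal stand-alone sketch of this device (plain C++, illustrative names only, not part of COLAMD):

    #include <cassert>

    int main()
    {
      int mark[4] = {0, 0, 0, 0};   // per-row marks, all initially unseen
      int tag_mark = 1;             // current stamp

      mark[1] = tag_mark;           // visit rows 1 and 3 in this round
      mark[3] = tag_mark;
      assert(mark[0] <  tag_mark);  // row 0 reads as unseen
      assert(mark[1] >= tag_mark);  // row 1 reads as seen

      tag_mark++;                   // "clear" every mark in O(1)
      assert(mark[1] < tag_mark);   // all rows read as unseen again
      return 0;
    }
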
+
+
+/* ========================================================================== */
+/* === order_children ======================================================= */
+/* ========================================================================== */
+
+/*
+  The find_ordering routine has ordered all of the principal columns (the
+  representatives of the supercolumns).  The non-principal columns have not
+  yet been ordered.  This routine orders those columns by walking up the
+  parent tree (a column is a child of the column which absorbed it).  The
+  final permutation vector is then placed in p [0 ... n_col-1], with p [0]
+  being the first column, and p [n_col-1] being the last.  Although it is
+  not immediately obvious, this routine takes time linear in the number
+  of columns, O (n_col), because the parent tree is collapsed (paths are
+  compressed) as it is walked.  Not user-callable.
+*/
+template <typename Index>
+static inline  void order_children
+(
+  /* === Parameters ======================================================= */
+
+  Index n_col,      /* number of columns of A */
+  colamd_col<Index> Col [],    /* of size n_col+1 */
+  Index p []      /* p [0 ... n_col-1] is the column permutation*/
+  )
+{
+  /* === Local variables ================================================== */
+
+  Index i ;     /* loop counter for all columns */
+  Index c ;     /* column index */
+  Index parent ;    /* index of column's parent */
+  Index order ;     /* column's order */
+
+  /* === Order each non-principal column ================================== */
+
+  for (i = 0 ; i < n_col ; i++)
+  {
+    /* find an un-ordered non-principal column */
+    COLAMD_ASSERT (COL_IS_DEAD (i)) ;
+    if (!COL_IS_DEAD_PRINCIPAL (i) && Col [i].shared2.order == COLAMD_EMPTY)
+    {
+      parent = i ;
+      /* once found, find its principal parent */
+      do
+      {
+	parent = Col [parent].shared1.parent ;
+      } while (!COL_IS_DEAD_PRINCIPAL (parent)) ;
+
+      /* now, order all un-ordered non-principal columns along path */
+      /* to this parent.  collapse tree at the same time */
+      c = i ;
+      /* get order of parent */
+      order = Col [parent].shared2.order ;
+
+      do
+      {
+	COLAMD_ASSERT (Col [c].shared2.order == COLAMD_EMPTY) ;
+
+	/* order this column */
+	Col [c].shared2.order = order++ ;
+	/* collapse the tree */
+	Col [c].shared1.parent = parent ;
+
+	/* get immediate parent of this column */
+	c = Col [c].shared1.parent ;
+
+	/* continue until we hit an ordered column.  There are */
+	/* guaranteed not to be any more unordered columns */
+	/* above an ordered column */
+      } while (Col [c].shared2.order == COLAMD_EMPTY) ;
+
+      /* re-order the super_col parent to largest order for this group */
+      Col [parent].shared2.order = order ;
+    }
+  }
+
+  /* === Generate the permutation ========================================= */
+
+  for (c = 0 ; c < n_col ; c++)
+  {
+    p [Col [c].shared2.order] = c ;
+  }
+}
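
The ordering walk above is compact but subtle: once a column's path is collapsed, the column jumps straight to its principal parent, and the parent is re-ordered to the largest slot of its group. A hypothetical stand-alone sketch of the same idea (plain C++, illustrative names, not part of COLAMD):

    #include <cassert>
    #include <vector>

    int main()
    {
      const int EMPTY = -1;
      // Column 1 was absorbed into 0, and column 2 into 1; 0 and 3 are principal.
      std::vector<int>  parent    = {0, 0, 1, 3};
      std::vector<bool> principal = {true, false, false, true};
      std::vector<int>  order     = {0, EMPTY, EMPTY, 3};  // principals pre-ordered

      for (int i = 0; i < (int) parent.size(); i++)
      {
        if (principal[i] || order[i] != EMPTY) continue;
        int p = i;
        while (!principal[p]) p = parent[p];   // find the principal parent
        int next = order[p];
        for (int c = i; order[c] == EMPTY; c = p)
        {
          order[c] = next++;                   // order this column
          parent[c] = p;                       // collapse the tree
        }
        order[p] = next;                       // parent keeps the largest order
      }
      // Group {0,1,2} fills slots 0..2 with the principal column last; 3 keeps slot 3.
      assert(order[0] == 2 && order[1] == 0 && order[2] == 1 && order[3] == 3);
      return 0;
    }
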
+
+
+/* ========================================================================== */
+/* === detect_super_cols ==================================================== */
+/* ========================================================================== */
+
+/*
+  Detects supercolumns by finding matches between columns in the hash buckets.
+  Checks amongst columns in the set A [row_start ... row_start + row_length-1].
+  The columns under consideration are currently *not* in the degree lists,
+  and have already been placed in the hash buckets.
+
+  The hash bucket for columns whose hash function is equal to h is stored
+  as follows:
+
+  if head [h] is >= 0, then head [h] contains a degree list, so:
+
+  head [h] is the first column in degree bucket h.
+  Col [head [h]].headhash gives the first column in hash bucket h.
+
+  otherwise, the degree list is empty, and:
+
+  -(head [h] + 2) is the first column in hash bucket h.
+
+  For a column c in a hash bucket, Col [c].shared3.prev is NOT a "previous
+  column" pointer.  Col [c].shared3.hash is used instead as the hash number
+  for that column.  The value of Col [c].shared4.hash_next is the next column
+  in the same hash bucket.
+
+  Assuming no, or "few" hash collisions, the time taken by this routine is
+  linear in the sum of the sizes (lengths) of each column whose score has
+  just been computed in the approximate degree computation.
+  Not user-callable.
+*/
+template <typename Index>
+static void detect_super_cols
+(
+  /* === Parameters ======================================================= */
+  
+  colamd_col<Index> Col [],    /* of size n_col+1 */
+  Index A [],     /* row indices of A */
+  Index head [],    /* head of degree lists and hash buckets */
+  Index row_start,    /* pointer to set of columns to check */
+  Index row_length    /* number of columns to check */
+)
+{
+  /* === Local variables ================================================== */
+
+  Index hash ;      /* hash value for a column */
+  Index *rp ;     /* pointer to a row */
+  Index c ;     /* a column index */
+  Index super_c ;   /* column index of the column to absorb into */
+  Index *cp1 ;      /* column pointer for column super_c */
+  Index *cp2 ;      /* column pointer for column c */
+  Index length ;    /* length of column super_c */
+  Index prev_c ;    /* column preceding c in hash bucket */
+  Index i ;     /* loop counter */
+  Index *rp_end ;   /* pointer to the end of the row */
+  Index col ;     /* a column index in the row to check */
+  Index head_column ;   /* first column in hash bucket or degree list */
+  Index first_col ;   /* first column in hash bucket */
+
+  /* === Consider each column in the row ================================== */
+
+  rp = &A [row_start] ;
+  rp_end = rp + row_length ;
+  while (rp < rp_end)
+  {
+    col = *rp++ ;
+    if (COL_IS_DEAD (col))
+    {
+      continue ;
+    }
+
+    /* get hash number for this column */
+    hash = Col [col].shared3.hash ;
+    COLAMD_ASSERT (hash <= n_col) ;
+
+    /* === Get the first column in this hash bucket ===================== */
+
+    head_column = head [hash] ;
+    if (head_column > COLAMD_EMPTY)
+    {
+      first_col = Col [head_column].shared3.headhash ;
+    }
+    else
+    {
+      first_col = - (head_column + 2) ;
+    }
+
+    /* === Consider each column in the hash bucket ====================== */
+
+    for (super_c = first_col ; super_c != COLAMD_EMPTY ;
+	 super_c = Col [super_c].shared4.hash_next)
+    {
+      COLAMD_ASSERT (COL_IS_ALIVE (super_c)) ;
+      COLAMD_ASSERT (Col [super_c].shared3.hash == hash) ;
+      length = Col [super_c].length ;
+
+      /* prev_c is the column preceding column c in the hash bucket */
+      prev_c = super_c ;
+
+      /* === Compare super_c with all columns after it ================ */
+
+      for (c = Col [super_c].shared4.hash_next ;
+	   c != COLAMD_EMPTY ; c = Col [c].shared4.hash_next)
+      {
+	COLAMD_ASSERT (c != super_c) ;
+	COLAMD_ASSERT (COL_IS_ALIVE (c)) ;
+	COLAMD_ASSERT (Col [c].shared3.hash == hash) ;
+
+	/* not identical if lengths or scores are different */
+	if (Col [c].length != length ||
+	    Col [c].shared2.score != Col [super_c].shared2.score)
+	{
+	  prev_c = c ;
+	  continue ;
+	}
+
+	/* compare the two columns */
+	cp1 = &A [Col [super_c].start] ;
+	cp2 = &A [Col [c].start] ;
+
+	for (i = 0 ; i < length ; i++)
+	{
+	  /* the columns are "clean" (no dead rows) */
+	  COLAMD_ASSERT (ROW_IS_ALIVE (*cp1))  ;
+	  COLAMD_ASSERT (ROW_IS_ALIVE (*cp2))  ;
+	  /* row indices appear in the same order for both supercols, */
+	  /* so no gather/scatter is necessary */
+	  if (*cp1++ != *cp2++)
+	  {
+	    break ;
+	  }
+	}
+
+	/* the two columns are different if the for-loop "broke" */
+	if (i != length)
+	{
+	  prev_c = c ;
+	  continue ;
+	}
+
+	/* === Got it!  two columns are identical =================== */
+
+	COLAMD_ASSERT (Col [c].shared2.score == Col [super_c].shared2.score) ;
+
+	Col [super_c].shared1.thickness += Col [c].shared1.thickness ;
+	Col [c].shared1.parent = super_c ;
+	KILL_NON_PRINCIPAL_COL (c) ;
+	/* order c later, in order_children() */
+	Col [c].shared2.order = COLAMD_EMPTY ;
+	/* remove c from hash bucket */
+	Col [prev_c].shared4.hash_next = Col [c].shared4.hash_next ;
+      }
+    }
+
+    /* === Empty this hash bucket ======================================= */
+
+    if (head_column > COLAMD_EMPTY)
+    {
+      /* corresponding degree list "hash" is not empty */
+      Col [head_column].shared3.headhash = COLAMD_EMPTY ;
+    }
+    else
+    {
+      /* corresponding degree list "hash" is empty */
+      head [hash] = COLAMD_EMPTY ;
+    }
+  }
+}
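
The dual use of head [] described in the comment above relies on packing a column index c as -(c + 2), which keeps packed entries strictly below COLAMD_EMPTY (-1), so a degree-list head, an empty slot, and a packed hash-bucket head never collide. A minimal sketch of the encoding (plain C++, nothing assumed beyond the arithmetic):

    #include <cassert>

    int main()
    {
      const int EMPTY = -1;
      int head = EMPTY;          // one slot of the head [] array

      int c = 7;
      head = -(c + 2);           // pack column 7 into an otherwise-empty slot
      assert(head < EMPTY);      // packed entries cannot be mistaken for EMPTY
      assert(-(head + 2) == c);  // unpacking recovers the column index
      return 0;
    }
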
+
+
+/* ========================================================================== */
+/* === garbage_collection =================================================== */
+/* ========================================================================== */
+
+/*
+  Defragments and compacts columns and rows in the workspace A.  Used when
+  all available memory has been used while performing row merging.  Returns
+  the index of the first free position in A, after garbage collection.  The
+  time taken by this routine is linear in the size of the array A, which is
+  itself linear in the number of nonzeros in the input matrix.
+  Not user-callable.
+*/
+template <typename Index>
+static Index garbage_collection  /* returns the new value of pfree */
+  (
+    /* === Parameters ======================================================= */
+    
+    Index n_row,      /* number of rows */
+    Index n_col,      /* number of columns */
+    Colamd_Row<Index> Row [],    /* row info */
+    colamd_col<Index> Col [],    /* column info */
+    Index A [],     /* A [0 ... Alen-1] holds the matrix */
+    Index *pfree      /* &A [0] ... pfree is in use */
+    )
+{
+  /* === Local variables ================================================== */
+
+  Index *psrc ;     /* source pointer */
+  Index *pdest ;    /* destination pointer */
+  Index j ;     /* counter */
+  Index r ;     /* a row index */
+  Index c ;     /* a column index */
+  Index length ;    /* length of a row or column */
+
+  /* === Defragment the columns =========================================== */
+
+  pdest = &A[0] ;
+  for (c = 0 ; c < n_col ; c++)
+  {
+    if (COL_IS_ALIVE (c))
+    {
+      psrc = &A [Col [c].start] ;
+
+      /* move and compact the column */
+      COLAMD_ASSERT (pdest <= psrc) ;
+      Col [c].start = (Index) (pdest - &A [0]) ;
+      length = Col [c].length ;
+      for (j = 0 ; j < length ; j++)
+      {
+	r = *psrc++ ;
+	if (ROW_IS_ALIVE (r))
+	{
+	  *pdest++ = r ;
+	}
+      }
+      Col [c].length = (Index) (pdest - &A [Col [c].start]) ;
+    }
+  }
+
+  /* === Prepare to defragment the rows =================================== */
+
+  for (r = 0 ; r < n_row ; r++)
+  {
+    if (ROW_IS_ALIVE (r))
+    {
+      if (Row [r].length == 0)
+      {
+	/* this row is of zero length; it cannot be compacted, so kill it */
+	COLAMD_DEBUG3 (("Defrag row kill\n")) ;
+	KILL_ROW (r) ;
+      }
+      else
+      {
+	/* save first column index in Row [r].shared2.first_column */
+	psrc = &A [Row [r].start] ;
+	Row [r].shared2.first_column = *psrc ;
+	COLAMD_ASSERT (ROW_IS_ALIVE (r)) ;
+	/* flag the start of the row with the one's complement of row */
+	*psrc = ONES_COMPLEMENT (r) ;
+
+      }
+    }
+  }
+
+  /* === Defragment the rows ============================================== */
+
+  psrc = pdest ;
+  while (psrc < pfree)
+  {
+    /* find a negative number ... the start of a row */
+    if (*psrc++ < 0)
+    {
+      psrc-- ;
+      /* get the row index */
+      r = ONES_COMPLEMENT (*psrc) ;
+      COLAMD_ASSERT (r >= 0 && r < n_row) ;
+      /* restore first column index */
+      *psrc = Row [r].shared2.first_column ;
+      COLAMD_ASSERT (ROW_IS_ALIVE (r)) ;
+
+      /* move and compact the row */
+      COLAMD_ASSERT (pdest <= psrc) ;
+      Row [r].start = (Index) (pdest - &A [0]) ;
+      length = Row [r].length ;
+      for (j = 0 ; j < length ; j++)
+      {
+	c = *psrc++ ;
+	if (COL_IS_ALIVE (c))
+	{
+	  *pdest++ = c ;
+	}
+      }
+      Row [r].length = (Index) (pdest - &A [Row [r].start]) ;
+
+    }
+  }
+  /* ensure we found all the rows */
+  COLAMD_ASSERT (debug_rows == 0) ;
+
+  /* === Return the new value of pfree ==================================== */
+
+  return ((Index) (pdest - &A [0])) ;
+}
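
The row sweep above works because the first entry of each live row is temporarily replaced by the one's complement of the row index: column indices are non-negative, so a negative entry unambiguously marks a row start, while the displaced entry waits in shared2.first_column. A tiny stand-alone sketch of the trick (illustrative values only):

    #include <cassert>

    int main()
    {
      int A[3] = {7, 8, 9};      // some row's column indices
      int first_column = A[0];   // save the entry we are about to displace
      int r = 2;                 // the row that starts here

      A[0] = ~r;                 // flag the row start with the one's complement
      assert(A[0] < 0);          // column indices are >= 0, so flags stand out

      assert(~A[0] == r);        // the sweep recovers the row index ...
      A[0] = first_column;       // ... and restores the displaced entry
      return 0;
    }
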
+
+
+/* ========================================================================== */
+/* === clear_mark =========================================================== */
+/* ========================================================================== */
+
+/*
+  Clears the Row [].shared2.mark array, and returns the new value of
+  tag_mark.  Not user-callable.
+*/
+template <typename Index>
+static inline  Index clear_mark  /* return the new value for tag_mark */
+  (
+      /* === Parameters ======================================================= */
+
+    Index n_row,    /* number of rows in A */
+    Colamd_Row<Index> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */
+    )
+{
+  /* === Local variables ================================================== */
+
+  Index r ;
+
+  for (r = 0 ; r < n_row ; r++)
+  {
+    if (ROW_IS_ALIVE (r))
+    {
+      Row [r].shared2.mark = 0 ;
+    }
+  }
+  return (1) ;
+}
+
+
+} // namespace internal 
+#endif
diff --git a/ThirdParty/eigen3/Eigen/src/OrderingMethods/Ordering.h b/ThirdParty/eigen3/Eigen/src/OrderingMethods/Ordering.h
new file mode 100644
index 00000000000..b4da6531a1d
--- /dev/null
+++ b/ThirdParty/eigen3/Eigen/src/OrderingMethods/Ordering.h
@@ -0,0 +1,150 @@
+ 
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012  Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ORDERING_H
+#define EIGEN_ORDERING_H
+
+namespace Eigen {
+  
+#include "Eigen_Colamd.h"
+
+namespace internal {
+    
+/** \internal
+  * \ingroup OrderingMethods_Module
+  * \returns the symmetric pattern A^T+A from the input matrix A. 
+  * FIXME: The values should not be considered here
+  */
+template<typename MatrixType> 
+void ordering_helper_at_plus_a(const MatrixType& mat, MatrixType& symmat)
+{
+  MatrixType C;
+  C = mat.transpose(); // NOTE: Could be  costly
+  for (int i = 0; i < C.rows(); i++) 
+  {
+      for (typename MatrixType::InnerIterator it(C, i); it; ++it)
+        it.valueRef() = 0.0;
+  }
+  symmat = C + mat; 
+}
+    
+}
+
+#ifndef EIGEN_MPL2_ONLY
+
+/** \ingroup OrderingMethods_Module
+  * \class AMDOrdering
+  *
+  * Functor computing the \em approximate \em minimum \em degree ordering.
+  * If the matrix is not structurally symmetric, an ordering of A^T+A is computed.
+  * \tparam  Index The type of indices of the matrix 
+  * \sa COLAMDOrdering
+  */
+template <typename Index>
+class AMDOrdering
+{
+  public:
+    typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+    
+    /** Compute the permutation vector from a sparse matrix.
+     * This routine is much faster if the input matrix is column-major.
+     */
+    template <typename MatrixType>
+    void operator()(const MatrixType& mat, PermutationType& perm)
+    {
+      // Compute the symmetric pattern
+      SparseMatrix<typename MatrixType::Scalar, ColMajor, Index> symm;
+      internal::ordering_helper_at_plus_a(mat,symm); 
+    
+      // Call the AMD routine 
+      //m_mat.prune(keep_diag());
+      internal::minimum_degree_ordering(symm, perm);
+    }
+    
+    /** Compute the permutation vector from a selfadjoint matrix */
+    template <typename SrcType, unsigned int SrcUpLo> 
+    void operator()(const SparseSelfAdjointView<SrcType, SrcUpLo>& mat, PermutationType& perm)
+    { 
+      SparseMatrix<typename SrcType::Scalar, ColMajor, Index> C; C = mat;
+      
+      // Call the AMD routine 
+      // m_mat.prune(keep_diag()); //Remove the diagonal elements 
+      internal::minimum_degree_ordering(C, perm);
+    }
+};
+
+#endif // EIGEN_MPL2_ONLY
+
+/** \ingroup OrderingMethods_Module
+  * \class NaturalOrdering
+  *
+  * Functor computing the natural ordering (identity)
+  * 
+  * \note Returns an empty permutation matrix
+  * \tparam  Index The type of indices of the matrix 
+  */
+template <typename Index>
+class NaturalOrdering
+{
+  public:
+    typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+    
+    /** Compute the permutation vector from a column-major sparse matrix */
+    template <typename MatrixType>
+    void operator()(const MatrixType& /*mat*/, PermutationType& perm)
+    {
+      perm.resize(0); 
+    }
+    
+};
+
+/** \ingroup OrderingMethods_Module
+  * \class COLAMDOrdering
+  *
+  * Functor computing the \em column \em approximate \em minimum \em degree ordering.
+  * The matrix should be in column-major format.
+  */
+template<typename Index>
+class COLAMDOrdering
+{
+  public:
+    typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType; 
+    typedef Matrix<Index, Dynamic, 1> IndexVector;
+    
+    /** Compute the permutation vector from a sparse matrix */
+    template <typename MatrixType>
+    void operator() (const MatrixType& mat, PermutationType& perm)
+    {
+      Index m = mat.rows();
+      Index n = mat.cols();
+      Index nnz = mat.nonZeros();
+      // Get the recommended value of Alen to be used by colamd
+      Index Alen = internal::colamd_recommended(nnz, m, n); 
+      // Set the default parameters
+      double knobs [COLAMD_KNOBS]; 
+      Index stats [COLAMD_STATS];
+      internal::colamd_set_defaults(knobs);
+      
+      Index info;
+      IndexVector p(n+1), A(Alen); 
+      for(Index i=0; i <= n; i++)   p(i) = mat.outerIndexPtr()[i];
+      for(Index i=0; i < nnz; i++)  A(i) = mat.innerIndexPtr()[i];
+      // Call Colamd routine to compute the ordering 
+      info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats); 
+      eigen_assert( info && "COLAMD failed " );
+      
+      perm.resize(n);
+      for (Index i = 0; i < n; i++) perm.indices()(p(i)) = i;
+    }
+};
+
+} // end namespace Eigen
+
+#endif
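
As a usage sketch of the functors defined in this new file (assuming only the Eigen headers already touched by this patch), an ordering is obtained by applying the functor to a compressed column-major matrix:

    #include <iostream>
    #include <Eigen/Sparse>
    #include <Eigen/OrderingMethods>

    int main()
    {
      // COLAMD expects a compressed, column-major matrix.
      Eigen::SparseMatrix<double> A(4, 4);
      A.insert(0, 0) = 4; A.insert(1, 1) = 3; A.insert(2, 2) = 2; A.insert(3, 3) = 1;
      A.insert(0, 3) = 1; A.insert(3, 0) = 1;
      A.makeCompressed();

      Eigen::COLAMDOrdering<int> ordering;
      Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
      ordering(A, perm);

      std::cout << "fill-reducing permutation: "
                << perm.indices().transpose() << std::endl;
      return 0;
    }
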
diff --git a/ThirdParty/eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h b/ThirdParty/eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h
index 82e137c645a..a955287d1c9 100644
--- a/ThirdParty/eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h
+++ b/ThirdParty/eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h
@@ -157,27 +157,6 @@ class PastixBase : internal::noncopyable
     template<typename Rhs,typename Dest>
     bool _solve (const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const;
     
-    /** \internal */
-    template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
-    void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
-    {
-      eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
-      eigen_assert(rows()==b.rows());
-      
-      // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
-      static const int NbColsAtOnce = 1;
-      int rhsCols = b.cols();
-      int size = b.rows();
-      Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,rhsCols);
-      for(int k=0; k<rhsCols; k+=NbColsAtOnce)
-      {
-        int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
-        tmp.leftCols(actualCols) = b.middleCols(k,actualCols);
-        tmp.leftCols(actualCols) = derived().solve(tmp.leftCols(actualCols));
-        dest.middleCols(k,actualCols) = tmp.leftCols(actualCols).sparseView();
-      }
-    }
-    
     Derived& derived()
     {
       return *static_cast<Derived*>(this);
@@ -731,7 +710,7 @@ struct sparse_solve_retval<PastixBase<_MatrixType>, Rhs>
 
   template<typename Dest> void evalTo(Dest& dst) const
   {
-    dec()._solve_sparse(rhs(),dst);
+    this->defaultEvalTo(dst);
   }
 };
 
diff --git a/ThirdParty/eigen3/Eigen/src/PardisoSupport/PardisoSupport.h b/ThirdParty/eigen3/Eigen/src/PardisoSupport/PardisoSupport.h
index d623bf5186e..1c48f0df7b5 100644
--- a/ThirdParty/eigen3/Eigen/src/PardisoSupport/PardisoSupport.h
+++ b/ThirdParty/eigen3/Eigen/src/PardisoSupport/PardisoSupport.h
@@ -206,29 +206,6 @@ class PardisoImpl
     template<typename BDerived, typename XDerived>
     bool _solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>& x) const;
 
-    /** \internal */
-    template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
-    void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
-    {
-      eigen_assert(m_size==b.rows());
-
-      // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
-      static const int NbColsAtOnce = 4;
-      int rhsCols = b.cols();
-      int size = b.rows();
-      // Pardiso cannot solve in-place,
-      // so we need two temporaries
-      Eigen::Matrix<DestScalar,Dynamic,Dynamic,ColMajor> tmp_rhs(size,rhsCols);
-      Eigen::Matrix<DestScalar,Dynamic,Dynamic,ColMajor> tmp_res(size,rhsCols);
-      for(int k=0; k<rhsCols; k+=NbColsAtOnce)
-      {
-        int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
-        tmp_rhs.leftCols(actualCols) = b.middleCols(k,actualCols);
-        tmp_res.leftCols(actualCols) = derived().solve(tmp_rhs.leftCols(actualCols));
-        dest.middleCols(k,actualCols) = tmp_res.leftCols(actualCols).sparseView();
-      }
-    }
-
   protected:
     void pardisoRelease()
     {
@@ -604,7 +581,7 @@ struct sparse_solve_retval<PardisoImpl<Derived>, Rhs>
 
   template<typename Dest> void evalTo(Dest& dst) const
   {
-    dec().derived()._solve_sparse(rhs(),dst);
+    this->defaultEvalTo(dst);
   }
 };
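
The two hunks above (and the SimplicialCholesky hunk later in this patch) delete per-backend copies of the same loop in favour of the shared defaultEvalTo helper. For reference, a sketch of the deleted pattern, generic over any decomposition exposing a dense solve(); the helper name is illustrative, not Eigen API:

    #include <algorithm>
    #include <Eigen/Sparse>

    template<typename Solver, typename SparseRhs, typename SparseDest>
    void solve_sparse_by_blocks(const Solver& solver, const SparseRhs& b, SparseDest& dest)
    {
      typedef typename SparseDest::Scalar Scalar;
      // Process the sparse rhs a few columns at a time through a dense temporary.
      static const int NbColsAtOnce = 4;
      int rhsCols = b.cols();
      int size = b.rows();
      Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> tmp(size, rhsCols);
      for (int k = 0; k < rhsCols; k += NbColsAtOnce)
      {
        int actualCols = std::min<int>(rhsCols - k, NbColsAtOnce);
        tmp.leftCols(actualCols) = b.middleCols(k, actualCols);            // densify
        tmp.leftCols(actualCols) = solver.solve(tmp.leftCols(actualCols)); // dense solve
        dest.middleCols(k, actualCols) = tmp.leftCols(actualCols).sparseView();
      }
    }
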
 
diff --git a/ThirdParty/eigen3/Eigen/src/QR/FullPivHouseholderQR.h b/ThirdParty/eigen3/Eigen/src/QR/FullPivHouseholderQR.h
index 37898e77cc2..0dd5ad3472a 100644
--- a/ThirdParty/eigen3/Eigen/src/QR/FullPivHouseholderQR.h
+++ b/ThirdParty/eigen3/Eigen/src/QR/FullPivHouseholderQR.h
@@ -100,6 +100,18 @@ template<typename _MatrixType> class FullPivHouseholderQR
         m_isInitialized(false),
         m_usePrescribedThreshold(false) {}
 
+    /** \brief Constructs a QR factorization from a given matrix
+      *
+      * This constructor computes the QR factorization of the matrix \a matrix by calling
+      * the method compute(). It is a shortcut for:
+      * 
+      * \code
+      * FullPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
+      * qr.compute(matrix);
+      * \endcode
+      * 
+      * \sa compute()
+      */
     FullPivHouseholderQR(const MatrixType& matrix)
       : m_qr(matrix.rows(), matrix.cols()),
         m_hCoeffs((std::min)(matrix.rows(), matrix.cols())),
@@ -152,12 +164,14 @@ template<typename _MatrixType> class FullPivHouseholderQR
 
     FullPivHouseholderQR& compute(const MatrixType& matrix);
 
+    /** \returns a const reference to the column permutation matrix */
     const PermutationType& colsPermutation() const
     {
       eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
       return m_cols_permutation;
     }
 
+    /** \returns a const reference to the vector of indices representing the row transpositions */
     const IntColVectorType& rowsTranspositions() const
     {
       eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
@@ -201,11 +215,12 @@ template<typename _MatrixType> class FullPivHouseholderQR
       */
     inline Index rank() const
     {
+      using std::abs;
       eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
-      RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold();
+      RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();
       Index result = 0;
       for(Index i = 0; i < m_nonzero_pivots; ++i)
-        result += (internal::abs(m_qr.coeff(i,i)) > premultiplied_threshold);
+        result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);
       return result;
     }
 
@@ -274,6 +289,11 @@ template<typename _MatrixType> class FullPivHouseholderQR
 
     inline Index rows() const { return m_qr.rows(); }
     inline Index cols() const { return m_qr.cols(); }
+    
+    /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
+      * 
+      * For advanced uses only.
+      */
     const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
 
     /** Allows to prescribe a threshold to be used by certain methods, such as rank(),
@@ -362,9 +382,10 @@ template<typename _MatrixType> class FullPivHouseholderQR
 template<typename MatrixType>
 typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::absDeterminant() const
 {
+  using std::abs;
   eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
   eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
-  return internal::abs(m_qr.diagonal().prod());
+  return abs(m_qr.diagonal().prod());
 }
 
 template<typename MatrixType>
@@ -375,9 +396,16 @@ typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDetermin
   return m_qr.diagonal().cwiseAbs().array().log().sum();
 }
 
+/** Performs the QR factorization of the given matrix \a matrix. The result of
+  * the factorization is stored into \c *this, and a reference to \c *this
+  * is returned.
+  *
+  * \sa class FullPivHouseholderQR, FullPivHouseholderQR(const MatrixType&)
+  */
 template<typename MatrixType>
 FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
 {
+  using std::abs;
   Index rows = matrix.rows();
   Index cols = matrix.cols();
   Index size = (std::min)(rows,cols);
@@ -439,7 +467,7 @@ FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(cons
     m_qr.coeffRef(k,k) = beta;
 
     // remember the maximum absolute value of diagonal coefficients
-    if(internal::abs(beta) > m_maxpivot) m_maxpivot = internal::abs(beta);
+    if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta);
 
     m_qr.bottomRightCorner(rows-k, cols-k-1)
         .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1));
@@ -544,6 +572,7 @@ public:
   template <typename ResultType>
   void evalTo(ResultType& result, WorkVectorType& workspace) const
   {
+    using numext::conj;
     // compute the product H'_0 H'_1 ... H'_n-1,
     // where H_k is the k-th Householder transformation I - h_k v_k v_k'
     // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]
@@ -555,7 +584,7 @@ public:
     for (Index k = size-1; k >= 0; k--)
     {
       result.block(k, k, rows-k, rows-k)
-            .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), internal::conj(m_hCoeffs.coeff(k)), &workspace.coeffRef(k));
+            .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), conj(m_hCoeffs.coeff(k)), &workspace.coeffRef(k));
       result.row(k).swap(result.row(m_rowsTranspositions.coeff(k)));
     }
   }
diff --git a/ThirdParty/eigen3/Eigen/src/QR/HouseholderQR.h b/ThirdParty/eigen3/Eigen/src/QR/HouseholderQR.h
index 5bcb32c1e18..abc61bcbbe1 100644
--- a/ThirdParty/eigen3/Eigen/src/QR/HouseholderQR.h
+++ b/ThirdParty/eigen3/Eigen/src/QR/HouseholderQR.h
@@ -57,14 +57,14 @@ template<typename _MatrixType> class HouseholderQR
     typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
     typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
     typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
-    typedef typename HouseholderSequence<MatrixType,HCoeffsType>::ConjugateReturnType HouseholderSequenceType;
+    typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType;
 
     /**
-    * \brief Default Constructor.
-    *
-    * The default constructor is useful in cases in which the user intends to
-    * perform decompositions via HouseholderQR::compute(const MatrixType&).
-    */
+      * \brief Default Constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via HouseholderQR::compute(const MatrixType&).
+      */
     HouseholderQR() : m_qr(), m_hCoeffs(), m_temp(), m_isInitialized(false) {}
 
     /** \brief Default Constructor with memory preallocation
@@ -79,6 +79,18 @@ template<typename _MatrixType> class HouseholderQR
         m_temp(cols),
         m_isInitialized(false) {}
 
+    /** \brief Constructs a QR factorization from a given matrix
+      *
+      * This constructor computes the QR factorization of the matrix \a matrix by calling
+      * the method compute(). It is a shortcut for:
+      * 
+      * \code
+      * HouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
+      * qr.compute(matrix);
+      * \endcode
+      * 
+      * \sa compute()
+      */
     HouseholderQR(const MatrixType& matrix)
       : m_qr(matrix.rows(), matrix.cols()),
         m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
@@ -113,6 +125,14 @@ template<typename _MatrixType> class HouseholderQR
       return internal::solve_retval<HouseholderQR, Rhs>(*this, b.derived());
     }
 
+    /** This method returns an expression of the unitary matrix Q as a sequence of Householder transformations.
+      *
+      * The returned expression can directly be used to perform matrix products. It can also be assigned to a dense Matrix object.
+      * Here is an example showing how to recover the full or thin matrix Q, as well as how to perform matrix products using operator*:
+      *
+      * Example: \include HouseholderQR_householderQ.cpp
+      * Output: \verbinclude HouseholderQR_householderQ.out
+      */
     HouseholderSequenceType householderQ() const
     {
       eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
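
A brief usage sketch matching the new householderQ() documentation (standard Eigen API only): Q can be materialized in full, thinned by multiplying with a rectangular identity, or applied lazily as an operator:

    #include <iostream>
    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
      Eigen::HouseholderQR<Eigen::MatrixXd> qr(A);

      Eigen::MatrixXd Q     = qr.householderQ();                 // full 5x5 Q
      Eigen::MatrixXd thinQ = qr.householderQ()
                            * Eigen::MatrixXd::Identity(5, 3);   // thin 5x3 Q

      Eigen::VectorXd v = Eigen::VectorXd::Random(5);
      Eigen::VectorXd w = qr.householderQ().transpose() * v;     // apply Q^T without forming Q

      std::cout << (Q * Q.transpose()
                    - Eigen::MatrixXd::Identity(5, 5)).norm() << std::endl;
      return 0;
    }
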
@@ -161,6 +181,11 @@ template<typename _MatrixType> class HouseholderQR
 
     inline Index rows() const { return m_qr.rows(); }
     inline Index cols() const { return m_qr.cols(); }
+    
+    /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
+      * 
+      * For advanced uses only.
+      */
     const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
 
   protected:
@@ -173,9 +198,10 @@ template<typename _MatrixType> class HouseholderQR
 template<typename MatrixType>
 typename MatrixType::RealScalar HouseholderQR<MatrixType>::absDeterminant() const
 {
+  using std::abs;
   eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
   eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
-  return internal::abs(m_qr.diagonal().prod());
+  return abs(m_qr.diagonal().prod());
 }
 
 template<typename MatrixType>
@@ -232,7 +258,6 @@ void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs,
 {
   typedef typename MatrixQR::Index Index;
   typedef typename MatrixQR::Scalar Scalar;
-  typedef typename MatrixQR::RealScalar RealScalar;
   typedef Block<MatrixQR,Dynamic,Dynamic> BlockType;
 
   Index rows = mat.rows();
@@ -309,6 +334,12 @@ struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
 
 } // end namespace internal
 
+/** Performs the QR factorization of the given matrix \a matrix. The result of
+  * the factorization is stored into \c *this, and a reference to \c *this
+  * is returned.
+  *
+  * \sa class HouseholderQR, HouseholderQR(const MatrixType&)
+  */
 template<typename MatrixType>
 HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
 {
diff --git a/ThirdParty/eigen3/Eigen/src/SVD/JacobiSVD.h b/ThirdParty/eigen3/Eigen/src/SVD/JacobiSVD.h
index a7dbf073766..4786768ffe4 100644
--- a/ThirdParty/eigen3/Eigen/src/SVD/JacobiSVD.h
+++ b/ThirdParty/eigen3/Eigen/src/SVD/JacobiSVD.h
@@ -78,7 +78,8 @@ public:
   {
     if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
     {
-      m_qr = FullPivHouseholderQR<MatrixType>(svd.rows(), svd.cols());
+      m_qr.~QRType();
+      ::new (&m_qr) QRType(svd.rows(), svd.cols());
     }
     if (svd.m_computeFullU) m_workspace.resize(svd.rows());
   }
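
The hunks in this file replace assignment of a freshly constructed QR object with an explicit destroy-and-placement-new, which resizes the member without requiring an assignment operator. A minimal sketch of the idiom, with `Widget` as a hypothetical stand-in for the QR type:

    #include <new>

    struct Widget
    {
      int rows, cols;
      Widget(int r, int c) : rows(r), cols(c) {}
    };

    struct Holder
    {
      Widget w;
      Holder() : w(0, 0) {}

      void reallocate(int r, int c)
      {
        if (w.rows != r || w.cols != c)
        {
          w.~Widget();               // destroy the old member in place ...
          ::new (&w) Widget(r, c);   // ... and rebuild it in the same storage
        }
      }
    };

    int main()
    {
      Holder h;
      h.reallocate(5, 3);
      return 0;
    }
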
@@ -96,7 +97,8 @@ public:
     return false;
   }
 private:
-  FullPivHouseholderQR<MatrixType> m_qr;
+  typedef FullPivHouseholderQR<MatrixType> QRType;
+  QRType m_qr;
   WorkspaceType m_workspace;
 };
 
@@ -121,7 +123,8 @@ public:
   {
     if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
     {
-      m_qr = FullPivHouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
+      m_qr.~QRType();
+      ::new (&m_qr) QRType(svd.cols(), svd.rows());
     }
     m_adjoint.resize(svd.cols(), svd.rows());
     if (svd.m_computeFullV) m_workspace.resize(svd.cols());
@@ -141,7 +144,8 @@ public:
     else return false;
   }
 private:
-  FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
+  typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+  QRType m_qr;
   TransposeTypeWithSameStorageOrder m_adjoint;
   typename internal::plain_row_type<MatrixType>::type m_workspace;
 };
@@ -158,7 +162,8 @@ public:
   {
     if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
     {
-      m_qr = ColPivHouseholderQR<MatrixType>(svd.rows(), svd.cols());
+      m_qr.~QRType();
+      ::new (&m_qr) QRType(svd.rows(), svd.cols());
     }
     if (svd.m_computeFullU) m_workspace.resize(svd.rows());
     else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
@@ -183,7 +188,8 @@ public:
   }
 
 private:
-  ColPivHouseholderQR<MatrixType> m_qr;
+  typedef ColPivHouseholderQR<MatrixType> QRType;
+  QRType m_qr;
   typename internal::plain_col_type<MatrixType>::type m_workspace;
 };
 
@@ -209,7 +215,8 @@ public:
   {
     if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
     {
-      m_qr = ColPivHouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
+      m_qr.~QRType();
+      ::new (&m_qr) QRType(svd.cols(), svd.rows());
     }
     if (svd.m_computeFullV) m_workspace.resize(svd.cols());
     else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
@@ -237,7 +244,8 @@ public:
   }
 
 private:
-  ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
+  typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+  QRType m_qr;
   TransposeTypeWithSameStorageOrder m_adjoint;
   typename internal::plain_row_type<MatrixType>::type m_workspace;
 };
@@ -254,7 +262,8 @@ public:
   {
     if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
     {
-      m_qr = HouseholderQR<MatrixType>(svd.rows(), svd.cols());
+      m_qr.~QRType();
+      ::new (&m_qr) QRType(svd.rows(), svd.cols());
     }
     if (svd.m_computeFullU) m_workspace.resize(svd.rows());
     else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
@@ -278,7 +287,8 @@ public:
     return false;
   }
 private:
-  HouseholderQR<MatrixType> m_qr;
+  typedef HouseholderQR<MatrixType> QRType;
+  QRType m_qr;
   typename internal::plain_col_type<MatrixType>::type m_workspace;
 };
 
@@ -304,7 +314,8 @@ public:
   {
     if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
     {
-      m_qr = HouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
+      m_qr.~QRType();
+      ::new (&m_qr) QRType(svd.cols(), svd.rows());
     }
     if (svd.m_computeFullV) m_workspace.resize(svd.cols());
     else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
@@ -332,7 +343,8 @@ public:
   }
 
 private:
-  HouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
+  typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+  QRType m_qr;
   TransposeTypeWithSameStorageOrder m_adjoint;
   typename internal::plain_row_type<MatrixType>::type m_workspace;
 };
@@ -359,9 +371,10 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
   typedef typename SVD::Index Index;
   static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
   {
+    using std::sqrt;
     Scalar z;
     JacobiRotation<Scalar> rot;
-    RealScalar n = sqrt(abs2(work_matrix.coeff(p,p)) + abs2(work_matrix.coeff(q,p)));
+    RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
     if(n==0)
     {
       z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
@@ -398,9 +411,10 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
                             JacobiRotation<RealScalar> *j_left,
                             JacobiRotation<RealScalar> *j_right)
 {
+  using std::sqrt;
   Matrix<RealScalar,2,2> m;
-  m << real(matrix.coeff(p,p)), real(matrix.coeff(p,q)),
-       real(matrix.coeff(q,p)), real(matrix.coeff(q,q));
+  m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
+       numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
   JacobiRotation<RealScalar> rot1;
   RealScalar t = m.coeff(0,0) + m.coeff(1,1);
   RealScalar d = m.coeff(1,0) - m.coeff(0,1);
@@ -412,7 +426,7 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
   else
   {
     RealScalar u = d / t;
-    rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + abs2(u));
+    rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u));
     rot1.s() = rot1.c() * u;
   }
   m.applyOnTheLeft(0,1,rot1);
@@ -709,12 +723,14 @@ void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, u
   }
   m_diagSize = (std::min)(m_rows, m_cols);
   m_singularValues.resize(m_diagSize);
-  m_matrixU.resize(m_rows, m_computeFullU ? m_rows
-                          : m_computeThinU ? m_diagSize
-                          : 0);
-  m_matrixV.resize(m_cols, m_computeFullV ? m_cols
-                          : m_computeThinV ? m_diagSize
-                          : 0);
+  if(RowsAtCompileTime==Dynamic)
+    m_matrixU.resize(m_rows, m_computeFullU ? m_rows
+                            : m_computeThinU ? m_diagSize
+                            : 0);
+  if(ColsAtCompileTime==Dynamic)
+    m_matrixV.resize(m_cols, m_computeFullV ? m_cols
+                            : m_computeThinV ? m_diagSize
+                            : 0);
   m_workMatrix.resize(m_diagSize, m_diagSize);
   
   if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
@@ -725,6 +741,7 @@ template<typename MatrixType, int QRPreconditioner>
 JacobiSVD<MatrixType, QRPreconditioner>&
 JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
 {
+  using std::abs;
   allocate(matrix.rows(), matrix.cols(), computationOptions);
 
   // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
@@ -762,9 +779,9 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
         // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
         // keep us iterating forever. Similarly, small denormal numbers are considered zero.
         using std::max;
-        RealScalar threshold = (max)(considerAsZero, precision * (max)(internal::abs(m_workMatrix.coeff(p,p)),
-                                                                       internal::abs(m_workMatrix.coeff(q,q))));
-        if((max)(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p))) > threshold)
+        RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
+                                                                       abs(m_workMatrix.coeff(q,q))));
+        if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold)
         {
           finished = false;
 
@@ -788,7 +805,7 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
 
   for(Index i = 0; i < m_diagSize; ++i)
   {
-    RealScalar a = internal::abs(m_workMatrix.coeff(i,i));
+    RealScalar a = abs(m_workMatrix.coeff(i,i));
     m_singularValues.coeffRef(i) = a;
     if(computeU() && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
   }
@@ -833,17 +850,12 @@ struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
     // A = U S V^*
     // So A^{-1} = V S^{-1} U^*
 
-    Index diagSize = (std::min)(dec().rows(), dec().cols());
-    typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);
-
+    Matrix<Scalar, Dynamic, Rhs::ColsAtCompileTime, 0, _MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime> tmp;
     Index nonzeroSingVals = dec().nonzeroSingularValues();
-    invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
-    invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
-
-    dst = dec().matrixV().leftCols(diagSize)
-        * invertedSingVals.asDiagonal()
-        * dec().matrixU().leftCols(diagSize).adjoint()
-        * rhs();
+    
+    tmp.noalias() = dec().matrixU().leftCols(nonzeroSingVals).adjoint() * rhs();
+    tmp = dec().singularValues().head(nonzeroSingVals).asDiagonal().inverse() * tmp;
+    dst = dec().matrixV().leftCols(nonzeroSingVals) * tmp;
   }
 };
 } // end namespace internal
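
The rewritten solve above applies U^*, then the inverted nonzero singular values, then V, instead of assembling the pseudo-inverse explicitly. A sketch of the same computation through the public JacobiSVD API, checked against solve():

    #include <iostream>
    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
      Eigen::VectorXd b = Eigen::VectorXd::Random(5);

      Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      Eigen::VectorXd x_ref = svd.solve(b);
      int r = svd.nonzeroSingularValues();

      Eigen::VectorXd tmp = svd.matrixU().leftCols(r).adjoint() * b;
      tmp = svd.singularValues().head(r).asDiagonal().inverse() * tmp;
      Eigen::VectorXd x = svd.matrixV().leftCols(r) * tmp;

      std::cout << (x - x_ref).norm() << std::endl;  // should be ~0
      return 0;
    }
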
diff --git a/ThirdParty/eigen3/Eigen/src/SVD/UpperBidiagonalization.h b/ThirdParty/eigen3/Eigen/src/SVD/UpperBidiagonalization.h
index 213b3100df5..587de37a5ad 100644
--- a/ThirdParty/eigen3/Eigen/src/SVD/UpperBidiagonalization.h
+++ b/ThirdParty/eigen3/Eigen/src/SVD/UpperBidiagonalization.h
@@ -39,7 +39,7 @@ template<typename _MatrixType> class UpperBidiagonalization
               CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Diagonal<const MatrixType,0> >
             > HouseholderUSequenceType;
     typedef HouseholderSequence<
-              const MatrixType,
+              const typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type,
               Diagonal<const MatrixType,1>,
               OnTheRight
             > HouseholderVSequenceType;
@@ -74,7 +74,7 @@ template<typename _MatrixType> class UpperBidiagonalization
     const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy
     {
       eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
-      return HouseholderVSequenceType(m_householder, m_householder.const_derived().template diagonal<1>())
+      return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>())
              .setLength(m_householder.cols()-1)
              .setShift(1);
     }
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h b/ThirdParty/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h
index 9bf38ab2d91..f41d7e010f7 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h
@@ -1,52 +1,12 @@
 // This file is part of Eigen, a lightweight C++ template library
 // for linear algebra.
 //
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
 //
 // This Source Code Form is subject to the terms of the Mozilla
 // Public License v. 2.0. If a copy of the MPL was not distributed
 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
-/*
-
-NOTE: the _symbolic, and _numeric functions has been adapted from
-      the LDL library:
-
-LDL Copyright (c) 2005 by Timothy A. Davis.  All Rights Reserved.
-
-LDL License:
-
-    Your use or distribution of LDL or any modified version of
-    LDL implies that you agree to this License.
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Lesser General Public
-    License as published by the Free Software Foundation; either
-    version 2.1 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Lesser General Public License for more details.
-
-    You should have received a copy of the GNU Lesser General Public
-    License along with this library; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301
-    USA
-
-    Permission is hereby granted to use or copy this program under the
-    terms of the GNU LGPL, provided that the Copyright, this License,
-    and the Availability of the original version is retained on all copies.
-    User documentation of any code that uses this code or any modified
-    version of this code must cite the Copyright, this License, the
-    Availability note, and "Used by permission." Permission to modify
-    the code and to distribute modified code is granted, provided the
-    Copyright, this License, and the Availability note are retained,
-    and a notice that the code was modified is included.
- */
-
-#include "../Core/util/NonMPL2.h"
-
 #ifndef EIGEN_SIMPLICIAL_CHOLESKY_H
 #define EIGEN_SIMPLICIAL_CHOLESKY_H
 
@@ -215,27 +175,6 @@ class SimplicialCholeskyBase : internal::noncopyable
         dest = m_Pinv * dest;
     }
 
-    /** \internal */
-    template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
-    void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
-    {
-      eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
-      eigen_assert(m_matrix.rows()==b.rows());
-      
-      // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
-      static const int NbColsAtOnce = 4;
-      int rhsCols = b.cols();
-      int size = b.rows();
-      Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,rhsCols);
-      for(int k=0; k<rhsCols; k+=NbColsAtOnce)
-      {
-        int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
-        tmp.leftCols(actualCols) = b.middleCols(k,actualCols);
-        tmp.leftCols(actualCols) = derived().solve(tmp.leftCols(actualCols));
-        dest.middleCols(k,actualCols) = tmp.leftCols(actualCols).sparseView();
-      }
-    }
-
 #endif // EIGEN_PARSED_BY_DOXYGEN
 
   protected:
@@ -425,7 +364,7 @@ public:
     Scalar determinant() const
     {
       Scalar detL = Base::m_matrix.diagonal().prod();
-      return internal::abs2(detL);
+      return numext::abs2(detL);
     }
 };
 
@@ -660,7 +599,7 @@ public:
       else
       {
         Scalar detL = Diagonal<const CholMatrixType>(Base::m_matrix).prod();
-        return internal::abs2(detL);
+        return numext::abs2(detL);
       }
     }
     
@@ -693,151 +632,6 @@ void SimplicialCholeskyBase<Derived>::ordering(const MatrixType& a, CholMatrixTy
   ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_P);
 }
 
-template<typename Derived>
-void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT)
-{
-  const Index size = ap.rows();
-  m_matrix.resize(size, size);
-  m_parent.resize(size);
-  m_nonZerosPerCol.resize(size);
-  
-  ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);
-
-  for(Index k = 0; k < size; ++k)
-  {
-    /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
-    m_parent[k] = -1;             /* parent of k is not yet known */
-    tags[k] = k;                  /* mark node k as visited */
-    m_nonZerosPerCol[k] = 0;      /* count of nonzeros in column k of L */
-    for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
-    {
-      Index i = it.index();
-      if(i < k)
-      {
-        /* follow path from i to root of etree, stop at flagged node */
-        for(; tags[i] != k; i = m_parent[i])
-        {
-          /* find parent of i if not yet determined */
-          if (m_parent[i] == -1)
-            m_parent[i] = k;
-          m_nonZerosPerCol[i]++;        /* L (k,i) is nonzero */
-          tags[i] = k;                  /* mark i as visited */
-        }
-      }
-    }
-  }
-  
-  /* construct Lp index array from m_nonZerosPerCol column counts */
-  Index* Lp = m_matrix.outerIndexPtr();
-  Lp[0] = 0;
-  for(Index k = 0; k < size; ++k)
-    Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1);
-
-  m_matrix.resizeNonZeros(Lp[size]);
-  
-  m_isInitialized     = true;
-  m_info              = Success;
-  m_analysisIsOk      = true;
-  m_factorizationIsOk = false;
-}
-
-
-template<typename Derived>
-template<bool DoLDLT>
-void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType& ap)
-{
-  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
-  eigen_assert(ap.rows()==ap.cols());
-  const Index size = ap.rows();
-  eigen_assert(m_parent.size()==size);
-  eigen_assert(m_nonZerosPerCol.size()==size);
-
-  const Index* Lp = m_matrix.outerIndexPtr();
-  Index* Li = m_matrix.innerIndexPtr();
-  Scalar* Lx = m_matrix.valuePtr();
-
-  ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0);
-  ei_declare_aligned_stack_constructed_variable(Index,  pattern, size, 0);
-  ei_declare_aligned_stack_constructed_variable(Index,  tags, size, 0);
-  
-  bool ok = true;
-  m_diag.resize(DoLDLT ? size : 0);
-  
-  for(Index k = 0; k < size; ++k)
-  {
-    // compute nonzero pattern of kth row of L, in topological order
-    y[k] = 0.0;                     // Y(0:k) is now all zero
-    Index top = size;               // stack for pattern is empty
-    tags[k] = k;                    // mark node k as visited
-    m_nonZerosPerCol[k] = 0;        // count of nonzeros in column k of L
-    for(typename MatrixType::InnerIterator it(ap,k); it; ++it)
-    {
-      Index i = it.index();
-      if(i <= k)
-      {
-        y[i] += internal::conj(it.value());            /* scatter A(i,k) into Y (sum duplicates) */
-        Index len;
-        for(len = 0; tags[i] != k; i = m_parent[i])
-        {
-          pattern[len++] = i;     /* L(k,i) is nonzero */
-          tags[i] = k;            /* mark i as visited */
-        }
-        while(len > 0)
-          pattern[--top] = pattern[--len];
-      }
-    }
-
-    /* compute numerical values kth row of L (a sparse triangular solve) */
-
-    RealScalar d = internal::real(y[k]) * m_shiftScale + m_shiftOffset;    // get D(k,k), apply the shift function, and clear Y(k)
-    y[k] = 0.0;
-    for(; top < size; ++top)
-    {
-      Index i = pattern[top];       /* pattern[top:n-1] is pattern of L(:,k) */
-      Scalar yi = y[i];             /* get and clear Y(i) */
-      y[i] = 0.0;
-      
-      /* the nonzero entry L(k,i) */
-      Scalar l_ki;
-      if(DoLDLT)
-        l_ki = yi / m_diag[i];       
-      else
-        yi = l_ki = yi / Lx[Lp[i]];
-      
-      Index p2 = Lp[i] + m_nonZerosPerCol[i];
-      Index p;
-      for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p)
-        y[Li[p]] -= internal::conj(Lx[p]) * yi;
-      d -= internal::real(l_ki * internal::conj(yi));
-      Li[p] = k;                          /* store L(k,i) in column form of L */
-      Lx[p] = l_ki;
-      ++m_nonZerosPerCol[i];              /* increment count of nonzeros in col i */
-    }
-    if(DoLDLT)
-    {
-      m_diag[k] = d;
-      if(d == RealScalar(0))
-      {
-        ok = false;                         /* failure, D(k,k) is zero */
-        break;
-      }
-    }
-    else
-    {
-      Index p = Lp[k] + m_nonZerosPerCol[k]++;
-      Li[p] = k ;                /* store L(k,k) = sqrt (d) in column k */
-      if(d <= RealScalar(0)) {
-        ok = false;              /* failure, matrix is not positive definite */
-        break;
-      }
-      Lx[p] = internal::sqrt(d) ;
-    }
-  }
-
-  m_info = ok ? Success : NumericalIssue;
-  m_factorizationIsOk = true;
-}
-
 namespace internal {
   
 template<typename Derived, typename Rhs>
@@ -862,7 +656,7 @@ struct sparse_solve_retval<SimplicialCholeskyBase<Derived>, Rhs>
 
   template<typename Dest> void evalTo(Dest& dst) const
   {
-    dec().derived()._solve_sparse(rhs(),dst);
+    this->defaultEvalTo(dst);
   }
 };
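
A minimal usage sketch of the sparse-rhs solve path that now goes through defaultEvalTo(); this assumes only the public Eigen 3.2 API visible in this patch:

#include <Eigen/Sparse>

int main()
{
  // Small SPD system with a sparse right-hand side.
  Eigen::SparseMatrix<double> A(4,4), B(4,2), X;
  A.setIdentity();
  A *= 3.0;
  B.insert(0,0) = 1.0;
  B.insert(2,1) = 2.0;

  Eigen::SimplicialLLT<Eigen::SparseMatrix<double> > llt(A);
  X = llt.solve(B);   // sparse rhs: evaluated via sparse_solve_retval::evalTo -> defaultEvalTo
  return llt.info() == Eigen::Success ? 0 : 1;
}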
 
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h b/ThirdParty/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
new file mode 100644
index 00000000000..7aaf702beed
--- /dev/null
+++ b/ThirdParty/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
@@ -0,0 +1,199 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+
+/*
+
+NOTE: these functions have been adapted from the LDL library:
+
+LDL Copyright (c) 2005 by Timothy A. Davis.  All Rights Reserved.
+
+LDL License:
+
+    Your use or distribution of LDL or any modified version of
+    LDL implies that you agree to this License.
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301
+    USA
+
+    Permission is hereby granted to use or copy this program under the
+    terms of the GNU LGPL, provided that the Copyright, this License,
+    and the Availability of the original version is retained on all copies.
+    User documentation of any code that uses this code or any modified
+    version of this code must cite the Copyright, this License, the
+    Availability note, and "Used by permission." Permission to modify
+    the code and to distribute modified code is granted, provided the
+    Copyright, this License, and the Availability note are retained,
+    and a notice that the code was modified is included.
+ */
+
+#include "../Core/util/NonMPL2.h"
+
+#ifndef EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
+#define EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
+
+namespace Eigen {
+
+template<typename Derived>
+void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT)
+{
+  const Index size = ap.rows();
+  m_matrix.resize(size, size);
+  m_parent.resize(size);
+  m_nonZerosPerCol.resize(size);
+
+  ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);
+
+  for(Index k = 0; k < size; ++k)
+  {
+    /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
+    m_parent[k] = -1;             /* parent of k is not yet known */
+    tags[k] = k;                  /* mark node k as visited */
+    m_nonZerosPerCol[k] = 0;      /* count of nonzeros in column k of L */
+    for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
+    {
+      Index i = it.index();
+      if(i < k)
+      {
+        /* follow path from i to root of etree, stop at flagged node */
+        for(; tags[i] != k; i = m_parent[i])
+        {
+          /* find parent of i if not yet determined */
+          if (m_parent[i] == -1)
+            m_parent[i] = k;
+          m_nonZerosPerCol[i]++;        /* L (k,i) is nonzero */
+          tags[i] = k;                  /* mark i as visited */
+        }
+      }
+    }
+  }
+
+  /* construct Lp index array from m_nonZerosPerCol column counts */
+  Index* Lp = m_matrix.outerIndexPtr();
+  Lp[0] = 0;
+  for(Index k = 0; k < size; ++k)
+    Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1);
+
+  m_matrix.resizeNonZeros(Lp[size]);
+
+  m_isInitialized     = true;
+  m_info              = Success;
+  m_analysisIsOk      = true;
+  m_factorizationIsOk = false;
+}
+
+
+template<typename Derived>
+template<bool DoLDLT>
+void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType& ap)
+{
+  using std::sqrt;
+
+  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+  eigen_assert(ap.rows()==ap.cols());
+  const Index size = ap.rows();
+  eigen_assert(m_parent.size()==size);
+  eigen_assert(m_nonZerosPerCol.size()==size);
+
+  const Index* Lp = m_matrix.outerIndexPtr();
+  Index* Li = m_matrix.innerIndexPtr();
+  Scalar* Lx = m_matrix.valuePtr();
+
+  ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0);
+  ei_declare_aligned_stack_constructed_variable(Index,  pattern, size, 0);
+  ei_declare_aligned_stack_constructed_variable(Index,  tags, size, 0);
+
+  bool ok = true;
+  m_diag.resize(DoLDLT ? size : 0);
+
+  for(Index k = 0; k < size; ++k)
+  {
+    // compute nonzero pattern of kth row of L, in topological order
+    y[k] = 0.0;                     // Y(0:k) is now all zero
+    Index top = size;               // stack for pattern is empty
+    tags[k] = k;                    // mark node k as visited
+    m_nonZerosPerCol[k] = 0;        // count of nonzeros in column k of L
+    for(typename MatrixType::InnerIterator it(ap,k); it; ++it)
+    {
+      Index i = it.index();
+      if(i <= k)
+      {
+        y[i] += numext::conj(it.value());            /* scatter A(i,k) into Y (sum duplicates) */
+        Index len;
+        for(len = 0; tags[i] != k; i = m_parent[i])
+        {
+          pattern[len++] = i;     /* L(k,i) is nonzero */
+          tags[i] = k;            /* mark i as visited */
+        }
+        while(len > 0)
+          pattern[--top] = pattern[--len];
+      }
+    }
+
+    /* compute numerical values kth row of L (a sparse triangular solve) */
+
+    RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset;    // get D(k,k), apply the shift function, and clear Y(k)
+    y[k] = 0.0;
+    for(; top < size; ++top)
+    {
+      Index i = pattern[top];       /* pattern[top:n-1] is pattern of L(:,k) */
+      Scalar yi = y[i];             /* get and clear Y(i) */
+      y[i] = 0.0;
+
+      /* the nonzero entry L(k,i) */
+      Scalar l_ki;
+      if(DoLDLT)
+        l_ki = yi / m_diag[i];
+      else
+        yi = l_ki = yi / Lx[Lp[i]];
+
+      Index p2 = Lp[i] + m_nonZerosPerCol[i];
+      Index p;
+      for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p)
+        y[Li[p]] -= numext::conj(Lx[p]) * yi;
+      d -= numext::real(l_ki * numext::conj(yi));
+      Li[p] = k;                          /* store L(k,i) in column form of L */
+      Lx[p] = l_ki;
+      ++m_nonZerosPerCol[i];              /* increment count of nonzeros in col i */
+    }
+    if(DoLDLT)
+    {
+      m_diag[k] = d;
+      if(d == RealScalar(0))
+      {
+        ok = false;                         /* failure, D(k,k) is zero */
+        break;
+      }
+    }
+    else
+    {
+      Index p = Lp[k] + m_nonZerosPerCol[k]++;
+      Li[p] = k ;                /* store L(k,k) = sqrt (d) in column k */
+      if(d <= RealScalar(0)) {
+        ok = false;              /* failure, matrix is not positive definite */
+        break;
+      }
+      Lx[p] = sqrt(d) ;
+    }
+  }
+
+  m_info = ok ? Success : NumericalIssue;
+  m_factorizationIsOk = true;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
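
A minimal usage sketch of the two-phase API these routines implement, assuming only public Eigen 3.2 calls: analyzePattern() drives analyzePattern_preordered() (elimination tree and column counts), and factorize() drives factorize_preordered<DoLDLT>() (the up-looking numeric factorization above):

#include <Eigen/Sparse>
#include <vector>

int main()
{
  // 2x2 SPD matrix assembled from triplets.
  typedef Eigen::Triplet<double> T;
  std::vector<T> coeffs;
  coeffs.push_back(T(0,0,4.0));
  coeffs.push_back(T(1,1,4.0));
  coeffs.push_back(T(0,1,1.0));
  coeffs.push_back(T(1,0,1.0));
  Eigen::SparseMatrix<double> A(2,2);
  A.setFromTriplets(coeffs.begin(), coeffs.end());

  Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > ldlt;
  ldlt.analyzePattern(A);  // symbolic phase: etree + nonzero counts of L
  ldlt.factorize(A);       // numeric phase: sparse triangular solves per column
  Eigen::VectorXd x = ldlt.solve(Eigen::VectorXd::Ones(2));
  return (ldlt.info() == Eigen::Success && x.size() == 2) ? 0 : 1;
}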
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseAssign.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseAssign.h
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseBlock.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseBlock.h
index eefd8070251..0b3e193dbc5 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseBlock.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseBlock.h
@@ -12,51 +12,37 @@
 
 namespace Eigen { 
 
-namespace internal {
-template<typename MatrixType, int Size>
-struct traits<SparseInnerVectorSet<MatrixType, Size> >
+template<typename XprType, int BlockRows, int BlockCols>
+class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
+  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
 {
-  typedef typename traits<MatrixType>::Scalar Scalar;
-  typedef typename traits<MatrixType>::Index Index;
-  typedef typename traits<MatrixType>::StorageKind StorageKind;
-  typedef MatrixXpr XprKind;
-  enum {
-    IsRowMajor = (int(MatrixType::Flags)&RowMajorBit)==RowMajorBit,
-    Flags = MatrixType::Flags,
-    RowsAtCompileTime = IsRowMajor ? Size : MatrixType::RowsAtCompileTime,
-    ColsAtCompileTime = IsRowMajor ? MatrixType::ColsAtCompileTime : Size,
-    MaxRowsAtCompileTime = RowsAtCompileTime,
-    MaxColsAtCompileTime = ColsAtCompileTime,
-    CoeffReadCost = MatrixType::CoeffReadCost
-  };
-};
-} // end namespace internal
-
-template<typename MatrixType, int Size>
-class SparseInnerVectorSet : internal::no_assignment_operator,
-  public SparseMatrixBase<SparseInnerVectorSet<MatrixType, Size> >
-{
-  public:
-
-    enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
-
-    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
-    class InnerIterator: public MatrixType::InnerIterator
+    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
+    typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
+public:
+    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+protected:
+    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
+public:
+    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+    
+    class InnerIterator: public XprType::InnerIterator
     {
+        typedef typename BlockImpl::Index Index;
       public:
-        inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
-          : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        inline InnerIterator(const BlockType& xpr, Index outer)
+          : XprType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
         {}
         inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
         inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
       protected:
         Index m_outer;
     };
-    class ReverseInnerIterator: public MatrixType::ReverseInnerIterator
+    class ReverseInnerIterator: public XprType::ReverseInnerIterator
     {
+        typedef typename BlockImpl::Index Index;
       public:
-        inline ReverseInnerIterator(const SparseInnerVectorSet& xpr, Index outer)
-          : MatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        inline ReverseInnerIterator(const BlockType& xpr, Index outer)
+          : XprType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
         {}
         inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
         inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
@@ -64,39 +50,22 @@ class SparseInnerVectorSet : internal::no_assignment_operator,
         Index m_outer;
     };
 
-    inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
-      : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
-    {
-      eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
-    }
-
-    inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
-      : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
-    {
-      eigen_assert(Size!=Dynamic);
-      eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
-    }
-
-//     template<typename OtherDerived>
-//     inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
-//     {
-//       return *this;
-//     }
+    inline BlockImpl(const XprType& xpr, int i)
+      : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
+    {}
 
-//     template<typename Sparse>
-//     inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
-//     {
-//       return *this;
-//     }
+    inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+      : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
+    {}
 
     EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
     EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
 
   protected:
 
-    const typename MatrixType::Nested m_matrix;
+    typename XprType::Nested m_matrix;
     Index m_outerStart;
-    const internal::variable_if_dynamic<Index, Size> m_outerSize;
+    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
 };
 
 
@@ -104,32 +73,36 @@ class SparseInnerVectorSet : internal::no_assignment_operator,
 * specialisation for SparseMatrix
 ***************************************************************************/
 
-template<typename _Scalar, int _Options, typename _Index, int Size>
-class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
-  : public SparseMatrixBase<SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size> >
+template<typename _Scalar, int _Options, typename _Index, int BlockRows, int BlockCols>
+class BlockImpl<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true,Sparse>
+  : public SparseMatrixBase<Block<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true> >
 {
-    typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
-  public:
-
-    enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
-
-    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
-    class InnerIterator: public MatrixType::InnerIterator
+    typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
+    typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
+    typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
+public:
+    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+protected:
+    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
+public:
+    
+    class InnerIterator: public SparseMatrixType::InnerIterator
     {
       public:
-        inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
-          : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        inline InnerIterator(const BlockType& xpr, Index outer)
+          : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
         {}
         inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
         inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
       protected:
         Index m_outer;
     };
-    class ReverseInnerIterator: public MatrixType::ReverseInnerIterator
+    class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator
     {
       public:
-        inline ReverseInnerIterator(const SparseInnerVectorSet& xpr, Index outer)
-          : MatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        inline ReverseInnerIterator(const BlockType& xpr, Index outer)
+          : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
         {}
         inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
         inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
@@ -137,23 +110,18 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
         Index m_outer;
     };
 
-    inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
-      : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
-    {
-      eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
-    }
+    inline BlockImpl(const SparseMatrixType& xpr, int i)
+      : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
+    {}
 
-    inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
-      : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
-    {
-      eigen_assert(Size==1);
-      eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
-    }
+    inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+      : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
+    {}
 
     template<typename OtherDerived>
-    inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+    inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
     {
-      typedef typename internal::remove_all<typename MatrixType::Nested>::type _NestedMatrixType;
+      typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
       _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);
       // This assignment is slow if this vector set is not empty
       // and/or it is not at the end of the nonzeros of the underlying matrix.
@@ -163,70 +131,69 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
 
       // 2 - let's check whether there is enough allocated memory
       Index nnz           = tmp.nonZeros();
-      Index nnz_previous  = nonZeros();
-      Index free_size     = Index(matrix.data().allocatedSize()) + nnz_previous;
-      Index nnz_head      = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart];
-      Index tail          = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()];
-      Index nnz_tail      = matrix.nonZeros() - tail;
-
-      if(nnz>free_size)
+      Index start         = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
+      Index end           = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block
+      Index block_size    = end - start;                                                // available room in the current block
+      Index tail_size     = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;
+      
+      Index free_size     = m_matrix.isCompressed()
+                          ? Index(matrix.data().allocatedSize()) + block_size
+                          : block_size;
+
+      if(nnz>free_size) 
       {
         // realloc manually to reduce copies
-        typename MatrixType::Storage newdata(m_matrix.nonZeros() - nnz_previous + nnz);
+        typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);
 
-        std::memcpy(&newdata.value(0), &m_matrix.data().value(0), nnz_head*sizeof(Scalar));
-        std::memcpy(&newdata.index(0), &m_matrix.data().index(0), nnz_head*sizeof(Index));
+        std::memcpy(&newdata.value(0), &m_matrix.data().value(0), start*sizeof(Scalar));
+        std::memcpy(&newdata.index(0), &m_matrix.data().index(0), start*sizeof(Index));
 
-        std::memcpy(&newdata.value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
-        std::memcpy(&newdata.index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+        std::memcpy(&newdata.value(start), &tmp.data().value(0), nnz*sizeof(Scalar));
+        std::memcpy(&newdata.index(start), &tmp.data().index(0), nnz*sizeof(Index));
 
-        std::memcpy(&newdata.value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
-        std::memcpy(&newdata.index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
+        std::memcpy(&newdata.value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar));
+        std::memcpy(&newdata.index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index));
+        
+        newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);
 
         matrix.data().swap(newdata);
       }
       else
       {
         // no need to realloc, simply copy the tail at its respective position and insert tmp
-        matrix.data().resize(nnz_head + nnz + nnz_tail);
-
-        if(nnz<nnz_previous)
-        {
-          std::memcpy(&matrix.data().value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
-          std::memcpy(&matrix.data().index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
-        }
-        else
-        {
-          for(Index i=nnz_tail-1; i>=0; --i)
-          {
-            matrix.data().value(nnz_head+nnz+i) = matrix.data().value(tail+i);
-            matrix.data().index(nnz_head+nnz+i) = matrix.data().index(tail+i);
-          }
-        }
-
-        std::memcpy(&matrix.data().value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
-        std::memcpy(&matrix.data().index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+        matrix.data().resize(start + nnz + tail_size);
+
+        std::memmove(&matrix.data().value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar));
+        std::memmove(&matrix.data().index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index));
+
+        std::memcpy(&matrix.data().value(start), &tmp.data().value(0), nnz*sizeof(Scalar));
+        std::memcpy(&matrix.data().index(start), &tmp.data().index(0), nnz*sizeof(Index));
       }
+      
+      // update innerNonZeros
+      if(!m_matrix.isCompressed())
+        for(Index j=0; j<m_outerSize.value(); ++j)
+          matrix.innerNonZeroPtr()[m_outerStart+j] = tmp.innerVector(j).nonZeros();
 
       // update outer index pointers
-      Index p = nnz_head;
+      Index p = start;
       for(Index k=0; k<m_outerSize.value(); ++k)
       {
         matrix.outerIndexPtr()[m_outerStart+k] = p;
         p += tmp.innerVector(k).nonZeros();
       }
-      std::ptrdiff_t offset = nnz - nnz_previous;
+      std::ptrdiff_t offset = nnz - block_size;
       for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
       {
         matrix.outerIndexPtr()[k] += offset;
       }
 
-      return *this;
+      return derived();
     }
 
-    inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
+    inline BlockType& operator=(const BlockType& other)
     {
-      return operator=<SparseInnerVectorSet>(other);
+      return operator=<BlockType>(other);
     }
 
     inline const Scalar* valuePtr() const
@@ -252,12 +219,12 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
       else if(m_outerSize.value()==0)
         return 0;
       else
-        return Map<const Matrix<Index,Size,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
+        return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
     }
 
     const Scalar& lastCoeff() const
     {
-      EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl);
       eigen_assert(nonZeros()>0);
       if(m_matrix.isCompressed())
         return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
@@ -265,122 +232,173 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
         return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
     }
 
-//     template<typename Sparse>
-//     inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
-//     {
-//       return *this;
-//     }
-
     EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
     EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
 
   protected:
 
-    typename MatrixType::Nested m_matrix;
+    typename SparseMatrixType::Nested m_matrix;
     Index m_outerStart;
-    const internal::variable_if_dynamic<Index, Size> m_outerSize;
+    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
 
 };
 
 //----------
 
-/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */
-template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i)
-{
-  EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
-  return innerVector(i);
-}
-
-/** \returns the i-th row of the matrix \c *this. For row-major matrix only.
-  * (read-only version) */
-template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i) const
-{
-  EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
-  return innerVector(i);
-}
-
-/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */
-template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i)
-{
-  EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
-  return innerVector(i);
-}
-
-/** \returns the i-th column of the matrix \c *this. For column-major matrix only.
-  * (read-only version) */
-template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i) const
-{
-  EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
-  return innerVector(i);
-}
-
 /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
   * is col-major (resp. row-major).
   */
 template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer)
-{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
+{ return InnerVectorReturnType(derived(), outer); }
 
 /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
   * is col-major (resp. row-major). Read-only.
   */
 template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer) const
-{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
+{ return ConstInnerVectorReturnType(derived(), outer); }
 
-/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */
+/** \returns the \a outerSize columns (resp. rows) of the matrix \c *this, starting at
+  * column (resp. row) \a outerStart, if \c *this is col-major (resp. row-major).
+  */
 template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleRows(Index start, Index size)
+Block<Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
 {
-  EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
-  return innerVectors(start, size);
+  return Block<Derived,Dynamic,Dynamic,true>(derived(),
+                                             IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
+                                             IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
+  
 }
 
-/** \returns the i-th row of the matrix \c *this. For row-major matrix only.
-  * (read-only version) */
+/** \returns the \a outerSize columns (resp. rows) of the matrix \c *this, starting at
+  * column (resp. row) \a outerStart, if \c *this is col-major (resp. row-major). Read-only.
+  */
 template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleRows(Index start, Index size) const
+const Block<const Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
 {
-  EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
-  return innerVectors(start, size);
+  return Block<const Derived,Dynamic,Dynamic,true>(derived(),
+                                                  IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
+                                                  IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
+  
 }
 
-/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */
-template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleCols(Index start, Index size)
+/** Generic implementation of sparse Block expression.
+  * Read-only.
+  */
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
+  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
 {
-  EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
-  return innerVectors(start, size);
-}
+  typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
+  typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
+public:
+    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+
+    /** Column or Row constructor
+      */
+    inline BlockImpl(const XprType& xpr, int i)
+      : m_matrix(xpr),
+        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
+        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
+        m_blockRows(xpr.rows()),
+        m_blockCols(xpr.cols())
+    {}
+
+    /** Dynamic-size constructor
+      */
+    inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+      : m_matrix(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols)
+    {}
+
+    inline int rows() const { return m_blockRows.value(); }
+    inline int cols() const { return m_blockCols.value(); }
+
+    inline Scalar& coeffRef(int row, int col)
+    {
+      return m_matrix.const_cast_derived()
+               .coeffRef(row + m_startRow.value(), col + m_startCol.value());
+    }
 
-/** \returns the i-th column of the matrix \c *this. For column-major matrix only.
-  * (read-only version) */
-template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleCols(Index start, Index size) const
-{
-  EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
-  return innerVectors(start, size);
-}
+    inline const Scalar coeff(int row, int col) const
+    {
+      return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
+    }
+
+    inline Scalar& coeffRef(int index)
+    {
+      return m_matrix.const_cast_derived()
+             .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+                       m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+    }
+
+    inline const Scalar coeff(int index) const
+    {
+      return m_matrix
+             .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+                    m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+    }
+    
+    inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
+    
+    class InnerIterator : public _MatrixTypeNested::InnerIterator
+    {
+      typedef typename _MatrixTypeNested::InnerIterator Base;
+      const BlockType& m_block;
+      Index m_end;
+    public:
+
+      EIGEN_STRONG_INLINE InnerIterator(const BlockType& block, Index outer)
+        : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
+          m_block(block),
+          m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value())
+      {
+        while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) )
+          Base::operator++();
+      }
 
+      inline Index index()  const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
+      inline Index outer()  const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
+      inline Index row()    const { return Base::row()   - m_block.m_startRow.value(); }
+      inline Index col()    const { return Base::col()   - m_block.m_startCol.value(); }
+      
+      inline operator bool() const { return Base::operator bool() && Base::index() < m_end; }
+    };
+    class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator
+    {
+      typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
+      const BlockType& m_block;
+      Index m_begin;
+    public:
+
+      EIGEN_STRONG_INLINE ReverseInnerIterator(const BlockType& block, Index outer)
+        : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
+          m_block(block),
+          m_begin(IsRowMajor ? block.m_startCol.value() : block.m_startRow.value())
+      {
+        while( (Base::operator bool()) && (Base::index() >= (IsRowMajor ? m_block.m_startCol.value()+block.m_blockCols.value() : m_block.m_startRow.value()+block.m_blockRows.value())) )
+          Base::operator--();
+      }
 
+      inline Index index()  const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
+      inline Index outer()  const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
+      inline Index row()    const { return Base::row()   - m_block.m_startRow.value(); }
+      inline Index col()    const { return Base::col()   - m_block.m_startCol.value(); }
+      
+      inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; }
+    };
+  protected:
+    friend class InnerIterator;
+    friend class ReverseInnerIterator;
 
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
-  * is col-major (resp. row-major).
-  */
-template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
-{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+    typename XprType::Nested m_matrix;
+    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
+    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
+    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
+    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
 
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
-  * is col-major (resp. row-major). Read-only.
-  */
-template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
-{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+};
 
 } // end namespace Eigen
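
A usage sketch of the API this rewrite serves, assuming standard Eigen 3.2: innerVector()/innerVectors() now return Block expressions, and writing into an inner panel of a SparseMatrix dispatches to the specialized operator= above:

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> A(5,5), C(5,2);
  A.setIdentity();
  C.insert(0,0) = 1.0;
  C.insert(4,1) = 2.0;

  // Read access: an inner vector is now a Block<Derived,...,true> expression.
  int nnz0 = A.innerVector(0).nonZeros();

  // Write access: overwrite two inner vectors (columns, since A is
  // col-major); the memmove-based path above shifts the tail in place.
  A.middleCols(1,2) = C;
  return (nnz0 == 1 && A.nonZeros() == 5) ? 0 : 1;
}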
 
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseColEtree.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseColEtree.h
new file mode 100644
index 00000000000..f89ca381441
--- /dev/null
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseColEtree.h
@@ -0,0 +1,204 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+/* 
+ 
+ * NOTE: This file is a modified version of the sp_coletree.c file in SuperLU
+ 
+ * -- SuperLU routine (version 3.1) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * August 1, 2008
+ *
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSE_COLETREE_H
+#define SPARSE_COLETREE_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** Find the root of the tree/set containing the vertex i : Use Path halving */ 
+template<typename Index, typename IndexVector>
+Index etree_find (Index i, IndexVector& pp)
+{
+  Index p = pp(i); // Parent 
+  Index gp = pp(p); // Grand parent 
+  while (gp != p) 
+  {
+    pp(i) = gp; // Parent pointer on find path is changed to former grand parent
+    i = gp; 
+    p = pp(i);
+    gp = pp(p);
+  }
+  return p; 
+}
+
+/** Compute the column elimination tree of a sparse matrix
+  * \param mat The matrix in column-major format. 
+  * \param parent The elimination tree
+  * \param firstRowElt The column index of the first element in each row
+  * \param perm The permutation to apply to the columns of \b mat
+  */
+template <typename MatrixType, typename IndexVector>
+int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::Index *perm=0)
+{
+  typedef typename MatrixType::Index Index;
+  Index nc = mat.cols(); // Number of columns 
+  Index m = mat.rows();
+  IndexVector root(nc); // root of subtree of etree 
+  root.setZero();
+  IndexVector pp(nc); // disjoint sets 
+  pp.setZero(); // Initialize disjoint sets 
+  parent.resize(mat.cols());
+  //Compute first nonzero column in each row 
+  Index row,col; 
+  firstRowElt.resize(m);
+  firstRowElt.setConstant(nc);
+  firstRowElt.segment(0, nc).setLinSpaced(nc, 0, nc-1);
+  bool found_diag;
+  for (col = 0; col < nc; col++)
+  {
+    Index pcol = col;
+    if(perm) pcol  = perm[col];
+    for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it)
+    { 
+      row = it.row();
+      firstRowElt(row) = (std::min)(firstRowElt(row), col);
+    }
+  }
+  /* Compute etree by Liu's algorithm for symmetric matrices,
+          except use (firstRowElt[r],c) in place of an edge (r,c) of A.
+    Thus each row clique in A'*A is replaced by a star
+    centered at its first vertex, which has the same fill. */
+  Index rset, cset, rroot; 
+  for (col = 0; col < nc; col++) 
+  {
+    found_diag = false;
+    pp(col) = col; 
+    cset = col; 
+    root(cset) = col; 
+    parent(col) = nc; 
+    /* The diagonal element is treated here even if it does not exist in the matrix
+     * hence the loop is executed once more */ 
+    Index pcol = col;
+    if(perm) pcol  = perm[col];
+    for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it)
+    { //  A sequence of interleaved find and union is performed 
+      Index i = col;
+      if(it) i = it.index();
+      if (i == col) found_diag = true;
+      row = firstRowElt(i);
+      if (row >= col) continue; 
+      rset = internal::etree_find(row, pp); // Find the name of the set containing row
+      rroot = root(rset);
+      if (rroot != col) 
+      {
+        parent(rroot) = col; 
+        pp(cset) = rset; 
+        cset = rset; 
+        root(cset) = col; 
+      }
+    }
+  }
+  return 0;  
+}
+
+/** 
+  * Depth-first search from vertex n.  No recursion.
+  * This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.
+*/
+template <typename Index, typename IndexVector>
+void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, Index postnum)
+{
+  Index current = n, first, next;
+  while (postnum != n) 
+  {
+    // Get the first kid of the current node
+    first = first_kid(current);
+    
+    // no kid for the current node
+    if (first == -1) 
+    {
+      // Numbering this node because it has no kid 
+      post(current) = postnum++;
+      
+      // looking for the next kid 
+      next = next_kid(current); 
+      while (next == -1) 
+      {
+        // No more kids : back to the parent node
+        current = parent(current); 
+        // numbering the parent node 
+        post(current) = postnum++;
+        
+        // Get the next kid 
+        next = next_kid(current); 
+      }
+      // stopping criterion 
+      if (postnum == n+1) return; 
+      
+      // Updating current node 
+      current = next; 
+    }
+    else 
+    {
+      current = first; 
+    }
+  }
+}
+
+
+/**
+  * \brief Post order a tree 
+  * \param n the number of nodes
+  * \param parent Input tree
+  * \param post postordered tree
+  */
+template <typename Index, typename IndexVector>
+void treePostorder(Index n, IndexVector& parent, IndexVector& post)
+{
+  IndexVector first_kid, next_kid; // Linked list of children 
+  Index postnum; 
+  // Allocate storage for working arrays and results 
+  first_kid.resize(n+1); 
+  next_kid.setZero(n+1);
+  post.setZero(n+1);
+  
+  // Set up structure describing children
+  Index v, dad; 
+  first_kid.setConstant(-1); 
+  for (v = n-1; v >= 0; v--) 
+  {
+    dad = parent(v);
+    next_kid(v) = first_kid(dad); 
+    first_kid(dad) = v; 
+  }
+  
+  // Depth-first search from dummy root vertex #n
+  postnum = 0; 
+  internal::nr_etdfs(n, parent, first_kid, next_kid, post, postnum);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // SPARSE_COLETREE_H
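
A sketch of how these helpers chain together; note that coletree() and treePostorder() live in Eigen::internal and are unsupported implementation details, so the calls below are illustrative only:

#include <Eigen/Sparse>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  typedef Eigen::Matrix<SpMat::Index, Eigen::Dynamic, 1> IndexVector;

  SpMat A(4,4);
  A.insert(0,0) = 1.0; A.insert(1,0) = 1.0;
  A.insert(1,1) = 1.0; A.insert(2,2) = 1.0;
  A.insert(3,2) = 1.0; A.insert(3,3) = 1.0;
  A.makeCompressed();

  IndexVector parent, firstRowElt, post;
  // Column elimination tree of A, i.e. the etree of A'*A without forming it.
  Eigen::internal::coletree(A, parent, firstRowElt);
  // Post-order the tree; vertex n acts as the dummy root.
  Eigen::internal::treePostorder(SpMat::Index(A.cols()), parent, post);
  return 0;
}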
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
index d5f97f78fc9..ec86ca933c2 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -73,7 +73,7 @@ class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
     typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
       BinaryOp,Lhs,Rhs, InnerIterator> Base;
 
-    EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename CwiseBinaryOpImpl::Index outer)
+    EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer)
       : Base(binOp.derived(),outer)
     {}
 };
@@ -300,7 +300,7 @@ template<typename OtherDerived>
 EIGEN_STRONG_INLINE Derived &
 SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
 {
-  return *this = derived() - other.derived();
+  return derived() = derived() - other.derived();
 }
 
 template<typename Derived>
@@ -308,7 +308,7 @@ template<typename OtherDerived>
 EIGEN_STRONG_INLINE Derived &
 SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
 {
-  return *this = derived() + other.derived();
+  return derived() = derived() + other.derived();
 }
 
 template<typename Derived>
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseDenseProduct.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
index 6f32940d6c1..30975c29cce 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -39,7 +39,7 @@ struct traits<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
 {
   typedef Sparse StorageKind;
   typedef typename scalar_product_traits<typename traits<Lhs>::Scalar,
-                                            typename traits<Rhs>::Scalar>::ReturnType Scalar;
+                                         typename traits<Rhs>::Scalar>::ReturnType Scalar;
   typedef typename Lhs::Index Index;
   typedef typename Lhs::Nested LhsNested;
   typedef typename Rhs::Nested RhsNested;
@@ -111,6 +111,7 @@ template<typename Lhs, typename Rhs, bool Transpose>
 class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
 {
     typedef typename _LhsNested::InnerIterator Base;
+    typedef typename SparseDenseOuterProduct::Index Index;
   public:
     EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
       : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer))
@@ -150,7 +151,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, R
   typedef typename internal::remove_all<DenseResType>::type Res;
   typedef typename Lhs::Index Index;
   typedef typename Lhs::InnerIterator LhsInnerIterator;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
   {
     for(Index c=0; c<rhs.cols(); ++c)
     {
@@ -174,7 +175,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, C
   typedef typename internal::remove_all<DenseResType>::type Res;
   typedef typename Lhs::InnerIterator LhsInnerIterator;
   typedef typename Lhs::Index Index;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
   {
     for(Index c=0; c<rhs.cols(); ++c)
     {
@@ -196,7 +197,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, R
   typedef typename internal::remove_all<DenseResType>::type Res;
   typedef typename Lhs::InnerIterator LhsInnerIterator;
   typedef typename Lhs::Index Index;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
   {
     for(Index j=0; j<lhs.outerSize(); ++j)
     {
@@ -215,7 +216,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, C
   typedef typename internal::remove_all<DenseResType>::type Res;
   typedef typename Lhs::InnerIterator LhsInnerIterator;
   typedef typename Lhs::Index Index;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
   {
     for(Index j=0; j<lhs.outerSize(); ++j)
     {
@@ -244,7 +245,7 @@ class SparseTimeDenseProduct
     SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
     {}
 
-    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
     {
       internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha);
     }
@@ -274,7 +275,7 @@ class DenseTimeSparseProduct
     DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
     {}
 
-    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
     {
       Transpose<const _LhsNested> lhs_t(m_lhs);
       Transpose<const _RhsNested> rhs_t(m_rhs);
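
For context, a minimal sketch of the entry point these const-reference alpha signatures feed; a plain product assignment reaches scaleAndAddTo() with alpha = 1:

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> A(3,3);
  A.insert(0,0) = 2.0;
  A.insert(2,1) = -1.0;
  Eigen::VectorXd x = Eigen::VectorXd::Ones(3);
  // Evaluates through SparseTimeDenseProduct::scaleAndAddTo(dest, 1),
  // which forwards to internal::sparse_time_dense_product.
  Eigen::VectorXd y = A * x;
  return y.size() == 3 ? 0 : 1;
}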
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h
index ccba02124f2..1bb590e64d4 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h
@@ -78,7 +78,11 @@ class SparseDiagonalProduct
     EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)
 
     typedef internal::sparse_diagonal_product_inner_iterator_selector
-                <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+                      <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+    
+    // We do not want ReverseInnerIterator for diagonal-sparse products,
+    // but this dummy declaration is needed to make diag * sparse * diag compile.
+    class ReverseInnerIterator;
 
     EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
       : m_lhs(lhs), m_rhs(rhs)
@@ -118,13 +122,13 @@ class sparse_diagonal_product_inner_iterator_selector
 <Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
   : public CwiseBinaryOp<
       scalar_product_op<typename Lhs::Scalar>,
-      SparseInnerVectorSet<Rhs,1>,
-      typename Lhs::DiagonalVectorType>::InnerIterator
+      const typename Rhs::ConstInnerVectorReturnType,
+      const typename Lhs::DiagonalVectorType>::InnerIterator
 {
     typedef typename CwiseBinaryOp<
       scalar_product_op<typename Lhs::Scalar>,
-      SparseInnerVectorSet<Rhs,1>,
-      typename Lhs::DiagonalVectorType>::InnerIterator Base;
+      const typename Rhs::ConstInnerVectorReturnType,
+      const typename Lhs::DiagonalVectorType>::InnerIterator Base;
     typedef typename Lhs::Index Index;
     Index m_outer;
   public:
@@ -156,13 +160,13 @@ class sparse_diagonal_product_inner_iterator_selector
 <Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
   : public CwiseBinaryOp<
       scalar_product_op<typename Rhs::Scalar>,
-      SparseInnerVectorSet<Lhs,1>,
-      Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
+      const typename Lhs::ConstInnerVectorReturnType,
+      const Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
 {
     typedef typename CwiseBinaryOp<
       scalar_product_op<typename Rhs::Scalar>,
-      SparseInnerVectorSet<Lhs,1>,
-      Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
+      const typename Lhs::ConstInnerVectorReturnType,
+      const Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
     typedef typename Lhs::Index Index;
     Index m_outer;
   public:
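
A compile-level sketch of the diag * sparse * diag case that the dummy ReverseInnerIterator declaration exists for, assuming standard Eigen 3.2:

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> S(3,3);
  S.insert(0,0) = 1.0;
  S.insert(1,2) = 2.0;
  Eigen::VectorXd d(3);
  d << 1.0, 2.0, 3.0;
  // Without the dummy ReverseInnerIterator declaration, nesting two
  // diagonal products around a sparse matrix failed to instantiate.
  Eigen::SparseMatrix<double> R = d.asDiagonal() * S * d.asDiagonal();
  return R.nonZeros() == 2 ? 0 : 1;
}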
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseDot.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseDot.h
index 5c4a593dc01..db39c9aecc7 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseDot.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseDot.h
@@ -30,7 +30,7 @@ SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
   Scalar res(0);
   while (i)
   {
-    res += internal::conj(i.value()) * other.coeff(i.index());
+    res += numext::conj(i.value()) * other.coeff(i.index());
     ++i;
   }
   return res;
@@ -54,8 +54,8 @@ SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) cons
   typedef typename internal::remove_all<Nested>::type  NestedCleaned;
   typedef typename internal::remove_all<OtherNested>::type  OtherNestedCleaned;
 
-  const Nested nthis(derived());
-  const OtherNested nother(other.derived());
+  Nested nthis(derived());
+  OtherNested nother(other.derived());
 
   typename NestedCleaned::InnerIterator i(nthis,0);
   typename OtherNestedCleaned::InnerIterator j(nother,0);
@@ -64,7 +64,7 @@ SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) cons
   {
     if (i.index()==j.index())
     {
-      res += internal::conj(i.value()) * j.value();
+      res += numext::conj(i.value()) * j.value();
       ++i; ++j;
     }
     else if (i.index()<j.index())
@@ -79,16 +79,23 @@ template<typename Derived>
 inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
 SparseMatrixBase<Derived>::squaredNorm() const
 {
-  return internal::real((*this).cwiseAbs2().sum());
+  return numext::real((*this).cwiseAbs2().sum());
 }
 
 template<typename Derived>
 inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
 SparseMatrixBase<Derived>::norm() const
 {
-  return internal::sqrt(squaredNorm());
+  using std::sqrt;
+  return sqrt(squaredNorm());
 }
 
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::blueNorm() const
+{
+  return internal::blueNorm_impl(*this);
+}
 } // end namespace Eigen
 
 #endif // EIGEN_SPARSE_DOT_H
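// Usage sketch for the new blueNorm() (the function name below is made up):
// it returns the same value as norm() in exact arithmetic, but rescales the
// intermediates (Blue's algorithm) so the sum of squares cannot overflow or
// underflow for extreme coefficient magnitudes.
#include <Eigen/Sparse>

double example_blue_norm()
{
  Eigen::SparseVector<double> v(3);
  v.insert(0) = 1e200;        // norm() via squaredNorm() would overflow to inf
  v.insert(2) = 1e200;
  return v.blueNorm();        // finite, about 1.414e200
}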
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseMatrix.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseMatrix.h
index fc3749b5fd2..adceafe18d2 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseMatrix.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseMatrix.h
@@ -31,7 +31,7 @@ namespace Eigen {
   *
   * \tparam _Scalar the scalar type, i.e. the type of the coefficients
   * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
-  *                 is RowMajor. The default is 0 which means column-major.
+  *                 is ColMajor or RowMajor. The default is 0, which means column-major.
   * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
   *
   * This class can be extended with the help of the plugin mechanism described on the page
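// Illustrative declarations matching the updated documentation above:
// _Options picks the storage order and _Index the (signed) index type.
#include <Eigen/Sparse>

Eigen::SparseMatrix<double>                        A;  // column-major (the default)
Eigen::SparseMatrix<double, Eigen::RowMajor>       B;  // row-major
Eigen::SparseMatrix<double, Eigen::ColMajor, long> C;  // column-major, wider indices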
@@ -170,6 +170,8 @@ class SparseMatrix
       * This function returns Scalar(0) if the element is an explicit \em zero */
     inline Scalar coeff(Index row, Index col) const
     {
+      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+      
       const Index outer = IsRowMajor ? row : col;
       const Index inner = IsRowMajor ? col : row;
       Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
@@ -186,6 +188,8 @@ class SparseMatrix
       */
     inline Scalar& coeffRef(Index row, Index col)
     {
+      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+      
       const Index outer = IsRowMajor ? row : col;
       const Index inner = IsRowMajor ? col : row;
 
@@ -213,8 +217,10 @@ class SparseMatrix
       * inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
       *
       */
-    EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
+    Scalar& insert(Index row, Index col)
     {
+      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+      
       if(isCompressed())
       {
         reserve(VectorXi::Constant(outerSize(), 2));
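// Usage sketch for insert(), matching the complexity note above (the function
// name is made up): reserving the expected number of non-zeros per inner
// vector first keeps each insertion O(1).
#include <Eigen/Sparse>

void example_insert()
{
  Eigen::SparseMatrix<double> m(1000, 1000);       // column-major
  m.reserve(Eigen::VectorXi::Constant(1000, 6));   // ~6 non-zeros per column
  m.insert(0, 0) = 1.0;
  m.insert(5, 0) = 2.0;      // increasing inner index within a column: cheap
  m.makeCompressed();        // pack back to compressed (CCS) storage
}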
@@ -281,12 +287,12 @@ class SparseMatrix
     template<class SizesType>
     inline void reserveInnerVectors(const SizesType& reserveSizes)
     {
-      
       if(isCompressed())
       {
         std::size_t totalReserveSize = 0;
         // turn the matrix into non-compressed mode
-        m_innerNonZeros = new Index[m_outerSize];
+        m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
+        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
         
         // temporarily use m_innerNonZeros to hold the new starting points.
         Index* newOuterIndex = m_innerNonZeros;
@@ -299,11 +305,11 @@ class SparseMatrix
           totalReserveSize += reserveSizes[j];
         }
         m_data.reserve(totalReserveSize);
-        std::ptrdiff_t previousOuterIndex = m_outerIndex[m_outerSize];
-        for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
+        Index previousOuterIndex = m_outerIndex[m_outerSize];
+        for(Index j=m_outerSize-1; j>=0; --j)
         {
-          ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j];
-          for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+          Index innerNNZ = previousOuterIndex - m_outerIndex[j];
+          for(Index i=innerNNZ-1; i>=0; --i)
           {
             m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
             m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
@@ -318,25 +324,27 @@ class SparseMatrix
       }
       else
       {
-        Index* newOuterIndex = new Index[m_outerSize+1];
+        Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
+        if (!newOuterIndex) internal::throw_std_bad_alloc();
+        
         Index count = 0;
         for(Index j=0; j<m_outerSize; ++j)
         {
           newOuterIndex[j] = count;
           Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
-          Index toReserve = std::max<std::ptrdiff_t>(reserveSizes[j], alreadyReserved);
+          Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
           count += toReserve + m_innerNonZeros[j];
         }
         newOuterIndex[m_outerSize] = count;
         
         m_data.resize(count);
-        for(ptrdiff_t j=m_outerSize-1; j>=0; --j)
+        for(Index j=m_outerSize-1; j>=0; --j)
         {
-          std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j];
+          Index offset = newOuterIndex[j] - m_outerIndex[j];
           if(offset>0)
           {
-            std::ptrdiff_t innerNNZ = m_innerNonZeros[j];
-            for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+            Index innerNNZ = m_innerNonZeros[j];
+            for(Index i=innerNNZ-1; i>=0; --i)
             {
               m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
               m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
@@ -345,7 +353,7 @@ class SparseMatrix
         }
         
         std::swap(m_outerIndex, newOuterIndex);
-        delete[] newOuterIndex;
+        std::free(newOuterIndex);
       }
       
     }
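// Usage sketch for the per-vector reservation implemented above (the function
// name is made up): reserve() with a vector of sizes leaves room after the
// existing elements of each inner vector, switching to uncompressed mode if
// the matrix was compressed.
#include <Eigen/Sparse>

void example_reserve_per_vector()
{
  Eigen::SparseMatrix<double> m(100, 100);
  Eigen::VectorXi sizes = Eigen::VectorXi::Constant(100, 2);
  sizes(10) = 50;        // column 10 is expected to be much denser
  m.reserve(sizes);      // forwards to reserveInnerVectors()
}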
@@ -431,7 +439,7 @@ class SparseMatrix
     
     /** \internal
       * same as insert(Index,Index) except that the indices are given relative to the storage order */
-    EIGEN_DONT_INLINE Scalar& insertByOuterInner(Index j, Index i)
+    Scalar& insertByOuterInner(Index j, Index i)
     {
       return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
     }
@@ -448,7 +456,7 @@ class SparseMatrix
       for(Index j=1; j<m_outerSize; ++j)
       {
         Index nextOldStart = m_outerIndex[j+1];
-        std::ptrdiff_t offset = oldStart - m_outerIndex[j];
+        Index offset = oldStart - m_outerIndex[j];
         if(offset>0)
         {
           for(Index k=0; k<m_innerNonZeros[j]; ++k)
@@ -460,14 +468,26 @@ class SparseMatrix
         m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
         oldStart = nextOldStart;
       }
-      delete[] m_innerNonZeros;
+      std::free(m_innerNonZeros);
       m_innerNonZeros = 0;
       m_data.resize(m_outerIndex[m_outerSize]);
       m_data.squeeze();
     }
 
+    /** Turns the matrix into uncompressed mode */
+    void uncompress()
+    {
+      if(m_innerNonZeros != 0)
+        return;
+      m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
+      if (!m_innerNonZeros) internal::throw_std_bad_alloc();
+      for (Index i = 0; i < m_outerSize; i++)
+      {
+        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
+      }
+    }
+    
     /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
-    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
     {
       prune(default_prunning_func(reference,epsilon));
     }
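// Illustrative sketch of the storage modes handled above (the function name
// is made up): insertions leave a SparseMatrix uncompressed, i.e. with
// per-vector non-zero counts; makeCompressed() packs the data back, and the
// new uncompress() re-creates the counts from the outer index array.
#include <Eigen/Sparse>

void example_modes(Eigen::SparseMatrix<double>& m)
{
  m.makeCompressed();     // isCompressed() == true, m_innerNonZeros released
  m.uncompress();         // back to uncompressed mode, values unchanged
  m.prune(1.0, 1e-12);    // drop coefficients much smaller than the reference
}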
@@ -506,6 +526,70 @@ class SparseMatrix
       m_data.resize(k,0);
     }
 
+    /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
+      * \sa resizeNonZeros(Index), reserve(), setZero()
+      */
+    void conservativeResize(Index rows, Index cols) 
+    {
+      // No change
+      if (this->rows() == rows && this->cols() == cols) return;
+      
+      // If one dimension is null, then there is nothing to be preserved
+      if(rows==0 || cols==0) return resize(rows,cols);
+
+      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
+      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
+      Index newInnerSize = IsRowMajor ? cols : rows;
+
+      // Deal with the inner non-zero counts
+      if (m_innerNonZeros)
+      {
+        // Resize m_innerNonZeros
+        Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
+        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
+        m_innerNonZeros = newInnerNonZeros;
+        
+        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)          
+          m_innerNonZeros[i] = 0;
+      } 
+      else if (innerChange < 0) 
+      {
+        // Inner size decreased: allocate a new m_innerNonZeros
+        m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
+        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
+        for(Index i = 0; i < m_outerSize; i++)
+          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
+      }
+      
+      // Update m_innerNonZeros if the inner size decreased
+      if (m_innerNonZeros && innerChange < 0)
+      {
+        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
+        {
+          Index &n = m_innerNonZeros[i];
+          Index start = m_outerIndex[i];
+          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n; 
+        }
+      }
+      
+      m_innerSize = newInnerSize;
+
+      // Re-allocate outer index structure if necessary
+      if (outerChange == 0)
+        return;
+          
+      Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
+      if (!newOuterIndex) internal::throw_std_bad_alloc();
+      m_outerIndex = newOuterIndex;
+      if (outerChange > 0)
+      {
+        Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
+        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)          
+          m_outerIndex[i] = last; 
+      }
+      m_outerSize += outerChange;
+    }
+    
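// Usage sketch for conservativeResize() above (the function name below is
// made up): coefficients inside the new bounds survive, everything outside is
// dropped, unlike resize() which discards all values.
#include <Eigen/Sparse>

void example_conservative_resize()
{
  Eigen::SparseMatrix<double> m(4, 4);
  m.insert(1, 1) = 2.0;
  m.insert(3, 3) = 9.0;
  m.conservativeResize(2, 2);   // keeps (1,1); the (3,3) entry falls outside
}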
     /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
       * \sa resizeNonZeros(Index), reserve(), setZero()
       */
@@ -516,13 +600,15 @@ class SparseMatrix
       m_data.clear();
       if (m_outerSize != outerSize || m_outerSize==0)
       {
-        delete[] m_outerIndex;
-        m_outerIndex = new Index [outerSize+1];
+        std::free(m_outerIndex);
+        m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
+        if (!m_outerIndex) internal::throw_std_bad_alloc();
+        
         m_outerSize = outerSize;
       }
       if(m_innerNonZeros)
       {
-        delete[] m_innerNonZeros;
+        std::free(m_innerNonZeros);
         m_innerNonZeros = 0;
       }
       memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
@@ -560,9 +646,20 @@ class SparseMatrix
     inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
       : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
     {
+      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
       check_template_parameters();
       *this = other.derived();
     }
+    
+    /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
+    template<typename OtherDerived, unsigned int UpLo>
+    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
+      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+    {
+      check_template_parameters();
+      *this = other;
+    }
 
     /** Copy constructor (it performs a deep copy) */
     inline SparseMatrix(const SparseMatrix& other)
@@ -594,13 +691,22 @@ class SparseMatrix
       m_data.swap(other.m_data);
     }
 
+    /** Sets *this to the identity matrix */
+    inline void setIdentity()
+    {
+      eigen_assert(rows() == cols() && "ONLY FOR SQUARE MATRICES");
+      this->m_data.resize(rows());
+      Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
+      Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
+      Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
+    }
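// Usage sketch for the new setIdentity() (the function name below is made
// up): the identity is written directly in compressed form, one unit
// coefficient per column, with no insert()/makeCompressed() round trip.
#include <Eigen/Sparse>

void example_identity()
{
  Eigen::SparseMatrix<double> I(5, 5);
  I.setIdentity();              // nonZeros() == 5, all stored values equal 1
}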
     inline SparseMatrix& operator=(const SparseMatrix& other)
     {
       if (other.isRValue())
       {
         swap(other.const_cast_derived());
       }
-      else
+      else if(this!=&other)
       {
         initAssignment(other);
         if(other.isCompressed())
@@ -634,62 +740,7 @@ class SparseMatrix
     #endif
 
     template<typename OtherDerived>
-    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
-    {
-      const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
-      if (needToTranspose)
-      {
-        // two passes algorithm:
-        //  1 - compute the number of coeffs per dest inner vector
-        //  2 - do the actual copy/eval
-        // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
-        typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
-        typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
-        OtherCopy otherCopy(other.derived());
-
-        SparseMatrix dest(other.rows(),other.cols());
-        Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
-
-        // pass 1
-        // FIXME the above copy could be merged with that pass
-        for (Index j=0; j<otherCopy.outerSize(); ++j)
-          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
-            ++dest.m_outerIndex[it.index()];
-
-        // prefix sum
-        Index count = 0;
-        VectorXi positions(dest.outerSize());
-        for (Index j=0; j<dest.outerSize(); ++j)
-        {
-          Index tmp = dest.m_outerIndex[j];
-          dest.m_outerIndex[j] = count;
-          positions[j] = count;
-          count += tmp;
-        }
-        dest.m_outerIndex[dest.outerSize()] = count;
-        // alloc
-        dest.m_data.resize(count);
-        // pass 2
-        for (Index j=0; j<otherCopy.outerSize(); ++j)
-        {
-          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
-          {
-            Index pos = positions[it.index()]++;
-            dest.m_data.index(pos) = j;
-            dest.m_data.value(pos) = it.value();
-          }
-        }
-        this->swap(dest);
-        return *this;
-      }
-      else
-      {
-        if(other.isRValue())
-          initAssignment(other.derived());
-        // there is no special optimization
-        return Base::operator=(other.derived());
-      }
-    }
+    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
 
     friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
     {
@@ -731,8 +782,8 @@ class SparseMatrix
     /** Destructor */
     inline ~SparseMatrix()
     {
-      delete[] m_outerIndex;
-      delete[] m_innerNonZeros;
+      std::free(m_outerIndex);
+      std::free(m_innerNonZeros);
     }
 
 #ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -752,118 +803,14 @@ protected:
       resize(other.rows(), other.cols());
       if(m_innerNonZeros)
       {
-        delete[] m_innerNonZeros;
+        std::free(m_innerNonZeros);
         m_innerNonZeros = 0;
       }
     }
 
     /** \internal
       * \sa insert(Index,Index) */
-    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col)
-    {
-      eigen_assert(isCompressed());
-
-      const Index outer = IsRowMajor ? row : col;
-      const Index inner = IsRowMajor ? col : row;
-
-      Index previousOuter = outer;
-      if (m_outerIndex[outer+1]==0)
-      {
-        // we start a new inner vector
-        while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
-        {
-          m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
-          --previousOuter;
-        }
-        m_outerIndex[outer+1] = m_outerIndex[outer];
-      }
-
-      // here we have to handle the tricky case where the outerIndex array
-      // starts with: [ 0 0 0 0 0 1 ...] and we are inserting into, e.g.,
-      // the 2nd inner vector...
-      bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
-                    && (size_t(m_outerIndex[outer+1]) == m_data.size());
-
-      size_t startId = m_outerIndex[outer];
-      // FIXME let's make sure sizeof(long int) == sizeof(size_t)
-      size_t p = m_outerIndex[outer+1];
-      ++m_outerIndex[outer+1];
-
-      float reallocRatio = 1;
-      if (m_data.allocatedSize()<=m_data.size())
-      {
-        // if there is no preallocated memory, let's reserve a minimum of 32 elements
-        if (m_data.size()==0)
-        {
-          m_data.reserve(32);
-        }
-        else
-        {
-          // we need to reallocate the data, to reduce multiple reallocations
-          // we use a smart resize algorithm based on the current filling ratio
-          // in addition, we use float to avoid integer overflows
-          float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
-          reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
-          // furthermore we bound the realloc ratio to:
-          //   1) reduce multiple minor reallocs when the matrix is almost filled
-          //   2) avoid allocating too much memory when the matrix is almost empty
-          reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
-        }
-      }
-      m_data.resize(m_data.size()+1,reallocRatio);
-
-      if (!isLastVec)
-      {
-        if (previousOuter==-1)
-        {
-          // oops wrong guess.
-          // let's correct the outer offsets
-          for (Index k=0; k<=(outer+1); ++k)
-            m_outerIndex[k] = 0;
-          Index k=outer+1;
-          while(m_outerIndex[k]==0)
-            m_outerIndex[k++] = 1;
-          while (k<=m_outerSize && m_outerIndex[k]!=0)
-            m_outerIndex[k++]++;
-          p = 0;
-          --k;
-          k = m_outerIndex[k]-1;
-          while (k>0)
-          {
-            m_data.index(k) = m_data.index(k-1);
-            m_data.value(k) = m_data.value(k-1);
-            k--;
-          }
-        }
-        else
-        {
-          // we are not inserting into the last inner vec
-          // update outer indices:
-          Index j = outer+2;
-          while (j<=m_outerSize && m_outerIndex[j]!=0)
-            m_outerIndex[j++]++;
-          --j;
-          // shift data of last vecs:
-          Index k = m_outerIndex[j]-1;
-          while (k>=Index(p))
-          {
-            m_data.index(k) = m_data.index(k-1);
-            m_data.value(k) = m_data.value(k-1);
-            k--;
-          }
-        }
-      }
-
-      while ( (p > startId) && (m_data.index(p-1) > inner) )
-      {
-        m_data.index(p) = m_data.index(p-1);
-        m_data.value(p) = m_data.value(p-1);
-        --p;
-      }
-
-      m_data.index(p) = inner;
-      return (m_data.value(p) = 0);
-    }
+    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
 
     /** \internal
      * A vector object that is zero everywhere except at position \a i, where its value is \a v */
@@ -882,41 +829,12 @@ protected:
 
     /** \internal
       * \sa insert(Index,Index) */
-    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col)
-    {
-      eigen_assert(!isCompressed());
-
-      const Index outer = IsRowMajor ? row : col;
-      const Index inner = IsRowMajor ? col : row;
-
-      std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer];
-      std::ptrdiff_t innerNNZ = m_innerNonZeros[outer];
-      if(innerNNZ>=room)
-      {
-        // this inner vector is full, we need to reallocate the whole buffer :(
-        reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ)));
-      }
-
-      Index startId = m_outerIndex[outer];
-      Index p = startId + m_innerNonZeros[outer];
-      while ( (p > startId) && (m_data.index(p-1) > inner) )
-      {
-        m_data.index(p) = m_data.index(p-1);
-        m_data.value(p) = m_data.value(p-1);
-        --p;
-      }
-      eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists; call coeffRef instead");
-
-      m_innerNonZeros[outer]++;
-
-      m_data.index(p) = inner;
-      return (m_data.value(p) = 0);
-    }
+    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
 
 public:
     /** \internal
       * \sa insert(Index,Index) */
-    inline Scalar& insertBackUncompressed(Index row, Index col)
+    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
     {
       const Index outer = IsRowMajor ? row : col;
       const Index inner = IsRowMajor ? col : row;
@@ -924,8 +842,7 @@ public:
       eigen_assert(!isCompressed());
       eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
 
-      Index p = m_outerIndex[outer] + m_innerNonZeros[outer];
-      m_innerNonZeros[outer]++;
+      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
       m_data.index(p) = inner;
       return (m_data.value(p) = 0);
     }
@@ -934,10 +851,11 @@ private:
   static void check_template_parameters()
   {
     EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+    EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
   }
 
   struct default_prunning_func {
-    default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
+    default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
     inline bool operator() (const Index&, const Index&, const Scalar& value) const
     {
       return !internal::isMuchSmallerThan(value, reference, epsilon);
@@ -1021,22 +939,27 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
   EIGEN_UNUSED_VARIABLE(Options);
   enum { IsRowMajor = SparseMatrixType::IsRowMajor };
   typedef typename SparseMatrixType::Scalar Scalar;
-  typedef typename SparseMatrixType::Index Index;
   SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());
 
-  // pass 1: count the nnz per inner-vector
-  VectorXi wi(trMat.outerSize());
-  wi.setZero();
-  for(InputIterator it(begin); it!=end; ++it)
-    wi(IsRowMajor ? it->col() : it->row())++;
+  if(begin<end)
+  {
+    // pass 1: count the nnz per inner-vector
+    VectorXi wi(trMat.outerSize());
+    wi.setZero();
+    for(InputIterator it(begin); it!=end; ++it)
+    {
+      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
+      wi(IsRowMajor ? it->col() : it->row())++;
+    }
 
-  // pass 2: insert all the elements into trMat
-  trMat.reserve(wi);
-  for(InputIterator it(begin); it!=end; ++it)
-    trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
+    // pass 2: insert all the elements into trMat
+    trMat.reserve(wi);
+    for(InputIterator it(begin); it!=end; ++it)
+      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
 
-  // pass 3:
-  trMat.sumupDuplicates();
+    // pass 3:
+    trMat.sumupDuplicates();
+  }
 
   // pass 4: transposed copy -> implicit sorting
   mat = trMat;
@@ -1045,7 +968,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
 }
 
 
-/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \b.
+/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
   *
   * A \em triplet is a tuple (i,j,value) defining a non-zero element.
  * The input list of triplets does not have to be sorted, and can contain duplicated elements.
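// Usage sketch for setFromTriplets(), per the doc just above (the function
// name below is made up): duplicates are summed in pass 3, and the final
// transposed copy of pass 4 yields sorted, compressed storage.
#include <Eigen/Sparse>
#include <vector>

void example_from_triplets()
{
  typedef Eigen::Triplet<double> T;
  std::vector<T> triplets;
  triplets.push_back(T(0, 0, 1.0));
  triplets.push_back(T(2, 1, 3.0));
  triplets.push_back(T(2, 1, 0.5));   // duplicate entry: summed to 3.5
  Eigen::SparseMatrix<double> m(3, 3);
  m.setFromTriplets(triplets.begin(), triplets.end());
}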
@@ -1124,11 +1047,212 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
   m_outerIndex[m_outerSize] = count;
 
   // turn the matrix into compressed form
-  delete[] m_innerNonZeros;
+  std::free(m_innerNonZeros);
   m_innerNonZeros = 0;
   m_data.resize(m_outerIndex[m_outerSize]);
 }
 
+template<typename Scalar, int _Options, typename _Index>
+template<typename OtherDerived>
+EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
+{
+  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+  
+  const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+  if (needToTranspose)
+  {
+    // two passes algorithm:
+    //  1 - compute the number of coeffs per dest inner vector
+    //  2 - do the actual copy/eval
+    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
+    typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+    OtherCopy otherCopy(other.derived());
+
+    SparseMatrix dest(other.rows(),other.cols());
+    Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
+
+    // pass 1
+    // FIXME the above copy could be merged with that pass
+    for (Index j=0; j<otherCopy.outerSize(); ++j)
+      for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+        ++dest.m_outerIndex[it.index()];
+
+    // prefix sum
+    Index count = 0;
+    VectorXi positions(dest.outerSize());
+    for (Index j=0; j<dest.outerSize(); ++j)
+    {
+      Index tmp = dest.m_outerIndex[j];
+      dest.m_outerIndex[j] = count;
+      positions[j] = count;
+      count += tmp;
+    }
+    dest.m_outerIndex[dest.outerSize()] = count;
+    // alloc
+    dest.m_data.resize(count);
+    // pass 2
+    for (Index j=0; j<otherCopy.outerSize(); ++j)
+    {
+      for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+      {
+        Index pos = positions[it.index()]++;
+        dest.m_data.index(pos) = j;
+        dest.m_data.value(pos) = it.value();
+      }
+    }
+    this->swap(dest);
+    return *this;
+  }
+  else
+  {
+    if(other.isRValue())
+      initAssignment(other.derived());
+    // there is no special optimization
+    return Base::operator=(other.derived());
+  }
+}
+
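// Standalone sketch of the two-pass scheme used in the operator= above (it is
// a counting sort; all names here are illustrative): pass 1 histograms the
// destination inner vectors, a prefix sum turns the histogram into start
// offsets, and pass 2 scatters each value into its slot.
#include <vector>
#include <cstddef>

void counting_scatter(const std::vector<int>& destIndex,    // target inner vector of each value
                      const std::vector<double>& values,
                      int numVectors,
                      std::vector<int>& outerIndex,          // filled with numVectors+1 offsets
                      std::vector<double>& packed)
{
  outerIndex.assign(numVectors + 1, 0);
  for (std::size_t k = 0; k < destIndex.size(); ++k)         // pass 1: count
    ++outerIndex[destIndex[k] + 1];
  for (int j = 0; j < numVectors; ++j)                       // prefix sum
    outerIndex[j + 1] += outerIndex[j];
  packed.resize(values.size());
  std::vector<int> pos(outerIndex.begin(), outerIndex.end() - 1);
  for (std::size_t k = 0; k < destIndex.size(); ++k)         // pass 2: scatter
    packed[pos[destIndex[k]]++] = values[k];
}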
+template<typename _Scalar, int _Options, typename _Index>
+EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
+{
+  eigen_assert(!isCompressed());
+
+  const Index outer = IsRowMajor ? row : col;
+  const Index inner = IsRowMajor ? col : row;
+
+  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
+  Index innerNNZ = m_innerNonZeros[outer];
+  if(innerNNZ>=room)
+  {
+    // this inner vector is full, we need to reallocate the whole buffer :(
+    reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
+  }
+
+  Index startId = m_outerIndex[outer];
+  Index p = startId + m_innerNonZeros[outer];
+  while ( (p > startId) && (m_data.index(p-1) > inner) )
+  {
+    m_data.index(p) = m_data.index(p-1);
+    m_data.value(p) = m_data.value(p-1);
+    --p;
+  }
+  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists; call coeffRef instead");
+
+  m_innerNonZeros[outer]++;
+
+  m_data.index(p) = inner;
+  return (m_data.value(p) = 0);
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
+{
+  eigen_assert(isCompressed());
+
+  const Index outer = IsRowMajor ? row : col;
+  const Index inner = IsRowMajor ? col : row;
+
+  Index previousOuter = outer;
+  if (m_outerIndex[outer+1]==0)
+  {
+    // we start a new inner vector
+    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
+    {
+      m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
+      --previousOuter;
+    }
+    m_outerIndex[outer+1] = m_outerIndex[outer];
+  }
+
+  // here we have to handle the tricky case where the outerIndex array
+  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting into, e.g.,
+  // the 2nd inner vector...
+  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
+                && (size_t(m_outerIndex[outer+1]) == m_data.size());
+
+  size_t startId = m_outerIndex[outer];
+  // FIXME let's make sure sizeof(long int) == sizeof(size_t)
+  size_t p = m_outerIndex[outer+1];
+  ++m_outerIndex[outer+1];
+
+  float reallocRatio = 1;
+  if (m_data.allocatedSize()<=m_data.size())
+  {
+    // if there is no preallocated memory, let's reserve a minimum of 32 elements
+    if (m_data.size()==0)
+    {
+      m_data.reserve(32);
+    }
+    else
+    {
+      // we need to reallocate the data, to reduce multiple reallocations
+      // we use a smart resize algorithm based on the current filling ratio
+      // in addition, we use float to avoid integer overflows
+      float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
+      reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
+      // furthermore we bound the realloc ratio to:
+      //   1) reduce multiple minor reallocs when the matrix is almost filled
+      //   2) avoid allocating too much memory when the matrix is almost empty
+      reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
+    }
+  }
+  m_data.resize(m_data.size()+1,reallocRatio);
+
+  if (!isLastVec)
+  {
+    if (previousOuter==-1)
+    {
+      // oops wrong guess.
+      // let's correct the outer offsets
+      for (Index k=0; k<=(outer+1); ++k)
+        m_outerIndex[k] = 0;
+      Index k=outer+1;
+      while(m_outerIndex[k]==0)
+        m_outerIndex[k++] = 1;
+      while (k<=m_outerSize && m_outerIndex[k]!=0)
+        m_outerIndex[k++]++;
+      p = 0;
+      --k;
+      k = m_outerIndex[k]-1;
+      while (k>0)
+      {
+        m_data.index(k) = m_data.index(k-1);
+        m_data.value(k) = m_data.value(k-1);
+        k--;
+      }
+    }
+    else
+    {
+      // we are not inserting into the last inner vec
+      // update outer indices:
+      Index j = outer+2;
+      while (j<=m_outerSize && m_outerIndex[j]!=0)
+        m_outerIndex[j++]++;
+      --j;
+      // shift data of last vecs:
+      Index k = m_outerIndex[j]-1;
+      while (k>=Index(p))
+      {
+        m_data.index(k) = m_data.index(k-1);
+        m_data.value(k) = m_data.value(k-1);
+        k--;
+      }
+    }
+  }
+
+  while ( (p > startId) && (m_data.index(p-1) > inner) )
+  {
+    m_data.index(p) = m_data.index(p-1);
+    m_data.value(p) = m_data.value(p-1);
+    --p;
+  }
+
+  m_data.index(p) = inner;
+  return (m_data.value(p) = 0);
+}
+
 } // end namespace Eigen
 
 #endif // EIGEN_SPARSEMATRIX_H
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseMatrixBase.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseMatrixBase.h
index 9a1258097fe..706f699b8c4 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -89,6 +89,9 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
           */
 
       IsRowMajor = Flags&RowMajorBit ? 1 : 0,
+      
+      InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
+                             : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
 
       #ifndef EIGEN_PARSED_BY_DOXYGEN
       _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
@@ -102,7 +105,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
                      >::type AdjointReturnType;
 
 
-    typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor> PlainObject;
+    typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor, Index> PlainObject;
 
 
 #ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -136,13 +139,13 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
 #   include "../plugins/CommonCwiseBinaryOps.h"
 #   include "../plugins/MatrixCwiseUnaryOps.h"
 #   include "../plugins/MatrixCwiseBinaryOps.h"
+#   include "../plugins/BlockMethods.h"
 #   ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
 #     include EIGEN_SPARSEMATRIXBASE_PLUGIN
 #   endif
 #   undef EIGEN_CURRENT_STORAGE_BASE_CLASS
 #undef EIGEN_CURRENT_STORAGE_BASE_CLASS
 
-
     /** \returns the number of rows. \sa cols() */
     inline Index rows() const { return derived().rows(); }
     /** \returns the number of columns. \sa rows() */
@@ -322,8 +325,8 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
             typename internal::traits<OtherDerived>::Scalar \
           >::ReturnType \
         >, \
-        Derived, \
-        OtherDerived \
+        const Derived, \
+        const OtherDerived \
       >
 
     template<typename OtherDerived>
@@ -387,55 +390,45 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
     template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
     RealScalar squaredNorm() const;
     RealScalar norm()  const;
+    RealScalar blueNorm() const;
 
     Transpose<Derived> transpose() { return derived(); }
     const Transpose<const Derived> transpose() const { return derived(); }
     const AdjointReturnType adjoint() const { return transpose(); }
 
-    // sub-vector
-    SparseInnerVectorSet<Derived,1> row(Index i);
-    const SparseInnerVectorSet<Derived,1> row(Index i) const;
-    SparseInnerVectorSet<Derived,1> col(Index j);
-    const SparseInnerVectorSet<Derived,1> col(Index j) const;
-    SparseInnerVectorSet<Derived,1> innerVector(Index outer);
-    const SparseInnerVectorSet<Derived,1> innerVector(Index outer) const;
-
-    // set of sub-vectors
-    SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size);
-    const SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size) const;
-    SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size);
-    const SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size) const;
-    
-    SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size);
-    const SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size) const;
-    SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size);
-    const SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size) const;
-    SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize);
-    const SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize) const;
-
-      /** \internal use operator= */
-      template<typename DenseDerived>
-      void evalTo(MatrixBase<DenseDerived>& dst) const
-      {
-        dst.setZero();
-        for (Index j=0; j<outerSize(); ++j)
-          for (typename Derived::InnerIterator i(derived(),j); i; ++i)
-            dst.coeffRef(i.row(),i.col()) = i.value();
-      }
+    // inner-vector
+    typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true>       InnerVectorReturnType;
+    typedef Block<const Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> ConstInnerVectorReturnType;
+    InnerVectorReturnType innerVector(Index outer);
+    const ConstInnerVectorReturnType innerVector(Index outer) const;
 
-      Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
-      {
-        return derived();
-      }
+    // set of inner-vectors
+    Block<Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize);
+    const Block<const Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize) const;
+
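// Illustrative usage of the Block-based innerVector() declared above (the
// helper name is made up): for a column-major matrix the inner vectors are
// its columns, and the non-const block is writable.
#include <Eigen/Sparse>

void example_inner_vector(Eigen::SparseMatrix<double>& m)  // column-major
{
  double colSum = m.innerVector(0).sum();            // read access: sum of column 0
  Eigen::SparseVector<double> c = m.innerVector(1);  // copy column 1
  m.innerVector(2) = c;                              // write through the block
  (void)colSum;
}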
+    /** \internal use operator= */
+    template<typename DenseDerived>
+    void evalTo(MatrixBase<DenseDerived>& dst) const
+    {
+      dst.setZero();
+      for (Index j=0; j<outerSize(); ++j)
+        for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+          dst.coeffRef(i.row(),i.col()) = i.value();
+    }
+
+    Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
+    {
+      return derived();
+    }
 
     template<typename OtherDerived>
     bool isApprox(const SparseMatrixBase<OtherDerived>& other,
-                  RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
     { return toDense().isApprox(other.toDense(),prec); }
 
     template<typename OtherDerived>
     bool isApprox(const MatrixBase<OtherDerived>& other,
-                  RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
     { return toDense().isApprox(other,prec); }
 
     /** \returns the matrix or vector obtained by evaluating this expression.
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseProduct.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseProduct.h
index 6a555b83434..70b6480efee 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseProduct.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseProduct.h
@@ -99,15 +99,16 @@ class SparseSparseProduct : internal::no_assignment_operator,
     }
 
     template<typename Lhs, typename Rhs>
-    EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, RealScalar tolerance)
+    EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, const RealScalar& tolerance)
       : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false)
     {
       init();
     }
 
-    SparseSparseProduct pruned(Scalar reference = 0, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision()) const
+    SparseSparseProduct pruned(const Scalar& reference = 0, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) const
     {
-      return SparseSparseProduct(m_lhs,m_rhs,internal::abs(reference)*epsilon);
+      using std::abs;
+      return SparseSparseProduct(m_lhs,m_rhs,abs(reference)*epsilon);
     }
 
     template<typename Dest>
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h
index 1162c69111e..0eda96bc471 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -69,6 +69,30 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
     const _MatrixTypeNested& matrix() const { return m_matrix; }
     _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }
 
+    /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs.
+      *
+      * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
+      * Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
+      */
+    template<typename OtherDerived>
+    SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>
+    operator*(const SparseMatrixBase<OtherDerived>& rhs) const
+    {
+      return SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>(*this, rhs.derived());
+    }
+
+    /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs.
+      *
+      * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
+      * Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
+      */
+    template<typename OtherDerived> friend
+    SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject >
+    operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
+    {
+      return SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject>(lhs.derived(), rhs);
+    }
+    
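// Usage sketch for the new sparse selfadjoint-times-sparse products (the
// function name is made up): only the stored triangle of A is referenced, and,
// as the notes above explain, the view is first expanded into a temporary.
#include <Eigen/Sparse>

void example_selfadjoint_product(const Eigen::SparseMatrix<double>& A,  // lower part stored
                                 const Eigen::SparseMatrix<double>& B)
{
  Eigen::SparseMatrix<double> C = A.selfadjointView<Eigen::Lower>() * B;
  Eigen::SparseMatrix<double> D = B * A.selfadjointView<Eigen::Lower>();
  (void)C; (void)D;
}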
     /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
     template<typename OtherDerived>
     SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
@@ -94,7 +118,7 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
       * call this function with u.adjoint().
       */
     template<typename DerivedU>
-    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
+    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
     
     /** \internal triggered by sparse_matrix = SparseSelfadjointView; */
     template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
@@ -173,7 +197,7 @@ SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView(
 template<typename MatrixType, unsigned int UpLo>
 template<typename DerivedU>
 SparseSelfAdjointView<MatrixType,UpLo>&
-SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha)
+SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
 {
   SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
   if(alpha==Scalar(0))
@@ -207,13 +231,12 @@ class SparseSelfAdjointTimeDenseProduct
     SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
     {}
 
-    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
     {
       EIGEN_ONLY_USED_FOR_DEBUG(alpha);
       // TODO use alpha
       eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
       typedef typename internal::remove_all<Lhs>::type _Lhs;
-      typedef typename internal::remove_all<Rhs>::type _Rhs;
       typedef typename _Lhs::InnerIterator LhsInnerIterator;
       enum {
         LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
@@ -241,7 +264,7 @@ class SparseSelfAdjointTimeDenseProduct
           Index b = LhsIsRowMajor ? i.index() : j;
           typename Lhs::Scalar v = i.value();
           dest.row(a) += (v) * m_rhs.row(b);
-          dest.row(b) += internal::conj(v) * m_rhs.row(a);
+          dest.row(b) += numext::conj(v) * m_rhs.row(a);
         }
         if (ProcessFirstHalf && i && (i.index()==j))
           dest.row(j) += i.value() * m_rhs.row(j);
@@ -269,7 +292,7 @@ class DenseTimeSparseSelfAdjointProduct
     DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
     {}
 
-    template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, Scalar /*alpha*/) const
+    template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, const Scalar& /*alpha*/) const
     {
       // TODO
     }
@@ -368,7 +391,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
         dest.valuePtr()[k] = it.value();
         k = count[ip]++;
         dest.innerIndexPtr()[k] = jp;
-        dest.valuePtr()[k] = internal::conj(it.value());
+        dest.valuePtr()[k] = numext::conj(it.value());
       }
     }
   }
@@ -429,7 +452,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
       
       if(!StorageOrderMatch) std::swap(ip,jp);
       if( ((int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp)))
-        dest.valuePtr()[k] = conj(it.value());
+        dest.valuePtr()[k] = numext::conj(it.value());
       else
         dest.valuePtr()[k] = it.value();
     }
@@ -462,7 +485,10 @@ class SparseSymmetricPermutationProduct
     template<typename DestScalar, int Options, typename DstIndex>
     void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const
     {
-      internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
+      // Evaluate into a temporary with the opposite storage order and assign:
+      // the transposed copy then sorts the inner indices of the result.
+      SparseMatrix<DestScalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
+      internal::permute_symm_to_fullsymm<UpLo>(m_matrix,tmp,m_perm.indices().data());
+      _dest = tmp;
     }
     
     template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
index 2438ac573d0..70857c7b6ea 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -17,7 +17,7 @@ namespace internal {
 
 // perform a pseudo in-place sparse * sparse product assuming all matrices are col major
 template<typename Lhs, typename Rhs, typename ResultType>
-static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, typename ResultType::RealScalar tolerance)
+static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance)
 {
   // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
 
@@ -85,7 +85,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,C
   typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
   typedef typename ResultType::RealScalar RealScalar;
 
-  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
   {
     typename remove_all<ResultType>::type _res(res.rows(), res.cols());
     internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);
@@ -97,7 +97,7 @@ template<typename Lhs, typename Rhs, typename ResultType>
 struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
 {
   typedef typename ResultType::RealScalar RealScalar;
-  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
   {
     // we need a col-major matrix to hold the result
     typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
@@ -111,7 +111,7 @@ template<typename Lhs, typename Rhs, typename ResultType>
 struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
 {
   typedef typename ResultType::RealScalar RealScalar;
-  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
   {
     // let's transpose the product to get a column x column product
     typename remove_all<ResultType>::type _res(res.rows(), res.cols());
@@ -124,7 +124,7 @@ template<typename Lhs, typename Rhs, typename ResultType>
 struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
 {
   typedef typename ResultType::RealScalar RealScalar;
-  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
   {
     typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
     ColMajorMatrix colLhs(lhs);
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseTranspose.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseTranspose.h
index 273f9de688f..7c300ee8dbc 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseTranspose.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseTranspose.h
@@ -18,7 +18,7 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
     typedef typename internal::remove_all<typename MatrixType::Nested>::type _MatrixTypeNested;
   public:
 
-    EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
+    EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType> )
 
     class InnerIterator;
     class ReverseInnerIterator;
@@ -34,26 +34,28 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerItera
   : public _MatrixTypeNested::InnerIterator
 {
     typedef typename _MatrixTypeNested::InnerIterator Base;
+    typedef typename TransposeImpl::Index Index;
   public:
 
     EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer)
       : Base(trans.derived().nestedExpression(), outer)
     {}
-    inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
-    inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+    Index row() const { return Base::col(); }
+    Index col() const { return Base::row(); }
 };
 
 template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
   : public _MatrixTypeNested::ReverseInnerIterator
 {
     typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
+    typedef typename TransposeImpl::Index Index;
   public:
 
     EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer)
       : Base(xpr.derived().nestedExpression(), outer)
     {}
-    inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
-    inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+    Index row() const { return Base::col(); }
+    Index col() const { return Base::row(); }
 };
 
 } // end namespace Eigen
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseTriangularView.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseTriangularView.h
index 477e4bd94b0..333127b78e8 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseTriangularView.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseTriangularView.h
@@ -2,6 +2,7 @@
 // for linear algebra.
 //
 // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
 //
 // This Source Code Form is subject to the terms of the Mozilla
 // Public License v. 2.0. If a copy of the MPL was not distributed
@@ -27,6 +28,7 @@ template<typename MatrixType, int Mode> class SparseTriangularView
     enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit))
                     || ((Mode&Upper) &&  (MatrixType::Flags&RowMajorBit)),
            SkipLast = !SkipFirst,
+           SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
            HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
     };
 
@@ -64,6 +66,7 @@ template<typename MatrixType, int Mode>
 class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator
 {
     typedef typename MatrixTypeNestedCleaned::InnerIterator Base;
+    typedef typename SparseTriangularView::Index Index;
   public:
 
     EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer)
@@ -71,7 +74,7 @@ class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNe
     {
       if(SkipFirst)
       {
-        while((*this) && (HasUnitDiag ? this->index()<=outer : this->index()<outer))
+        while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
           Base::operator++();
         if(HasUnitDiag)
           m_returnOne = true;
@@ -101,8 +104,8 @@ class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNe
       return *this;
     }
 
-    inline Index row() const { return Base::row(); }
-    inline Index col() const { return Base::col(); }
+    inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); }
+    inline Index col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); }
     inline Index index() const
     {
       if(HasUnitDiag && m_returnOne)  return Base::outer();
@@ -118,7 +121,12 @@ class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNe
     {
       if(HasUnitDiag && m_returnOne)
         return true;
-      return (SkipFirst ? Base::operator bool() : (Base::operator bool() && this->index() <= this->outer()));
+      if(SkipFirst) return Base::operator bool();
+      else
+      {
+        if (SkipDiag) return (Base::operator bool() && this->index() < this->outer());
+        else return (Base::operator bool() && this->index() <= this->outer());
+      }
     }
   protected:
     bool m_returnOne;
@@ -128,18 +136,20 @@ template<typename MatrixType, int Mode>
 class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator
 {
     typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base;
+    typedef typename SparseTriangularView::Index Index;
   public:
 
     EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTriangularView& view, Index outer)
       : Base(view.nestedExpression(), outer)
     {
       eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not yet support triangular views with a unit diagonal");
-      if(SkipLast)
-        while((*this) && this->index()>outer)
+      if(SkipLast) {
+        while((*this) && (SkipDiag ? this->index()>=outer : this->index()>outer))
           --(*this);
+      }
     }
 
-    EIGEN_STRONG_INLINE InnerIterator& operator--()
+    EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
     { Base::operator--(); return *this; }
 
     inline Index row() const { return Base::row(); }
@@ -147,7 +157,12 @@ class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public Matri
 
     EIGEN_STRONG_INLINE operator bool() const
     {
-      return SkipLast ? Base::operator bool() : (Base::operator bool() && this->index() >= this->outer());
+      if (SkipLast) return Base::operator bool();
+      else
+      {
+        if(SkipDiag) return (Base::operator bool() && this->index() > this->outer());
+        else return (Base::operator bool() && this->index() >= this->outer());
+      }
     }
 };
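// Usage sketch for the new SkipDiag handling (the function name is made up):
// with a ZeroDiag mode such as StrictlyLower, the iterators above now skip
// the diagonal as well.
#include <Eigen/Sparse>

void example_strict_triangular(const Eigen::SparseMatrix<double>& A)
{
  Eigen::SparseMatrix<double> L  = A.triangularView<Eigen::Lower>();
  Eigen::SparseMatrix<double> SL = A.triangularView<Eigen::StrictlyLower>();  // no diagonal
  (void)L; (void)SL;
}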
 
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseUtil.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseUtil.h
index a686e08daea..064a4070722 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseUtil.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseUtil.h
@@ -73,7 +73,6 @@ template<typename _Scalar, int _Flags = 0, typename _Index = int>  class Dynamic
 template<typename _Scalar, int _Flags = 0, typename _Index = int>  class SparseVector;
 template<typename _Scalar, int _Flags = 0, typename _Index = int>  class MappedSparseMatrix;
 
-template<typename MatrixType, int Size>           class SparseInnerVectorSet;
 template<typename MatrixType, int Mode>           class SparseTriangularView;
 template<typename MatrixType, unsigned int UpLo>  class SparseSelfAdjointView;
 template<typename Lhs, typename Rhs>              class SparseDiagonalProduct;
@@ -99,16 +98,16 @@ template<typename T> struct eval<T,Sparse>
 
 template<typename T,int Cols> struct sparse_eval<T,1,Cols> {
     typedef typename traits<T>::Scalar _Scalar;
-    enum { _Flags = traits<T>::Flags| RowMajorBit };
+    typedef typename traits<T>::Index _Index;
   public:
-    typedef SparseVector<_Scalar, _Flags> type;
+    typedef SparseVector<_Scalar, RowMajor, _Index> type;
 };
 
 template<typename T,int Rows> struct sparse_eval<T,Rows,1> {
     typedef typename traits<T>::Scalar _Scalar;
-    enum { _Flags = traits<T>::Flags & (~RowMajorBit) };
+    typedef typename traits<T>::Index _Index;
   public:
-    typedef SparseVector<_Scalar, _Flags> type;
+    typedef SparseVector<_Scalar, ColMajor, _Index> type;
 };
 
 template<typename T,int Rows,int Cols> struct sparse_eval {
@@ -128,12 +127,10 @@ template<typename T> struct sparse_eval<T,1,1> {
 template<typename T> struct plain_matrix_type<T,Sparse>
 {
   typedef typename traits<T>::Scalar _Scalar;
-    enum {
-          _Flags = traits<T>::Flags
-    };
-
+  typedef typename traits<T>::Index _Index;
+  enum { _Options = ((traits<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
   public:
-    typedef SparseMatrix<_Scalar, _Flags> type;
+    typedef SparseMatrix<_Scalar, _Options, _Index> type;
 };
 
 } // end namespace internal
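
To make the traits change concrete, a hypothetical compile-time check (C++11, and it pokes at Eigen's internal namespace, so a sketch rather than public API): the plain evaluated type of a sparse expression now inherits the Index type and storage order of its argument.

```cpp
#include <Eigen/SparseCore>
#include <type_traits>

typedef Eigen::SparseMatrix<double, Eigen::RowMajor, long> SpMat;
typedef Eigen::internal::plain_matrix_type<SpMat>::type Plain;

// Previously the Index degenerated to the default int; now it follows SpMat.
static_assert(std::is_same<Plain::Index, long>::value, "Index is propagated");
static_assert(Plain::IsRowMajor, "storage order is preserved");

int main() { return 0; }
```
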
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseVector.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseVector.h
index 8d7e26c699c..7e15c814b6f 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseVector.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseVector.h
@@ -45,35 +45,40 @@ struct traits<SparseVector<_Scalar, _Options, _Index> >
     SupportedAccessPatterns = InnerRandomAccessPattern
   };
 };
+
+// Sparse-Vector-Assignment kinds:
+enum {
+  SVA_RuntimeSwitch,
+  SVA_Inner,
+  SVA_Outer
+};
+
+template< typename Dest, typename Src,
+          int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch
+                             : Src::InnerSizeAtCompileTime==1 ? SVA_Outer
+                             : SVA_Inner>
+struct sparse_vector_assign_selector;
+
 }
 
 template<typename _Scalar, int _Options, typename _Index>
 class SparseVector
   : public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
 {
+    typedef SparseMatrixBase<SparseVector> SparseBase;
+    
   public:
     EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
     EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
     EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
-
-  protected:
-  public:
-
-    typedef SparseMatrixBase<SparseVector> SparseBase;
+    
+    typedef internal::CompressedStorage<Scalar,Index> Storage;
     enum { IsColVector = internal::traits<SparseVector>::IsColVector };
     
     enum {
       Options = _Options
     };
-
-    internal::CompressedStorage<Scalar,Index> m_data;
-    Index m_size;
-
-    internal::CompressedStorage<Scalar,Index>& _data() { return m_data; }
-    internal::CompressedStorage<Scalar,Index>& _data() const { return m_data; }
-
-  public:
-
+    
     EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
     EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
     EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
@@ -84,17 +89,26 @@ class SparseVector
 
     EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); }
     EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); }
+    
+    /** \internal */
+    inline Storage& data() { return m_data; }
+    /** \internal */
+    inline const Storage& data() const { return m_data; }
 
     inline Scalar coeff(Index row, Index col) const
     {
-      eigen_assert((IsColVector ? col : row)==0);
+      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
       return coeff(IsColVector ? row : col);
     }
-    inline Scalar coeff(Index i) const { return m_data.at(i); }
+    inline Scalar coeff(Index i) const
+    {
+      eigen_assert(i>=0 && i<m_size);
+      return m_data.at(i);
+    }
 
     inline Scalar& coeffRef(Index row, Index col)
     {
-      eigen_assert((IsColVector ? col : row)==0);
+      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
      return coeffRef(IsColVector ? row : col);
     }
 
@@ -106,6 +120,7 @@ class SparseVector
       */
     inline Scalar& coeffRef(Index i)
     {
+      eigen_assert(i>=0 && i<m_size);
       return m_data.atWithInsertion(i);
     }
 
@@ -139,6 +154,8 @@ class SparseVector
 
     inline Scalar& insert(Index row, Index col)
     {
+      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
+      
       Index inner = IsColVector ? row : col;
       Index outer = IsColVector ? col : row;
       eigen_assert(outer==0);
@@ -146,6 +163,8 @@ class SparseVector
     }
     Scalar& insert(Index i)
     {
+      eigen_assert(i>=0 && i<m_size);
+      
       Index startId = 0;
       Index p = Index(m_data.size()) - 1;
       // TODO smart realloc
@@ -169,7 +188,7 @@ class SparseVector
 
     inline void finalize() {}
 
-    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
     {
       m_data.prune(reference,epsilon);
     }
@@ -188,25 +207,31 @@ class SparseVector
 
     void resizeNonZeros(Index size) { m_data.resize(size); }
 
-    inline SparseVector() : m_size(0) { resize(0); }
+    inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); }
 
-    inline SparseVector(Index size) : m_size(0) { resize(size); }
+    inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); }
 
-    inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }
+    inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); }
 
     template<typename OtherDerived>
     inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
       : m_size(0)
     {
+      check_template_parameters();
       *this = other.derived();
     }
 
     inline SparseVector(const SparseVector& other)
       : SparseBase(other), m_size(0)
     {
+      check_template_parameters();
       *this = other.derived();
     }
 
+    /** Swaps the values of \c *this and \a other.
+      * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
+      * \sa SparseMatrixBase::swap()
+      */
     inline void swap(SparseVector& other)
     {
       std::swap(m_size, other.m_size);
@@ -230,11 +255,10 @@ class SparseVector
     template<typename OtherDerived>
     inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
     {
-      if ( (bool(OtherDerived::IsVectorAtCompileTime) && int(RowsAtCompileTime)!=int(OtherDerived::RowsAtCompileTime))
-          || ((!bool(OtherDerived::IsVectorAtCompileTime)) && ( bool(IsColVector) ? other.cols()>1 : other.rows()>1 )))
-        return assign(other.transpose());
-      else
-        return assign(other);
+      SparseVector tmp(other.size());
+      internal::sparse_vector_assign_selector<SparseVector,OtherDerived>::run(tmp,other.derived());
+      this->swap(tmp);
+      return *this;
     }
 
     #ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -261,73 +285,63 @@ class SparseVector
 
   public:
 
-    /** \deprecated use setZero() and reserve() */
+    /** \internal \deprecated use setZero() and reserve() */
     EIGEN_DEPRECATED void startFill(Index reserve)
     {
       setZero();
       m_data.reserve(reserve);
     }
 
-    /** \deprecated use insertBack(Index,Index) */
+    /** \internal \deprecated use insertBack(Index,Index) */
     EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
     {
       eigen_assert(r==0 || c==0);
       return fill(IsColVector ? r : c);
     }
 
-    /** \deprecated use insertBack(Index) */
+    /** \internal \deprecated use insertBack(Index) */
     EIGEN_DEPRECATED Scalar& fill(Index i)
     {
       m_data.append(0, i);
       return m_data.value(m_data.size()-1);
     }
 
-    /** \deprecated use insert(Index,Index) */
+    /** \internal \deprecated use insert(Index,Index) */
     EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
     {
       eigen_assert(r==0 || c==0);
       return fillrand(IsColVector ? r : c);
     }
 
-    /** \deprecated use insert(Index) */
+    /** \internal \deprecated use insert(Index) */
     EIGEN_DEPRECATED Scalar& fillrand(Index i)
     {
       return insert(i);
     }
 
-    /** \deprecated use finalize() */
+    /** \internal \deprecated use finalize() */
     EIGEN_DEPRECATED void endFill() {}
     
+    // These two functions were here in the 3.1 release, so let's keep them in case some code relies on them.
+    /** \internal \deprecated use data() */
+    EIGEN_DEPRECATED Storage& _data() { return m_data; }
+    /** \internal \deprecated use data() */
+    EIGEN_DEPRECATED const Storage& _data() const { return m_data; }
+    
 #   ifdef EIGEN_SPARSEVECTOR_PLUGIN
 #     include EIGEN_SPARSEVECTOR_PLUGIN
 #   endif
 
 protected:
-    template<typename OtherDerived>
-    EIGEN_DONT_INLINE SparseVector& assign(const SparseMatrixBase<OtherDerived>& _other)
+  
+    static void check_template_parameters()
     {
-      const OtherDerived& other(_other.derived());
-      const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
-      if(needToTranspose)
-      {
-        Index size = other.size();
-        Index nnz = other.nonZeros();
-        resize(size);
-        reserve(nnz);
-        for(Index i=0; i<size; ++i)
-        {
-          typename OtherDerived::InnerIterator it(other, i);
-          if(it)
-              insert(i) = it.value();
-        }
-        return *this;
-      }
-      else
-      {
-        // there is no special optimization
-        return Base::operator=(other);
-      }
+      EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+      EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
     }
+    
+    Storage m_data;
+    Index m_size;
 };
 
 template<typename Scalar, int _Options, typename _Index>
@@ -394,6 +408,40 @@ class SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
     const Index m_start;
 };
 
+namespace internal {
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
+  static void run(Dest& dst, const Src& src) {
+    eigen_internal_assert(src.innerSize()==src.size());
+    for(typename Src::InnerIterator it(src, 0); it; ++it)
+      dst.insert(it.index()) = it.value();
+  }
+};
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
+  static void run(Dest& dst, const Src& src) {
+    eigen_internal_assert(src.outerSize()==src.size());
+    for(typename Dest::Index i=0; i<src.size(); ++i)
+    {
+      typename Src::InnerIterator it(src, i);
+      if(it)
+        dst.insert(i) = it.value();
+    }
+  }
+};
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {
+  static void run(Dest& dst, const Src& src) {
+    if(src.outerSize()==1)  sparse_vector_assign_selector<Dest,Src,SVA_Inner>::run(dst, src);
+    else                    sparse_vector_assign_selector<Dest,Src,SVA_Outer>::run(dst, src);
+  }
+};
+
+}
+
 } // end namespace Eigen
 
 #endif // EIGEN_SPARSEVECTOR_H
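
A minimal usage sketch (not part of the patch; `A` is illustrative) of the three paths the new selector dispatches to:

```cpp
#include <Eigen/SparseCore>

int main()
{
  Eigen::SparseMatrix<double> A(4,4);   // column-major by default
  A.insert(1,2) = 3.0;
  A.insert(3,2) = 7.0;
  A.makeCompressed();

  Eigen::SparseVector<double> v = A.col(2);          // one inner vector     -> SVA_Inner
  Eigen::SparseVector<double> w = A.row(1);          // one coeff per column -> SVA_Outer
  Eigen::SparseVector<double> u = A.middleCols(2,1); // shape only known at runtime
                                                     // -> SVA_RuntimeSwitch
  return 0;
}
```
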
diff --git a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseView.h b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseView.h
index 8b0b9ea0304..fd8450463fe 100644
--- a/ThirdParty/eigen3/Eigen/src/SparseCore/SparseView.h
+++ b/ThirdParty/eigen3/Eigen/src/SparseCore/SparseView.h
@@ -18,7 +18,7 @@ namespace internal {
 template<typename MatrixType>
 struct traits<SparseView<MatrixType> > : traits<MatrixType>
 {
-  typedef int Index;
+  typedef typename MatrixType::Index Index;
   typedef Sparse StorageKind;
   enum {
     Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
@@ -56,6 +56,7 @@ protected:
 template<typename MatrixType>
 class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
 {
+  typedef typename SparseView::Index Index;
 public:
   typedef typename _MatrixTypeNested::InnerIterator IterBase;
   InnerIterator(const SparseView& view, Index outer) :
@@ -88,7 +89,7 @@ private:
 
 template<typename Derived>
 const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& m_reference,
-                                                          typename NumTraits<Scalar>::Real m_epsilon) const
+                                                          const typename NumTraits<Scalar>::Real& m_epsilon) const
 {
   return SparseView<Derived>(derived(), m_reference, m_epsilon);
 }
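
For reference, a minimal sketch of the entry point touched above (threshold values are illustrative):

```cpp
#include <Eigen/Dense>
#include <Eigen/SparseCore>

int main()
{
  Eigen::MatrixXd D = Eigen::MatrixXd::Random(4,4);
  // Coefficients negligible w.r.t. the reference value (scaled by epsilon)
  // are dropped; the view's Index type now matches the matrix's Index
  // instead of being hard-wired to int.
  Eigen::SparseMatrix<double> S = D.sparseView(1.0, 0.25);
  return 0;
}
```
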
diff --git a/ThirdParty/eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h b/ThirdParty/eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h
index d8a54e18c7c..bcb355760c5 100644
--- a/ThirdParty/eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h
+++ b/ThirdParty/eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h
@@ -160,7 +160,7 @@ struct SluMatrix : SuperMatrix
     res.ncol      = mat.cols();
 
     res.storage.lda       = MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride();
-    res.storage.values    = mat.data();
+    res.storage.values    = (void*)(mat.data());
     return res;
   }
 
@@ -353,14 +353,14 @@ class SuperLUBase : internal::noncopyable
       *
       * \sa compute()
       */
-//     template<typename Rhs>
-//     inline const internal::sparse_solve_retval<SuperLU, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
-//     {
-//       eigen_assert(m_isInitialized && "SuperLU is not initialized.");
-//       eigen_assert(rows()==b.rows()
-//                 && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
-//       return internal::sparse_solve_retval<SuperLU, Rhs>(*this, b.derived());
-//     }
+    template<typename Rhs>
+    inline const internal::sparse_solve_retval<SuperLUBase, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "SuperLU is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::sparse_solve_retval<SuperLUBase, Rhs>(*this, b.derived());
+    }
     
    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
       *
@@ -377,7 +377,7 @@ class SuperLUBase : internal::noncopyable
     }
     
     template<typename Stream>
-    void dumpMemory(Stream& s)
+    void dumpMemory(Stream& /*s*/)
     {}
     
   protected:
@@ -612,6 +612,7 @@ void SuperLU<MatrixType>::factorize(const MatrixType& a)
   
   this->initFactorization(a);
   
+  m_sluOptions.ColPerm = COLAMD;
   int info = 0;
   RealScalar recip_pivot_growth, rcond;
   RealScalar ferr, berr;
@@ -1014,7 +1015,7 @@ struct sparse_solve_retval<SuperLUBase<_MatrixType,Derived>, Rhs>
 
   template<typename Dest> void evalTo(Dest& dst) const
   {
-    dec().derived()._solve(rhs(),dst);
+    this->defaultEvalTo(dst);
   }
 };
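
A minimal usage sketch of the overload re-enabled above (assumes SuperLU is installed and linked; `A` and `B` are placeholders filled elsewhere):

```cpp
#include <Eigen/SparseCore>
#include <Eigen/SuperLUSupport>

void solveSparseRhs(const Eigen::SparseMatrix<double>& A,
                    const Eigen::SparseMatrix<double>& B,
                    Eigen::SparseMatrix<double>& X)
{
  Eigen::SuperLU<Eigen::SparseMatrix<double> > lu(A); // factorizes A
  // Sparse right-hand side: the result is assembled block-wise through
  // sparse_solve_retval_base::defaultEvalTo() (see SparseSolve.h below).
  X = lu.solve(B);
}
```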
 
diff --git a/ThirdParty/eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h b/ThirdParty/eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h
index f01720362de..3a48cecf769 100644
--- a/ThirdParty/eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h
+++ b/ThirdParty/eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h
@@ -39,7 +39,7 @@ inline int umfpack_symbolic(int n_row,int n_col,
                             const int Ap[], const int Ai[], const std::complex<double> Ax[], void **Symbolic,
                             const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
 {
-  return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&internal::real_ref(Ax[0]),0,Symbolic,Control,Info);
+  return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Control,Info);
 }
 
 inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[],
@@ -53,7 +53,7 @@ inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex<d
                             void *Symbolic, void **Numeric,
                             const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
 {
-  return umfpack_zi_numeric(Ap,Ai,&internal::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info);
+  return umfpack_zi_numeric(Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info);
 }
 
 inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[],
@@ -67,7 +67,7 @@ inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::co
                           std::complex<double> X[], const std::complex<double> B[], void *Numeric,
                           const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
 {
-  return umfpack_zi_solve(sys,Ap,Ai,&internal::real_ref(Ax[0]),0,&internal::real_ref(X[0]),0,&internal::real_ref(B[0]),0,Numeric,Control,Info);
+  return umfpack_zi_solve(sys,Ap,Ai,&numext::real_ref(Ax[0]),0,&numext::real_ref(X[0]),0,&numext::real_ref(B[0]),0,Numeric,Control,Info);
 }
 
 inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double)
@@ -89,9 +89,9 @@ inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui
 inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex<double> Lx[], int Up[], int Ui[], std::complex<double> Ux[],
                                int P[], int Q[], std::complex<double> Dx[], int *do_recip, double Rs[], void *Numeric)
 {
-  double& lx0_real = internal::real_ref(Lx[0]);
-  double& ux0_real = internal::real_ref(Ux[0]);
-  double& dx0_real = internal::real_ref(Dx[0]);
+  double& lx0_real = numext::real_ref(Lx[0]);
+  double& ux0_real = numext::real_ref(Ux[0]);
+  double& dx0_real = numext::real_ref(Dx[0]);
   return umfpack_zi_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q,
                                 Dx?&dx0_real:0,0,do_recip,Rs,Numeric);
 }
@@ -103,7 +103,7 @@ inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle,
 
 inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
 {
-  double& mx_real = internal::real_ref(*Mx);
+  double& mx_real = numext::real_ref(*Mx);
   return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);
 }
 
@@ -114,7 +114,7 @@ inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *N
  * using the UmfPack library. The sparse matrix A must be square and have full rank.
   * The vectors or matrices X and B can be either dense or sparse.
   *
-  * \WARNING The input matrix A should be in a \b compressed and \b column-major form.
+  * \warning The input matrix A should be in a \b compressed and \b column-major form.
   * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.
   * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
   *
@@ -215,14 +215,14 @@ class UmfPackLU : internal::noncopyable
       *
       * \sa compute()
       */
-//     template<typename Rhs>
-//     inline const internal::sparse_solve_retval<UmfPAckLU, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
-//     {
-//       eigen_assert(m_isInitialized && "UmfPAckLU is not initialized.");
-//       eigen_assert(rows()==b.rows()
-//                 && "UmfPAckLU::solve(): invalid number of rows of the right hand side matrix b");
-//       return internal::sparse_solve_retval<UmfPAckLU, Rhs>(*this, b.derived());
-//     }
+    template<typename Rhs>
+    inline const internal::sparse_solve_retval<UmfPackLU, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "UmfPackLU is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "UmfPackLU::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::sparse_solve_retval<UmfPackLU, Rhs>(*this, b.derived());
+    }
 
    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
       *
@@ -381,7 +381,8 @@ bool UmfPackLU<MatrixType>::_solve(const MatrixBase<BDerived> &b, MatrixBase<XDe
   const int rhsCols = b.cols();
   eigen_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major rhs yet");
   eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major result yet");
-
+  eigen_assert(b.derived().data() != x.derived().data() && "UmfPack does not support in-place solve");
+  
   int errorCode;
   for (int j=0; j<rhsCols; ++j)
   {
@@ -420,7 +421,7 @@ struct sparse_solve_retval<UmfPackLU<_MatrixType>, Rhs>
 
   template<typename Dest> void evalTo(Dest& dst) const
   {
-    dec()._solve(rhs(),dst);
+    this->defaultEvalTo(dst);
   }
 };
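
And the UmfPack counterpart (assumes UMFPACK is installed; note the new assertion above: the right-hand side and the solution must be distinct objects, since UMFPACK cannot solve in place):

```cpp
#include <Eigen/SparseCore>
#include <Eigen/UmfPackSupport>

void solveWithUmfpack(const Eigen::SparseMatrix<double>& A,
                      const Eigen::VectorXd& b,
                      Eigen::VectorXd& x)
{
  Eigen::UmfPackLU<Eigen::SparseMatrix<double> > lu(A);
  x = lu.solve(b);  // x must not alias b (no in-place solve)
}
```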
 
diff --git a/ThirdParty/eigen3/Eigen/src/misc/SparseSolve.h b/ThirdParty/eigen3/Eigen/src/misc/SparseSolve.h
index 272c4a479d7..244bb8ec753 100644
--- a/ThirdParty/eigen3/Eigen/src/misc/SparseSolve.h
+++ b/ThirdParty/eigen3/Eigen/src/misc/SparseSolve.h
@@ -47,6 +47,23 @@ template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval_b
   }
 
   protected:
+    template<typename DestScalar, int DestOptions, typename DestIndex>
+    inline void defaultEvalTo(SparseMatrix<DestScalar,DestOptions,DestIndex>& dst) const
+    {
+      // We process the sparse rhs in blocks of NbColsAtOnce columns, temporarily stored in a dense matrix.
+      static const int NbColsAtOnce = 4;
+      int rhsCols = m_rhs.cols();
+      int size = m_rhs.rows();
+      Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,rhsCols);
+      Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,rhsCols);
+      for(int k=0; k<rhsCols; k+=NbColsAtOnce)
+      {
+        int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
+        tmp.leftCols(actualCols) = m_rhs.middleCols(k,actualCols);
+        tmpX.leftCols(actualCols) = m_dec.solve(tmp.leftCols(actualCols));
+        dst.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();
+      }
+    }
     const DecompositionType& m_dec;
     typename Rhs::Nested m_rhs;
 };
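
To illustrate the blocking scheme in defaultEvalTo() above, a standalone sketch of the loop arithmetic only: with, say, 10 right-hand-side columns and NbColsAtOnce = 4, the column ranges [0,4), [4,8) and [8,10) are solved in turn.

```cpp
#include <algorithm>
#include <cstdio>

int main()
{
  const int rhsCols = 10, NbColsAtOnce = 4;
  for (int k = 0; k < rhsCols; k += NbColsAtOnce)
  {
    // The last block may be narrower than NbColsAtOnce.
    int actualCols = std::min(rhsCols - k, NbColsAtOnce);
    std::printf("densify + solve columns [%d,%d)\n", k, k + actualCols);
  }
  return 0;
}
```
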
-- 
GitLab