#ifndef EIGEN_LEVENBERGMARQUARDT__H
#define EIGEN_LEVENBERGMARQUARDT__H

// ...

namespace LevenbergMarquardtSpace {
    // ... (the Status enum returned by the minimizers)
}
/** \brief Performs non linear optimization over a non-linear function,
  * using a variant of the Levenberg Marquardt algorithm.
  */
template<typename FunctorType, typename Scalar=double>
class LevenbergMarquardt
{
    // ... (public interface, solver state and the 'parameters' settings elided)
};
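/* Illustrative usage sketch (not part of the original header). It assumes the
 * functor convention this module expects: residuals through operator(), an
 * analytic Jacobian through df(), plus values()/inputs() and the usual
 * typedefs. The problem and all names (CurveFittingFunctor, fit_line, t, y)
 * are hypothetical.
 *
 * \code
 * #include <unsupported/Eigen/NonLinearOptimization>
 *
 * // Fit y_i ~= a*t_i + b, with unknowns x = (a, b).
 * struct CurveFittingFunctor
 * {
 *   typedef double Scalar;
 *   enum { InputsAtCompileTime = Eigen::Dynamic, ValuesAtCompileTime = Eigen::Dynamic };
 *   typedef Eigen::VectorXd InputType;
 *   typedef Eigen::VectorXd ValueType;
 *   typedef Eigen::MatrixXd JacobianType;
 *
 *   Eigen::VectorXd t, y;                          // data samples
 *   int inputs() const { return 2; }               // number of unknowns n
 *   int values() const { return int(t.size()); }   // number of residuals m (must be >= n)
 *
 *   // residuals r_i = a*t_i + b - y_i; a negative return value aborts the solver
 *   int operator()(const Eigen::VectorXd &x, Eigen::VectorXd &fvec) const {
 *     fvec = (x[0]*t.array() + x[1] - y.array()).matrix();
 *     return 0;
 *   }
 *   // analytic Jacobian: row i is (t_i, 1)
 *   int df(const Eigen::VectorXd &, Eigen::MatrixXd &fjac) const {
 *     fjac.col(0) = t;
 *     fjac.col(1).setOnes();
 *     return 0;
 *   }
 * };
 *
 * Eigen::LevenbergMarquardtSpace::Status
 * fit_line(const Eigen::VectorXd &t, const Eigen::VectorXd &y, Eigen::VectorXd &x)
 * {
 *   CurveFittingFunctor functor;
 *   functor.t = t;  functor.y = y;
 *   Eigen::LevenbergMarquardt<CurveFittingFunctor> lm(functor);
 *   return lm.lmder1(x);   // x: initial guess in, solution out; default tol = sqrt(epsilon)
 * }
 * \endcode
 */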
template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::lmder1(FVectorType &x, const Scalar tol)
{
    n = x.size();
    m = functor.values();

    /* check the input parameters for errors. */
    if (n <= 0 || m < n || tol < 0.)
        return LevenbergMarquardtSpace::ImproperInputParameters;

    resetParameters();
    parameters.ftol = tol;
    parameters.xtol = tol;
    parameters.maxfev = 100*(n+1);

    return minimize(x);
}
template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimize(FVectorType &x)
{
    LevenbergMarquardtSpace::Status status = minimizeInit(x);
    if (status == LevenbergMarquardtSpace::ImproperInputParameters)
        return status;
    do {
        status = minimizeOneStep(x);
    } while (status == LevenbergMarquardtSpace::Running);
    return status;
}
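/* Sketch (not in the original file): the same init/step loop can be driven by
 * hand when the caller wants to inspect progress after every iteration. The
 * helper name minimize_verbose is hypothetical.
 *
 * \code
 * template<typename Functor>
 * Eigen::LevenbergMarquardtSpace::Status
 * minimize_verbose(Eigen::LevenbergMarquardt<Functor> &lm, Eigen::VectorXd &x)
 * {
 *   Eigen::LevenbergMarquardtSpace::Status status = lm.minimizeInit(x);
 *   if (status == Eigen::LevenbergMarquardtSpace::ImproperInputParameters)
 *     return status;
 *   do {
 *     status = lm.minimizeOneStep(x);
 *     // inspect or log the current iterate x here
 *   } while (status == Eigen::LevenbergMarquardtSpace::Running);
 *   return status;
 * }
 * \endcode
 */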
template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(FVectorType &x)
{
    n = x.size();
    m = functor.values();

    wa1.resize(n); wa2.resize(n); wa3.resize(n);
    // ... (remaining work arrays resized similarly)
    if (!useExternalScaling)
        diag.resize(n);
    eigen_assert( (!useExternalScaling || diag.size()==n) &&
                  "When useExternalScaling is set, the caller must provide a valid 'diag'");

    /* check the input parameters for errors. */
    if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.)
        return LevenbergMarquardtSpace::ImproperInputParameters;
    if (useExternalScaling)
        for (Index j = 0; j < n; ++j)
            if (diag[j] <= 0.)
                return LevenbergMarquardtSpace::ImproperInputParameters;

    /* evaluate the function at the starting point and calculate its norm. */
    if ( functor(x, fvec) < 0)
        return LevenbergMarquardtSpace::UserAsked;
    fnorm = fvec.stableNorm();
    // ...
}
template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
{
    using std::abs;
    using std::sqrt;

    /* calculate the jacobian matrix. */
    Index df_ret = functor.df(x, fjac);
    if (df_ret < 0)
        return LevenbergMarquardtSpace::UserAsked;
    // ... (a positive df_ret signals numerical differentiation and is counted in nfev, otherwise njev is incremented)

    /* compute the qr factorization of the jacobian. */
    wa2 = fjac.colwise().blueNorm();
    ColPivHouseholderQR<JacobianType> qrfac(fjac);
    fjac = qrfac.matrixQR();
    permutation = qrfac.colsPermutation();

    /* on the first iteration and if external scaling is not used, scale
       according to the column norms of the initial jacobian, then initialize
       the step bound delta. */
    if (iter == 1) {
        if (!useExternalScaling)
            for (Index j = 0; j < n; ++j)
                diag[j] = (wa2[j]==0.)? 1. : wa2[j];
        xnorm = diag.cwiseProduct(x).stableNorm();
        delta = parameters.factor * xnorm;
        if (delta == 0.)
            delta = parameters.factor;
    }

    /* form (q transpose)*fvec and store the first n components in qtf. */
    wa4 = fvec;
    wa4.applyOnTheLeft(qrfac.householderQ().adjoint());
    qtf = wa4.head(n);

    /* compute the norm of the scaled gradient. */
    gnorm = 0.;
    if (fnorm != 0.)
        for (Index j = 0; j < n; ++j)
            if (wa2[permutation.indices()[j]] != 0.)
                gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));

    /* test for convergence of the gradient norm. */
    if (gnorm <= parameters.gtol)
        return LevenbergMarquardtSpace::CosinusTooSmall;

    /* rescale if necessary. */
    if (!useExternalScaling)
        diag = diag.cwiseMax(wa2);

    do {
        /* determine the levenberg-marquardt parameter. */
        internal::lmpar2<Scalar>(qrfac, diag, qtf, delta, par, wa1);
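        /* Background note (not part of the original source): lmpar2 is the
         * MINPACK LMPAR routine working on the ColPivHouseholderQR object.
         * Given the QR factorization of the Jacobian J, the scaling D =
         * diag(diag) and the step bound delta, it determines par >= 0 such
         * that the Levenberg-Marquardt step p defined by
         *
         *     (J^T J + par * D^T D) p = -J^T f
         *
         * has scaled norm ||D p|| approximately equal to delta (par = 0 when
         * the Gauss-Newton step already lies inside the bound). wa1 receives
         * the solve result; the lines below flip its sign and form the trial
         * point x + p.
         */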
        /* store the direction p and x + p. calculate the norm of p. */
        wa1 = -wa1;
        wa2 = x + wa1;
        pnorm = diag.cwiseProduct(wa1).stableNorm();

        /* evaluate the function at x + p and calculate its norm. */
        if ( functor(wa2, wa4) < 0)
            return LevenbergMarquardtSpace::UserAsked;
        ++nfev;
        fnorm1 = wa4.stableNorm();

        /* compute the scaled actual reduction. */
        actred = -1.;
        if (Scalar(.1) * fnorm1 < fnorm)
            actred = 1. - numext::abs2(fnorm1 / fnorm);

        /* compute the scaled predicted reduction and the scaled directional derivative. */
        wa3 = fjac.template triangularView<Upper>() * (qrfac.colsPermutation().inverse() * wa1);
        temp1 = numext::abs2(wa3.stableNorm() / fnorm);
        temp2 = numext::abs2(sqrt(par) * pnorm / fnorm);
        prered = temp1 + temp2 / Scalar(.5);
        dirder = -(temp1 + temp2);

        /* compute the ratio of the actual to the predicted reduction. */
        ratio = 0.;
        if (prered != 0.)
            ratio = actred / prered;

        /* update the step bound. */
        if (ratio <= Scalar(.25)) {
            if (actred >= 0.)
                temp = Scalar(.5);
            if (actred < 0.)
                temp = Scalar(.5) * dirder / (dirder + Scalar(.5) * actred);
            if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1))
                temp = Scalar(.1);
            delta = temp * (std::min)(delta, pnorm / Scalar(.1));
            par /= temp;
        } else if (!(par != 0. && ratio < Scalar(.75))) {
            delta = pnorm / Scalar(.5);
            par = Scalar(.5) * par;
        }
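        /* Note (not part of the original source): ratio = actred/prered is the
         * classic trust-region gain ratio. A poor model fit (ratio <= 0.25)
         * shrinks the step bound delta by a factor temp in [0.1, 0.5] and
         * divides par by the same factor, biasing the next step towards
         * gradient descent; a good fit (ratio >= 0.75, or par == 0) sets delta
         * to twice the step norm and halves par, biasing it towards the
         * Gauss-Newton step.
         */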
        /* test for successful iteration. */
        if (ratio >= Scalar(1e-4)) {
            /* successful iteration. update x, fvec, and their norms. */
            x = wa2;
            wa2 = diag.cwiseProduct(x);
            fvec = wa4;
            xnorm = wa2.stableNorm();
            fnorm = fnorm1;
            ++iter;
        }

        /* tests for convergence. */
        if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
            return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall;
        if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
            return LevenbergMarquardtSpace::RelativeReductionTooSmall;
        if (delta <= parameters.xtol * xnorm)
            return LevenbergMarquardtSpace::RelativeErrorTooSmall;

        /* tests for termination and stringent tolerances. */
        if (nfev >= parameters.maxfev)
            return LevenbergMarquardtSpace::TooManyFunctionEvaluation;
        if (abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.)
            return LevenbergMarquardtSpace::FtolTooSmall;
        if (delta <= NumTraits<Scalar>::epsilon() * xnorm)
            return LevenbergMarquardtSpace::XtolTooSmall;
        if (gnorm <= NumTraits<Scalar>::epsilon())
            return LevenbergMarquardtSpace::GtolTooSmall;

    } while (ratio < Scalar(1e-4));

    return LevenbergMarquardtSpace::Running;
}
template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::lmstr1(FVectorType &x, const Scalar tol)
{
    n = x.size();
    m = functor.values();

    /* check the input parameters for errors. */
    if (n <= 0 || m < n || tol < 0.)
        return LevenbergMarquardtSpace::ImproperInputParameters;

    resetParameters();
    parameters.ftol = tol;
    parameters.xtol = tol;
    parameters.maxfev = 100*(n+1);

    return minimizeOptimumStorage(x);
}
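/* Sketch (not part of the original header): the "optimum storage" drivers
 * (lmstr1 / minimizeOptimumStorage) never form the full m x n Jacobian; the
 * functor is expected to return it one row at a time through a three-argument
 * df(). The functor below is hypothetical, and the rownb offset convention is
 * an assumption mirroring MINPACK's lmstr.
 *
 * \code
 * struct RowWiseCurveFit
 * {
 *   typedef double Scalar;
 *   enum { InputsAtCompileTime = Eigen::Dynamic, ValuesAtCompileTime = Eigen::Dynamic };
 *   typedef Eigen::VectorXd InputType;
 *   typedef Eigen::VectorXd ValueType;
 *   typedef Eigen::MatrixXd JacobianType;
 *
 *   Eigen::VectorXd t, y;
 *   int inputs() const { return 2; }
 *   int values() const { return int(t.size()); }
 *
 *   int operator()(const Eigen::VectorXd &x, Eigen::VectorXd &fvec) const {
 *     fvec = (x[0]*t.array() + x[1] - y.array()).matrix();
 *     return 0;
 *   }
 *   // One Jacobian row per call; the requested row is encoded in rownb
 *   // (assumed: rownb == 2 corresponds to row 0, as in MINPACK's lmstr).
 *   int df(const Eigen::VectorXd &, Eigen::VectorXd &jac_row, int rownb) const {
 *     int i = rownb - 2;
 *     jac_row[0] = t[i];   // d r_i / d a
 *     jac_row[1] = 1.;     // d r_i / d b
 *     return 0;
 *   }
 * };
 * \endcode
 */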
template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(FVectorType &x)
{
    n = x.size();
    m = functor.values();

    wa1.resize(n); wa2.resize(n); wa3.resize(n);
    // ... (remaining work arrays resized; fjac holds only an n x n factor here)
    if (!useExternalScaling)
        diag.resize(n);
    eigen_assert( (!useExternalScaling || diag.size()==n) &&
                  "When useExternalScaling is set, the caller must provide a valid 'diag'");

    /* check the input parameters for errors. */
    if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.)
        return LevenbergMarquardtSpace::ImproperInputParameters;
    if (useExternalScaling)
        for (Index j = 0; j < n; ++j)
            if (diag[j] <= 0.)
                return LevenbergMarquardtSpace::ImproperInputParameters;

    /* evaluate the function at the starting point and calculate its norm. */
    if ( functor(x, fvec) < 0)
        return LevenbergMarquardtSpace::UserAsked;
    fnorm = fvec.stableNorm();
    // ...
}
template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorType &x)
{
    using std::abs;
    using std::sqrt;

    Index i, j;

    /* compute the qr factorization of the jacobian matrix calculated one row
       at a time, while simultaneously forming (q transpose)*fvec and storing
       the first n components in qtf. */
    qtf.fill(0.);
    fjac.fill(0.);
    for (i = 0; i < m; ++i) {
        // ... (the functor fills wa3 with the current Jacobian row; a negative
        //      return value yields LevenbergMarquardtSpace::UserAsked)
        internal::rwupdt<Scalar>(fjac, wa3, qtf, fvec[i]);
    }
    ++njev;
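    /* Background note (not part of the original source): internal::rwupdt is
     * MINPACK's RWUPDT. Each call appends one Jacobian row to the factorization
     * using Givens rotations, updating the n x n upper triangular factor held
     * in fjac and accumulating the matching component of Q^T * fvec into qtf.
     * This is what lets the "optimum storage" path work with an n x n matrix
     * even when m >> n.
     */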
    /* if the jacobian is rank deficient, call qrfac to
       reorder its columns and update the components of qtf. */
    bool sing = false;
    for (j = 0; j < n; ++j) {
        if (fjac(j,j) == 0.)
            sing = true;
        wa2[j] = fjac.col(j).head(j).stableNorm();
    }
    permutation.setIdentity(n);
    if (sing) {
        wa2 = fjac.colwise().blueNorm();
        ColPivHouseholderQR<JacobianType> qrfac(fjac);
        fjac = qrfac.matrixQR();
        wa1 = fjac.diagonal();
        fjac.diagonal() = qrfac.hCoeffs();
        permutation = qrfac.colsPermutation();
        for (Index ii = 0; ii < fjac.cols(); ii++)
            fjac.col(ii).segment(ii+1, fjac.rows()-ii-1) *= fjac(ii,ii); // rescale vectors

        /* apply the same transformations to qtf. */
        for (j = 0; j < n; ++j) {
            if (fjac(j,j) != 0.) {
                sum = 0.;
                for (i = j; i < n; ++i)
                    sum += fjac(i,j) * qtf[i];
                temp = -sum / fjac(j,j);
                for (i = j; i < n; ++i)
                    qtf[i] += fjac(i,j) * temp;
            }
            fjac(j,j) = wa1[j];
        }
    }
    /* on the first iteration and if external scaling is not used, scale
       according to the column norms of the initial jacobian, then initialize
       the step bound delta. */
    if (iter == 1) {
        if (!useExternalScaling)
            for (j = 0; j < n; ++j)
                diag[j] = (wa2[j]==0.)? 1. : wa2[j];
        xnorm = diag.cwiseProduct(x).stableNorm();
        delta = parameters.factor * xnorm;
        if (delta == 0.)
            delta = parameters.factor;
    }

    /* compute the norm of the scaled gradient. */
    gnorm = 0.;
    if (fnorm != 0.)
        for (j = 0; j < n; ++j)
            if (wa2[permutation.indices()[j]] != 0.)
                gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));

    /* test for convergence of the gradient norm. */
    if (gnorm <= parameters.gtol)
        return LevenbergMarquardtSpace::CosinusTooSmall;

    /* rescale if necessary. */
    if (!useExternalScaling)
        diag = diag.cwiseMax(wa2);

    do {
        /* determine the levenberg-marquardt parameter. */
        internal::lmpar<Scalar>(fjac, permutation.indices(), diag, qtf, delta, par, wa1);

        /* store the direction p and x + p. calculate the norm of p. */
        wa1 = -wa1;
        wa2 = x + wa1;
        pnorm = diag.cwiseProduct(wa1).stableNorm();

        /* evaluate the function at x + p and calculate its norm. */
        if ( functor(wa2, wa4) < 0)
            return LevenbergMarquardtSpace::UserAsked;
        ++nfev;
        fnorm1 = wa4.stableNorm();

        /* compute the scaled actual reduction. */
        actred = -1.;
        if (Scalar(.1) * fnorm1 < fnorm)
            actred = 1. - numext::abs2(fnorm1 / fnorm);

        /* compute the scaled predicted reduction and the scaled directional derivative. */
        wa3 = fjac.topLeftCorner(n,n).template triangularView<Upper>() * (permutation.inverse() * wa1);
        temp1 = numext::abs2(wa3.stableNorm() / fnorm);
        temp2 = numext::abs2(sqrt(par) * pnorm / fnorm);
        prered = temp1 + temp2 / Scalar(.5);
        dirder = -(temp1 + temp2);

        /* compute the ratio of the actual to the predicted reduction. */
        ratio = 0.;
        if (prered != 0.)
            ratio = actred / prered;

        /* update the step bound, exactly as in minimizeOneStep(). */
        if (ratio <= Scalar(.25)) {
            temp = Scalar(.5) * dirder / (dirder + Scalar(.5) * actred);
            // ... (clamp temp, shrink delta, increase par as above)
        } else if (!(par != 0. && ratio < Scalar(.75))) {
            delta = pnorm / Scalar(.5);
            par = Scalar(.5) * par;
        }

        /* test for successful iteration: update x, fvec, and their norms. */
        if (ratio >= Scalar(1e-4)) {
            x = wa2;
            wa2 = diag.cwiseProduct(x);
            fvec = wa4;
            xnorm = wa2.stableNorm();
            fnorm = fnorm1;
            ++iter;
        }

        /* tests for convergence. */
        if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
            return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall;
        if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
            return LevenbergMarquardtSpace::RelativeReductionTooSmall;
        if (delta <= parameters.xtol * xnorm)
            return LevenbergMarquardtSpace::RelativeErrorTooSmall;

        /* tests for termination and stringent tolerances. */
        if (nfev >= parameters.maxfev)
            return LevenbergMarquardtSpace::TooManyFunctionEvaluation;
        // ... (the same NumTraits<Scalar>::epsilon() based tests as in minimizeOneStep())

    } while (ratio < Scalar(1e-4));

    return LevenbergMarquardtSpace::Running;
}
template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(FVectorType &x)
{
    LevenbergMarquardtSpace::Status status = minimizeOptimumStorageInit(x);
    if (status == LevenbergMarquardtSpace::ImproperInputParameters)
        return status;
    do {
        status = minimizeOptimumStorageOneStep(x);
    } while (status == LevenbergMarquardtSpace::Running);
    return status;
}
template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::lmdif1(
        FunctorType &functor,
        FVectorType &x,
        Index *nfev,
        const Scalar tol
        )
{
    Index n = x.size();
    Index m = functor.values();

    /* check the input parameters for errors. */
    if (n <= 0 || m < n || tol < 0.)
        return LevenbergMarquardtSpace::ImproperInputParameters;

    NumericalDiff<FunctorType> numDiff(functor);
    // embedded LevenbergMarquardt
    LevenbergMarquardt<NumericalDiff<FunctorType>, Scalar> lm(numDiff);
    lm.parameters.ftol = tol;
    lm.parameters.xtol = tol;
    lm.parameters.maxfev = 200*(n+1);

    LevenbergMarquardtSpace::Status info = LevenbergMarquardtSpace::Status(lm.minimize(x));
    if (nfev)
        *nfev = lm.nfev;
    return info;
}
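/* Illustrative sketch (not part of the original header): the static lmdif1
 * driver wraps the functor in NumericalDiff, so no analytic Jacobian is
 * needed; only operator(), values()/inputs() and the usual typedefs are
 * required. It reuses the hypothetical CurveFittingFunctor from the sketch
 * near the top of this file (its df() is hidden by NumericalDiff).
 *
 * \code
 * Eigen::LevenbergMarquardtSpace::Status
 * fit_line_numerical(const Eigen::VectorXd &t, const Eigen::VectorXd &y, Eigen::VectorXd &x)
 * {
 *   CurveFittingFunctor functor;
 *   functor.t = t;  functor.y = y;
 *   Eigen::Index nfev = 0;   // receives the number of function evaluations
 *   return Eigen::LevenbergMarquardt<CurveFittingFunctor>::lmdif1(functor, x, &nfev);
 * }
 * \endcode
 */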
// ...

#endif // EIGEN_LEVENBERGMARQUARDT__H