Commit ff5a2043 authored by davidkep

update

parent 1082eb4b
......@@ -43,11 +43,13 @@ mest_options <- function (max_it = 200, eps = 1e-8) {
#' @param eps numerical tolerance to check for convergence.
#'
#' @return options for the S-Estimate algorithm.
#' @export
s_algo_options <- function (explore_it = 10, max_it = 500,
eps = 1e-8) {
s_algo_options <- function (explore_it = 10, max_it = 500, eps = 1e-8,
tightening = c('none', 'adaptive', 'exponential'),
tightening_steps = 10) {
list(max_it = as.integer(max_it[[1L]]),
eps = as.numeric(eps[[1L]]),
tightening = .tightening_id(match.arg(tightening)),
tightening_steps = as.integer(tightening_steps[[1L]]),
cold_explore_it = as.integer(explore_it[[1L]]))
}
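
For orientation, a minimal usage sketch of the extended `s_algo_options()` signature (the argument values below are illustrative; the tightening IDs are taken from `.tightening_id()` defined further down in this commit):

# Request adaptive tightening of the convex surrogate over 5 steps.
s_opts <- s_algo_options(explore_it = 10, max_it = 500, eps = 1e-8,
                         tightening = 'adaptive', tightening_steps = 5)
# The returned list stores the integer ID produced by .tightening_id()
# (0 = 'none', 1 = 'exponential', 2 = 'adaptive') for the C++ side.
s_opts$tightening  # 2
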
......@@ -163,3 +165,8 @@ en_ridge_options <- function () {
1L)
}
.tightening_id <- function (tightening) {
switch (tightening, exponential = 1L, adaptive = 2L, 0L)
}
......@@ -4,7 +4,8 @@
enpy_initial_estimates <- function (x, y, alpha, lambdas, bdp = 0.25, cc,
include_intercept = TRUE,
enpy_opts = enpy_options(),
mscale_maxit, mscale_eps, en_options) {
mscale_maxit = 200, mscale_eps = 1e-8,
en_options) {
if (missing(cc)) {
cc <- .bisquare_consistency_const(bdp)
}
......@@ -33,19 +34,17 @@ enpy_initial_estimates <- function (x, y, alpha, lambdas, bdp = 0.25, cc,
en_options <- NULL
}
optional_args <- list()
optional_args <- list(en_options = en_options)
optional_args$en_options <- if (is.null(en_options)) {
if (alpha > 0) {
if (is.null(en_options)) {
optional_args$en_options <- if (alpha > 0) {
en_admm_options()
} else {
en_ridge_options()
}
} else {
en_options
}
if (optional_args$en_options$en_algorithm == 'admm') {
if (optional_args$en_options$algorithm == 'admm') {
optional_args$en_options <- .choose_admm_algorithm(optional_args$en_options,
alpha, x)
}
......
......@@ -29,7 +29,9 @@
#' @export
pense <- function(x, y, alpha, lambdas, cold_lambdas, penalty_loadings,
additional_initial_estimates, include_intercept = TRUE,
max_it = 200, eps = 1e-5, explore_it = 10, en_algorithm_opts,
max_it = 200, eps = 1e-5, explore_it = 10,
tightening = c('none', 'adaptive', 'exponential'),
tightening_steps = 10L, en_algorithm_opts,
mest_opts = mest_options(), enpy_opts = enpy_options()) {
optional_args <- list()
......@@ -39,19 +41,17 @@ pense <- function(x, y, alpha, lambdas, cold_lambdas, penalty_loadings,
x_dim <- dim(x)
if (length(y) != x_dim[[1L]]) {
stop("Number of observations does not match between `x` and `y`.")
stop("Number of observations in `x` and `y` does not match.")
}
alpha <- as.numeric(alpha[[1L]])
lambdas <- sort(as.numeric(lambdas), decreasing = TRUE)
pense_opts <- list(
max_it = as.integer(max_it[[1L]]),
eps = as.numeric(eps[[1L]]),
cold_explore_it = as.integer(explore_it[[1L]]),
intercept = isTRUE(include_intercept),
mscale = mest_opts
)
pense_opts <- c(
list(mm_options = s_algo_options(explore_it = explore_it, max_it = max_it,
eps = eps, tightening = match.arg(tightening),
tightening_steps = tightening_steps)),
list(intercept = isTRUE(include_intercept), mscale = mest_opts))
if (alpha < sqrt(.Machine$double.eps)) {
alpha <- 0
......
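
A hedged sketch of how the new arguments flow from `pense()` into `mm_options`; the data are made up, and arguments not shown here (e.g. `penalty_loadings`, `additional_initial_estimates`) are assumed to be handled as optional by the parts of the function body not included in this hunk:

set.seed(1)
x <- matrix(rnorm(50 * 5), ncol = 5)
y <- rnorm(50)
# 'adaptive' tightening over 5 steps is forwarded to s_algo_options()
# and ends up in pense_opts$mm_options.
fit <- pense(x, y, alpha = 0.5,
             lambdas = c(2, 1, 0.5), cold_lambdas = c(2, 0.5),
             tightening = 'adaptive', tightening_steps = 5L,
             explore_it = 10, max_it = 200, eps = 1e-5)
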
CXX_STD = CXX11
PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) # -flto=thin
#PKG_CPPFLAGS= -D__STDC_LIMIT_MACROS -DHAVE_RCPP -DNSOPTIM_DETAILED_METRICS -DTESTTHAT_DISABLED
PKG_CPPFLAGS= -D__STDC_LIMIT_MACROS -DHAVE_RCPP -DNSOPTIM_DETAILED_METRICS -DTESTTHAT_DISABLED
PKG_CXXFLAGS= -fstrict-aliasing -Wstrict-aliasing
PKG_OPTFLAGS= -g -Os
PKG_OPTFLAGS= -g -O0
# -DNSOPTIM_METRICS_DISABLED -DNSOPTIM_DETAILED_METRICS
PKG_CPPFLAGS= -D__STDC_LIMIT_MACROS -DHAVE_RCPP -DTESTTHAT_DISABLED
# PKG_CPPFLAGS= -D__STDC_LIMIT_MACROS -DHAVE_RCPP -DTESTTHAT_DISABLED
......@@ -15,7 +15,7 @@ namespace pense {
constexpr double kDefaultConvergenceTolerance = 1e-6;
//! The threshold for any numeric value to be considered 0.
constexpr double kNumericZero = 1e-14;
constexpr double kNumericZero = 1e-12;
//! Integer IDs for the supported rho-functions
enum class RhoFunctionType {
......
......@@ -10,6 +10,7 @@
#include "enpy_psc.hpp"
using arma::mat;
using arma::uword;
using arma::vec;
using arma::uvec;
using arma::eig_sym;
......@@ -34,19 +35,25 @@ void FinalizePSC(const mat& sensitivity_matrix, PscResult* psc_result) {
return;
}
// Only use the Eigenvectors with non-zero Eigenvalue.
uvec shed_eigenvectors = find(eigenvalues < kNumericZero);
if (shed_eigenvectors.n_elem == eigenvalues.n_elem) {
// Only use the Eigenvectors with "non-zero" Eigenvalue.
// "Non-zero" means Eigenvalues larger than the numerical tolerance times the largest Eigenvalue.
uword cutoff_index = eigenvalues.n_elem - 1;
if (eigenvalues[cutoff_index] < kNumericZero) {
psc_result->pscs.reset();
psc_result->status = PscStatus::kError;
psc_result->status_message.append("all Eigenvalues are zero");
return;
}
const double cutoff_threshold = kNumericZero * eigenvalues[cutoff_index];
// Determine the index at which the Eigenvalues are too small (the largest one is apparently fine, c.f. line 42).
while (cutoff_index > 0 && eigenvalues[--cutoff_index] > cutoff_threshold) {}
// Hide the Eigenvectors for zero Eigenvalues
if (shed_eigenvectors.n_elem > 0) {
if (cutoff_index > 0) {
// Eigenvalues are ordered ascending, so we can simply strip all the eigenvectors for the first `k` eigenvalues.
eigenvectors.shed_cols(0, shed_eigenvectors[shed_eigenvectors.n_elem - 1]);
eigenvectors.shed_cols(0, cutoff_index);
}
// Project the sensitivity vectors onto the Eigenvectors (eq. 11-12 in the paper)
psc_result->pscs = sensitivity_matrix * eigenvectors;
......
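
The relative cutoff introduced here is easiest to see in a standalone R sketch (`G` merely stands in for the symmetric matrix whose eigendecomposition `FinalizePSC` computes; that part of the function is not shown in this hunk, and 1e-12 mirrors the new value of `kNumericZero`):

sens_mat <- matrix(rnorm(20 * 4), ncol = 4)   # stand-in sensitivity matrix
G <- crossprod(sens_mat)
ev <- eigen(G, symmetric = TRUE)              # note: R returns Eigenvalues in descending order
tol <- 1e-12
keep <- ev$values > tol * max(ev$values)      # "non-zero" relative to the largest Eigenvalue
pscs <- sens_mat %*% ev$vectors[, keep, drop = FALSE]
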
......@@ -12,6 +12,7 @@
#include "rcpp_integration.hpp"
#include "constants.hpp"
#include "alias.hpp"
#include "admm_s.hpp"
namespace pense {
namespace r_interface {
......@@ -185,7 +186,9 @@ struct is_admm : std::conditional<
typename Optimizer::Coefficients>>::value ||
std::is_same<Optimizer, nsoptim::AdmmLinearOptimizer<typename Optimizer::LossFunction,
typename Optimizer::PenaltyFunction,
typename Optimizer::Coefficients>>::value,
typename Optimizer::Coefficients>>::value ||
std::is_same<Optimizer, pense::AdmmSOptimizer<typename Optimizer::PenaltyFunction,
typename Optimizer::Coefficients>>::value,
std::true_type, std::false_type>::type {};
//! Create an object of the given Optimizer type using its default constructor without any arguments.
......@@ -200,11 +203,9 @@ Optimizer MakeOptimizer(const Rcpp::List& optional_args, int) {
if (optional_args.containsElementNamed("en_options")) {
const Rcpp::List en_options = Rcpp::as<Rcpp::List>(optional_args["en_options"]);
Optimizer optim(Rcpp::as<nsoptim::AdmmConfiguration>(optional_args["en_options"]));
if (en_options.containsElementNamed("eps")) {
optim.convergence_tolerance(pense::GetFallback(en_options, "eps", pense::kDefaultConvergenceTolerance));
}
return optim;
}
return Optimizer();
......
......@@ -115,39 +115,26 @@ T AddEstimate(T iterator, T end, Metrics* metrics, Rcpp::List* solutions) {
return iterator;
}
// template<typename InnerOptimizer, typename LsEnOptimizer>
// SEXP PenseRegression(SEXP r_x, SEXP r_y, SEXP r_penalties, SEXP r_initial_ests, SEXP r_enpy_inds, SEXP r_pense_opts,
// SEXP r_enpy_opts, const Rcpp::List& optional_args) {
// static_assert(std::is_same<typename InnerOptimizer::PenaltyFunction, typename LsEnOptimizer::PenaltyFunction>::value,
// "InnerOptimizer and LsEnOptimizer must work on the same penalty function.");
// static_assert(std::is_same<typename InnerOptimizer::Coefficients, typename LsEnOptimizer::Coefficients>::value,
// "InnerOptimizer and LsEnOptimizer must work on the same coefficients.");
//! Generic PENSE Regression Algorithm.
//!
//! For the list of parameters and the returned object, see the specialized functions below.
template<template <class...> class BaseOptimizer, class PenaltyFunction, class CoefsType>
SEXP PenseRegression(SEXP r_x, SEXP r_y, SEXP r_penalties, SEXP r_initial_ests, SEXP r_enpy_inds, SEXP r_pense_opts,
SEXP r_enpy_opts, const Rcpp::List& optional_args) {
using LsOptimizer = BaseOptimizer<nsoptim::LsLoss, PenaltyFunction, CoefsType>;
using SInnerOptimizer = BaseOptimizer<SLoss::ConvexSurrogateType, PenaltyFunction, CoefsType>;
using MMSOptimizer = MMOptimizer<SLoss, PenaltyFunction, SInnerOptimizer, CoefsType>;
template<class SOptimizer, class LsOptimizer>
SEXP PenseRegressionImpl(SOptimizer optimizer, SEXP r_x, SEXP r_y, SEXP r_penalties, SEXP r_initial_ests,
SEXP r_enpy_inds, SEXP r_pense_opts, SEXP r_enpy_opts, const Rcpp::List& optional_args) {
ConstRegressionDataPtr data(MakePredictorResponseData(r_x, r_y));
Rcpp::List pense_opts = as<Rcpp::List>(r_pense_opts);
Rcpp::List enpy_opts = as<Rcpp::List>(r_enpy_opts);
const auto pense_opts = as<Rcpp::List>(r_pense_opts);
const auto enpy_opts = as<Rcpp::List>(r_enpy_opts);
const auto mm_opts = as<Rcpp::List>(pense_opts["mm_options"]);
Mscale<RhoBisquare> mscale(as<Rcpp::List>(pense_opts["mscale"]));
SLoss loss(data, mscale, as<bool>(pense_opts["intercept"]));
auto penalties = MakePenalties<MMSOptimizer>(r_penalties, optional_args);
auto enpy_penalties = MakePenalties<MMSOptimizer>(r_penalties, r_enpy_inds, optional_args);
auto penalties = MakePenalties<SOptimizer>(r_penalties, optional_args);
auto enpy_penalties = MakePenalties<SOptimizer>(r_penalties, r_enpy_inds, optional_args);
MMSOptimizer optimizer(loss, penalties.front(), MakeOptimizer<SInnerOptimizer>(optional_args), pense_opts["max_it"]);
optimizer.convergence_tolerance(pense_opts["eps"]);
optimizer.convergence_tolerance(mm_opts["eps"]);
// Compute the initial estimators
Optima<MMSOptimizer> reg_path_cold;
Optima<SOptimizer> reg_path_cold;
Metrics metrics("pense");
// Compute the cold-based solutions
......@@ -155,7 +142,7 @@ SEXP PenseRegression(SEXP r_x, SEXP r_y, SEXP r_penalties, SEXP r_initial_ests,
auto ls_en_optimizer = MakeOptimizer<LsOptimizer>(optional_args);
auto py_res = PenaYohaiInitialEstimators(loss, enpy_penalties, ls_en_optimizer, enpy_opts);
StartCoefficientsList<LsOptimizer> cold_starts = PyResultToStartCoefficients(py_res, penalties, r_enpy_inds);
reg_path_cold = RegularizationPath(loss, penalties, optimizer, cold_starts, as<int>(pense_opts["cold_explore_it"]));
reg_path_cold = RegularizationPath(loss, penalties, optimizer, cold_starts, mm_opts["cold_explore_it"]);
// Move metrics from the PY results.
auto&& enpy_metrics = metrics.CreateSubMetrics("cold_enpy");
......@@ -174,8 +161,8 @@ SEXP PenseRegression(SEXP r_x, SEXP r_y, SEXP r_penalties, SEXP r_initial_ests,
Rcpp::checkUserInterrupt();
// Compute the others-based solutions
auto other_initial_ests = as<StartCoefficientsList<MMSOptimizer>>(r_initial_ests);
Optima<MMSOptimizer> reg_path_others;
auto other_initial_ests = as<StartCoefficientsList<SOptimizer>>(r_initial_ests);
Optima<SOptimizer> reg_path_others;
if (!other_initial_ests.empty()) {
reg_path_others = RegularizationPath(loss, penalties, optimizer, other_initial_ests,
as<int>(pense_opts["cold_explore_it"]));
......@@ -211,37 +198,66 @@ SEXP PenseRegression(SEXP r_x, SEXP r_y, SEXP r_penalties, SEXP r_initial_ests,
return wrap(pense_results);
}
//! Generic PENSE Regression Algorithm.
//!
//! For the list of parameters and the returned object, see the specialized functions below.
template<template <class...> class BaseOptimizer, class PenaltyFunction, class CoefsType>
SEXP PenseRegressionMM(SEXP x, SEXP y, SEXP penalties, SEXP initial_ests, SEXP enpy_inds, SEXP pense_opts,
SEXP enpy_opts, const Rcpp::List& optional_args) {
using LsOptimizer = BaseOptimizer<nsoptim::LsLoss, PenaltyFunction, CoefsType>;
using SInnerOptimizer = BaseOptimizer<SLoss::ConvexSurrogateType, PenaltyFunction, CoefsType>;
using MMSOptimizer = MMOptimizer<SLoss, PenaltyFunction, SInnerOptimizer, CoefsType>;
const auto mm_config = as<nsoptim::MMConfiguration>(as<Rcpp::List>(pense_opts)["mm_options"]);
MMSOptimizer optimizer(MakeOptimizer<SInnerOptimizer>(optional_args), mm_config);
return PenseRegressionImpl<MMSOptimizer, LsOptimizer>(std::move(optimizer), x, y, penalties, initial_ests,
enpy_inds, pense_opts, enpy_opts, optional_args);
}
template<typename PenaltyFunction>
SEXP PenseEnRegressionDispatch(SEXP x, SEXP y, SEXP penalties, SEXP initial_ests, SEXP enpy_inds,
SEXP pense_opts, SEXP enpy_opts, const Rcpp::List& optional_args) {
const auto en_options = optional_args.containsElementNamed("en_options") ?
as<Rcpp::List>(optional_args["en_options"]) : Rcpp::List();
const bool use_sparse_coefs = pense::GetFallback(en_options, "sparse", pense::kDefaultUseSparse);
switch (pense::GetFallback(en_options, "algorithm", pense::kDefaultEnAlgorithm)) {
case pense::EnAlgorithm::kDal:
// If using the DAL optimizer, always use sparse coefficients.
return PenseRegression<DalEnOptimizer, PenaltyFunction, SparseCoefs>(
return PenseRegressionMM<DalEnOptimizer, PenaltyFunction, SparseCoefs>(
x, y, penalties, initial_ests, enpy_inds, pense_opts, enpy_opts, optional_args);
case pense::EnAlgorithm::kVarStepAdmm:
if (use_sparse_coefs) {
return PenseRegression<AdmmVarStepOptimizer, PenaltyFunction, SparseCoefs>(
return PenseRegressionMM<AdmmVarStepOptimizer, PenaltyFunction, SparseCoefs>(
x, y, penalties, initial_ests, enpy_inds, pense_opts, enpy_opts, optional_args);
} else {
return PenseRegression<AdmmVarStepOptimizer, PenaltyFunction, DenseCoefs>(
return PenseRegressionMM<AdmmVarStepOptimizer, PenaltyFunction, DenseCoefs>(
x, y, penalties, initial_ests, enpy_inds, pense_opts, enpy_opts, optional_args);
}
// #warning Abusing kVarStepAdmm to test new ADMM-S algorithm
// using AdmmLinearLsSp = AdmmLinearOptimizer<nsoptim::LsLoss, PenaltyFunction, SparseCoefs>;
// using AdmmLinearLsDe = AdmmLinearOptimizer<nsoptim::LsLoss, PenaltyFunction, DenseCoefs>;
// using AdmmSSp = pense::AdmmSOptimizer<PenaltyFunction, SparseCoefs>;
// using AdmmSDe = pense::AdmmSOptimizer<PenaltyFunction, DenseCoefs>;
// if (use_sparse_coefs) {
// return PenseRegressionImpl<AdmmSSp, AdmmLinearLsSp>(MakeOptimizer<AdmmSSp>(optional_args),
// x, y, penalties, initial_ests, enpy_inds, pense_opts, enpy_opts, optional_args);
// } else {
// return PenseRegressionImpl<AdmmSDe, AdmmLinearLsDe>(MakeOptimizer<AdmmSDe>(optional_args),
// x, y, penalties, initial_ests, enpy_inds, pense_opts, enpy_opts, optional_args);
// }
case pense::EnAlgorithm::kRidge:
// If using the ridge optimizer, always use the EN penalty (there's no adaptiveness for Ridge) and dense
// coefficients.
return PenseRegression<AugmentedRidgeOptimizer, RidgePenalty, DenseCoefs>(
return PenseRegressionMM<AugmentedRidgeOptimizer, RidgePenalty, DenseCoefs>(
x, y, penalties, initial_ests, enpy_inds, pense_opts, enpy_opts, optional_args);
case pense::EnAlgorithm::kLinearizedAdmm:
default:
if (use_sparse_coefs) {
return PenseRegression<AdmmLinearOptimizer, PenaltyFunction, SparseCoefs>(
return PenseRegressionMM<AdmmLinearOptimizer, PenaltyFunction, SparseCoefs>(
x, y, penalties, initial_ests, enpy_inds, pense_opts, enpy_opts, optional_args);
} else {
return PenseRegression<AdmmLinearOptimizer, PenaltyFunction, DenseCoefs>(
return PenseRegressionMM<AdmmLinearOptimizer, PenaltyFunction, DenseCoefs>(
x, y, penalties, initial_ests, enpy_inds, pense_opts, enpy_opts, optional_args);
}
}
......
......@@ -38,5 +38,15 @@ nsoptim::DalEnConfiguration Exporter<nsoptim::DalEnConfiguration>::get() const {
return tmp;
}
nsoptim::MMConfiguration Exporter<nsoptim::MMConfiguration>::get() const {
const Rcpp::List config_list = as<const Rcpp::List>(r_obj_);
nsoptim::MMConfiguration tmp = {
as<int>(config_list["max_it"]),
static_cast<nsoptim::MMConfiguration::TighteningType>(as<int>(config_list["tightening"])),
as<int>(config_list["tightening_steps"])
};
return tmp;
}
} // namespace traits
} // namespace Rcpp
......@@ -31,6 +31,15 @@ template<> class Exporter< nsoptim::DalEnConfiguration > {
SEXP r_obj_;
};
//! Converter for an R-list to configuration options for the MM algorithm.
template<> class Exporter< nsoptim::MMConfiguration > {
public:
explicit Exporter(SEXP r_obj) noexcept : r_obj_(r_obj) {}
nsoptim::MMConfiguration get() const;
private:
SEXP r_obj_;
};
} // namespace traits
} // namespace Rcpp
......
......@@ -182,6 +182,17 @@ inline pense::RhoFunctionType GetFallback<pense::RhoFunctionType>(const Rcpp::Li
}
}
//! enum-specific overload
template<>
inline nsoptim::MMConfiguration::TighteningType GetFallback<nsoptim::MMConfiguration::TighteningType>(
const Rcpp::List& list, const std::string& name, const nsoptim::MMConfiguration::TighteningType fallback) noexcept {
try {
return static_cast<nsoptim::MMConfiguration::TighteningType>(Rcpp::as<int>(list[name]));
} catch (...) {
return fallback;
}
}
//! Wrap an Optimum for any EN-type penalty function into an R list.
//!
//! @param optimum the Optimum object.
......
......@@ -67,10 +67,12 @@ alias::Optima<Optimizer> RegularizationPath(const typename Optimizer::LossFuncti
bool first_run = true;
while ((penalty_it != penalties.cend()) && (start_it != starts.cend())) {
std::unique_ptr<Optimum> best_optimum;
int cold_candidates = 0;
if (!start_it->empty()) {
optim_cold.penalty(*penalty_it);
// For every starting point, iterate a few times to see if the solution is promising.
for (auto&& start : *start_it) {
++cold_candidates;
auto tmp_optim = optim_cold.Optimize(start, explore_it);
if (!best_optimum) {
best_optimum.reset(new Optimum(std::move(tmp_optim)));
......@@ -82,6 +84,17 @@ alias::Optima<Optimizer> RegularizationPath(const typename Optimizer::LossFuncti
// Fully iterate the "best" optimum.
if (best_optimum->status != nsoptim::OptimumStatus::kOk) {
best_optimum.reset(new Optimum(optim_cold.Optimize(best_optimum->coefs)));
if (!best_optimum->metrics) {
best_optimum->metrics.reset(new nsoptim::Metrics("cold_est"));
}
best_optimum->metrics->AddMetric("cold_candidates", cold_candidates);
best_optimum->metrics->AddMetric("solution", "cold");
best_optimum->metrics->AddMetric("refined", "yes");
} else {
if (!best_optimum->metrics) {
best_optimum->metrics.reset(new nsoptim::Metrics("cold_est"));
}
best_optimum->metrics->AddMetric("refined", "no");
}
}
......@@ -92,14 +105,19 @@ alias::Optima<Optimizer> RegularizationPath(const typename Optimizer::LossFuncti
first_run = false;
} else {
optim_warm.penalty(*penalty_it);
const auto updated_optim = next_start ? optim_warm.Optimize(*next_start) : optim_warm.Optimize();
auto updated_optim = next_start ? optim_warm.Optimize(*next_start) : optim_warm.Optimize();
if (best_optimum && best_optimum->objf_value < updated_optim.objf_value) {
results.push_front(std::move(*best_optimum));
// The updated optimum is worse than the "cold-start" optimum. Discard the updates and continue
// from the cold optimum.
next_start.reset(new Coefficients(best_optimum->coefs));
results.push_front(std::move(*best_optimum));
} else {
if (!updated_optim.metrics) {
updated_optim.metrics.reset(new nsoptim::Metrics("cold_est"));
}
updated_optim.metrics->AddMetric("solution", "updated");
updated_optim.metrics->AddMetric("cold_candidates", cold_candidates);
results.push_front(std::move(updated_optim));
// Either there is no new cold-start optimum, or the updated optimum is better. Continue to use the updated
// optimum.
......
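
The explore-then-refine bookkeeping instrumented above follows roughly this scheme; the sketch below uses a toy least-squares objective and a hypothetical toy_optimize() helper in place of the package's S-loss optimizer, purely to illustrate the control flow that the new cold_candidates / refined metrics describe:

# toy_optimize() is a hypothetical stand-in for Optimizer::Optimize:
# gradient descent on a least-squares objective for `it` iterations.
toy_optimize <- function(start, it, x, y, lr = 1e-3) {
  b <- start
  for (i in seq_len(it)) b <- b + lr * 2 * t(x) %*% (y - x %*% b)
  list(coefs = b, objf_value = sum((y - x %*% b)^2))
}
set.seed(2)
x <- matrix(rnorm(40 * 3), ncol = 3)
y <- rnorm(40)
cold_starts <- list(rep(0, 3), rnorm(3), rnorm(3))
explore_it <- 10
best <- NULL
cold_candidates <- 0
for (start in cold_starts) {
  cold_candidates <- cold_candidates + 1
  cand <- toy_optimize(start, explore_it, x, y)   # a few exploratory iterations per start
  if (is.null(best) || cand$objf_value < best$objf_value) best <- cand
}
# Fully iterate only the most promising start (in the package this step is
# skipped if the exploratory run has already converged).
best <- toy_optimize(best$coefs, 500, x, y)
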
......@@ -100,11 +100,7 @@ class Mscale {
//! @return the M-scale of the given values.
double operator()(const arma::vec& values, double scale = -1) const {
if (scale < 0) {
if (scale_ < 0) {
scale = robust_scale_location::kMadScaleConsistencyConstant * arma::median(arma::abs(values));
} else {
scale = scale_;
}
scale = InitialEstimate(values);
}
return ComputeMscale(values, scale);
}
......@@ -116,10 +112,7 @@ class Mscale {
//! @param values a vector of values.
//! @return the M-scale of the given values.
double operator()(const arma::vec& values) {
if (scale_ < 0) {
scale_ = robust_scale_location::kMadScaleConsistencyConstant * arma::median(arma::abs(values));
}
scale_ = ComputeMscale(values, scale_);
scale_ = ComputeMscale(values, InitialEstimate(values));
return scale_;
}
......@@ -163,6 +156,31 @@ class Mscale {
return scale;
}
double InitialEstimate(const arma::vec& values) const {
// If the internal scale is already set, use it as initial estimate.
if (scale_ > eps_) {
return scale_;
} else {
// Otherwise, try the MAD of the uncentered values.
const double mad = robust_scale_location::kMadScaleConsistencyConstant * arma::median(arma::abs(values));
if (mad > eps_) {
return mad;
} else if (static_cast<arma::uword>((1 - delta_) * values.n_elem) > values.n_elem / 2) {
// If the MAD is also (almost) 0, but the M-scale takes into account more observations than the MAD,
// compute the variance of the additional elements (i.e., the variance without considering the smallest
// 50% of the observations)
const arma::uword lower_index = values.n_elem / 2;
const arma::uword upper_index = static_cast<arma::uword>((1 - delta_) * values.n_elem);
arma::vec ordered_values = arma::sort(arma::abs(values));
const double scale = arma::var(ordered_values.rows(lower_index, upper_index));
if (scale > eps_) {
return scale;
}
}
}
return 0;
}
RhoFunction rho_;
double delta_;
int max_it_;
......
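
The fallback chain in InitialEstimate() can be mirrored in R (a sketch only; delta and eps are illustrative, indices are 1-based rather than 0-based, and 1.4826 stands in for kMadScaleConsistencyConstant):

initial_estimate <- function(values, prev_scale = -1, delta = 0.25, eps = 1e-12) {
  if (prev_scale > eps) return(prev_scale)        # reuse the previously computed scale
  mad0 <- 1.4826 * median(abs(values))            # MAD of the uncentered values
  if (mad0 > eps) return(mad0)
  n <- length(values)
  if (floor((1 - delta) * n) > n %/% 2) {
    # MAD is (almost) 0, but the M-scale uses more observations than the MAD:
    # variance of the absolute values between the median and the (1 - delta) quantile.
    ord <- sort(abs(values))
    s <- var(ord[(n %/% 2):floor((1 - delta) * n)])
    if (s > eps) return(s)
  }
  0
}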