Commit ba33c090 authored by davidkep's avatar davidkep

updates

parent 49819291
......@@ -7,13 +7,16 @@ export(en_admm_options)
export(en_dal_options)
export(enpy_initial_estimates)
export(enpy_options)
export(mest_options)
export(mloc)
export(mlocscale)
export(mscale)
export(mscale_algorithm_options)
export(pense)
export(pense_admm_options)
export(pense_mm_options)
export(rho_function)
export(tau_size)
importFrom(Rcpp,evalCpp)
importFrom(methods,as)
importFrom(stats,mad)
useDynLib(pense, .registration = TRUE)
This diff is collapsed.
......@@ -20,7 +20,7 @@
#' coefficient.
#' @param weights a vector of positive observation weights.
#' @param include_intercept include an intercept in the model.
#' @param en_algorithm_opts options for the EN algorithm. See [en_dal_options]
#' @param en_algorithm_opts options for the EN algorithm. See [en_algorithm_options]
#' for details.
#' @seealso [pense] for an S-estimate of regression with elastic net penalty.
#' @export
......@@ -29,7 +29,7 @@ elnet <- function(x, y, alpha, lambdas, penalty_loadings, weights,
optional_args <- list()
# Normalize input
y <- as.numeric(y)
y <- .as(y, 'numeric')
x_dim <- dim(x)
......@@ -37,8 +37,12 @@ elnet <- function(x, y, alpha, lambdas, penalty_loadings, weights,
stop("Number of observations does not match between `x` and `y`.")
}
alpha <- as.numeric(alpha[[1L]])
lambdas <- sort(as.numeric(lambdas), decreasing = TRUE)
alpha <- .as(alpha[[1L]], 'numeric')
lambdas <- sort(.as(lambdas, 'numeric'), decreasing = TRUE)
if (alpha < 0 || alpha > 1) {
stop("`alpha` is outside 0 and 1.")
}
if (alpha < sqrt(.Machine$double.eps)) {
alpha <- 0
......@@ -50,7 +54,7 @@ elnet <- function(x, y, alpha, lambdas, penalty_loadings, weights,
} else if (length(penalty_loadings) != x_dim[[2L]]) {
stop("Penalty loadings are not of length `p`.")
}
as.numeric(penalty_loadings)
.as(penalty_loadings, 'numeric')
} else {
NULL
}
......@@ -59,30 +63,17 @@ elnet <- function(x, y, alpha, lambdas, penalty_loadings, weights,
if (length(weights) != x_dim[[1L]]) {
stop("Observation weights are not the same length as `y`.")
}
as.numeric(weights)
.as(weights, 'numeric')
} else {
NULL
}
# Check EN algorithm
if (!missing(en_algorithm_opts)) {
if (!.check_en_algorithm(en_algorithm_opts, alpha)) {
en_algorithm_opts <- NULL
}
} else {
if (missing(en_algorithm_opts)) {
en_algorithm_opts <- NULL
}
optional_args$en_options <- if (is.null(en_algorithm_opts)) {
if (alpha > 0) {
en_admm_options()
} else {
en_ridge_options()
}
} else {
en_algorithm_opts
}
optional_args$en_options$algorithm <- .en_algorithm_id(optional_args$en_options)
# Check EN algorithm
optional_args$en_options <- .select_en_algorithm(en_algorithm_opts, alpha, x)
# Call internal function
res <- .elnet_internal(x, y, alpha, lambdas, penalty_loadings, weights,
......
#' ENPY Initial Estimates
#'
#' @param x `n` by `p` matrix of numeric predictors.
#' @param y vector of response values of length `n`.
#' @param alpha value for the alpha parameter, the balance between L1 and L2
#' penalization.
#' @param lambdas a vector of positive values for the lambda parameter.
#' @param penalty_loadings a vector of positive penalty loadings
#' (a.k.a. weights) for different penalization of each coefficient.
#' @param include_intercept include an intercept in the model.
#' @param bdp the desired breakdown point of the estimator, between 0 and 0.5.
#' @param cc consistency constant for the scale estimate. By default, the scale estimate is made consistent for the
#' given breakdown point under the Normal model.
#' @param en_algorithm_opts options for the EN algorithm. See [en_algorithm_options] for details.
#' @param mscale_opts options for the M-scale estimation. See [mscale_algorithm_options] for details.
#' @export
enpy_initial_estimates <- function (x, y, alpha, lambdas, bdp = 0.25, cc, include_intercept = TRUE,
enpy_opts = enpy_options(), mscale_maxit = 200, mscale_eps = 1e-9, en_options) {
enpy_opts = enpy_options(), mscale_opts = mscale_algorithm_options()) {
alpha <- .as(alpha[[1L]], 'numeric')
if (missing(cc)) {
cc <- .bisquare_consistency_const(bdp)
cc <- NULL
}
alpha <- as.numeric(alpha[[1L]])
if (alpha < 0 || alpha > 1) {
stop("`alpha` is outside 0 and 1.")
}
if (alpha < sqrt(.Machine$double.eps)) {
alpha <- 0
} else if (alpha > 1) {
stop("`alpha` must be less or equal to 1.")
}
penalties <- make_penalties(alpha, lambdas)
s_loss_params <- c(list(mscale = list(delta = as.numeric(bdp[[1L]]),
cc = cc,
maxit = as.integer(mscale_maxit[[1L]]),
eps = as.integer(mscale_eps[[1L]])),
intercept = include_intercept))
# Check EN algorithm
if (!missing(en_options)) {
if (!.check_en_algorithm(en_options, alpha)) {
en_options <- NULL
}
} else {
en_options <- NULL
}
optional_args <- list(en_options = en_options)
if (is.null(en_options)) {
optional_args$en_options <- if (alpha > 0) {
en_admm_options()
} else {
en_ridge_options()
}
}
if (optional_args$en_options$algorithm == 'admm') {
optional_args$en_options <- .choose_admm_algorithm(optional_args$en_options,
alpha, x)
}
s_loss_params <- list(mscale = .full_mscale_algo_options(bdp = bdp, cc = cc, mscale_opts = mscale_opts),
intercept = include_intercept)
optional_args$en_options$algorithm <- .en_algorithm_id(optional_args$en_options)
# Check EN algorithm for ENPY
enpy_opts$en_options <- .select_en_algorithm(enpy_opts$en_options, alpha, x)
res <- .Call(C_penpy, x, drop(y), penalties, s_loss_params, enpy_opts,
optional_args)
res <- .Call(C_penpy, x, drop(y), penalties, s_loss_params, enpy_opts, list())
lapply(res, function (res) {
if (!is.null(res$metrics)) {
......
#' Compute the PENSE Regularization Path
#'
#' @param x `n` by `p` matrix of numeric predictors.
#' @param y vector of response values of length `n`.
#' @param alpha value for the alpha parameter, the balance between L1 and L2
#' penalization.
#' @param lambdas a vector of positive values for the lambda parameter.
......@@ -14,29 +16,27 @@
#' as well as the `intercept` and `beta`
#' coefficients.
#' @param include_intercept include an intercept in the model.
#' @param max_it maximum number of iterations for the algorithm.
#' @param bdp the desired breakdown point of the estimator, between 0 and 0.5.
#' @param cc consistency constant for the scale estimate. By default, the scale estimate is made consistent for the
#' given breakdown point under the Normal model.
#' @param eps convergence tolerance for the algorithm.
#' @param nr_tracks number of optima to track in the "cold" and "others" regularization path.
#' @param explore_it number of iterations to explore potential candidate
#' solutions.
#' @param en_algorithm_opts options for the EN algorithm. See [en_dal_options]
#' for details.
#' @param mest_opts options for the M-scale estimation. See [mest_options]
#' for details.
#' @param sparse use sparse coefficient vector.
#' @param algorithm_opts options for the PENSE algorithm. See [pense_algorithm_options] for details.
#' @param mscale_opts options for the M-scale estimation. See [mscale_algorithm_options] for details.
#' @param enpy_opts options for the ENPY initial estimates, created with the
#' [enpy_options] function. See [enpy_initial_estimates] for
#' details.
#' @seealso [pensem] for an M-estimate of regression.
#' [enpy_options] function. See [enpy_initial_estimates] for details.
#'
#' @export
pense <- function(x, y, alpha, lambdas, cold_lambdas, penalty_loadings,
additional_initial_estimates, include_intercept = TRUE,
max_it = 200, eps = 1e-6, explore_it = 10,
tightening = c('none', 'adaptive', 'exponential'),
tightening_steps = 10L, en_algorithm_opts,
mest_opts = mest_options(), enpy_opts = enpy_options()) {
pense <- function(x, y, alpha, lambdas, cold_lambdas, penalty_loadings, additional_initial_estimates,
include_intercept = TRUE, bdp = 0.25, cc, eps = 1e-6, algorithm_opts, nr_tracks = 10, explore_it = 10,
sparse = TRUE, mscale_opts = mscale_algorithm_options(), enpy_opts = enpy_options()) {
optional_args <- list()
# Normalize input
y <- as.numeric(y)
y <- .as(y, 'numeric')
x_dim <- dim(x)
......@@ -44,49 +44,59 @@ pense <- function(x, y, alpha, lambdas, cold_lambdas, penalty_loadings,
stop("Number of observations in `x` and `y` does not match.")
}
alpha <- as.numeric(alpha[[1L]])
lambdas <- sort(as.numeric(lambdas), decreasing = TRUE)
pense_opts <- c(
list(mm_options = s_algo_options(explore_it = explore_it, max_it = max_it,
eps = eps, tightening = match.arg(tightening),
tightening_steps = tightening_steps)),
list(intercept = isTRUE(include_intercept), mscale = mest_opts))
if (missing(cc)) {
cc <- NULL
}
alpha <- .as(alpha[[1L]], 'numeric')
if (alpha < sqrt(.Machine$double.eps)) {
alpha <- 0
}
lambdas <- sort(.as(lambdas, 'numeric'), decreasing = TRUE)
sparse <- isTRUE(sparse)
pense_opts <- list(algo_opts = algorithm_opts,
algorithm = .pense_algorithm_id(algorithm_opts),
intercept = isTRUE(include_intercept),
eps = .as(eps[[1L]], 'numeric'),
explore_it = .as(explore_it[[1L]], 'integer'),
nr_tracks = .as(nr_tracks[[1L]], 'integer'),
mscale = .full_mscale_algo_options(bdp = bdp, cc = cc, mscale_opts = mscale_opts))
# Propagate `sparse` to the inner optimizer
if (!is.null(pense_opts$algo_opts$sparse)) {
pense_opts$algo_opts$sparse <- sparse
}
# If using the MM algorithm, ensure that the EN options are set.
if (pense_opts$algorithm == 1L) {
pense_opts$algo_opts$en_options <- .select_en_algorithm(pense_opts$algo_opts$en_options, alpha, x)
# Propagate `sparse` to the inner optimizer
if (!is.null(pense_opts$algo_opts$en_options$sparse)) {
pense_opts$algo_opts$en_options$sparse <- sparse
}
}
penalty_loadings <- if (!missing(penalty_loadings)) {
if(alpha == 0) {
stop("Penalty loadings are only supported for alpha > 0.")
} else if (length(penalty_loadings) != x_dim[[2L]]) {
stop("Penalty loadings are not of length `p`.")
}
as.numeric(penalty_loadings)
.as(penalty_loadings, 'numeric')
} else {
NULL
}
# Check EN algorithm
if (!missing(en_algorithm_opts)) {
if (!.check_en_algorithm(en_algorithm_opts, alpha)) {
en_algorithm_opts <- NULL
}
} else {
en_algorithm_opts <- NULL
}
# Check EN algorithm for ENPY
enpy_opts$en_opts <- .select_en_algorithm(enpy_opts$en_opts, alpha, x)
optional_args$en_options <- if (is.null(en_algorithm_opts)) {
if (alpha > 0) {
en_admm_options()
} else {
en_ridge_options()
}
} else {
en_algorithm_opts
# Propagate `sparse` to the ENPY optimizer
if (!is.null(enpy_opts$en_opts$sparse)) {
enpy_opts$en_opts$sparse <- sparse
}
optional_args$en_options$algorithm <- .en_algorithm_id(optional_args$en_options)
cold_lambda_inds <- .approx_match(cold_lambdas, lambdas)
if (anyNA(cold_lambda_inds)) {
......@@ -94,12 +104,19 @@ pense <- function(x, y, alpha, lambdas, cold_lambdas, penalty_loadings,
cold_lambda_inds <- .approx_match(cold_lambdas, lambdas)
}
if (any(lambdas < .Machine$double.eps)) {
stop("at least one value in `lambdas` less or equal to 0.")
}
if (missing(additional_initial_estimates)) {
additional_initial_estimates <- list()
}
init_ests <- .make_initest_list(additional_initial_estimates, lambdas,
sparse = isTRUE(optional_args$en_options$sparse))
init_ests <- if (!inherits(additional_initial_estimates, 'pense_estimate_list')) {
.make_initest_list(additional_initial_estimates, lambdas, sparse = sparse)
} else {
additional_initial_estimates
}
# Call internal function
res <- .pense_internal(x, y, alpha, lambdas, cold_lambda_inds, init_ests,
......@@ -146,6 +163,15 @@ pense <- function(x, y, alpha, lambdas, cold_lambdas, penalty_loadings,
})
}
#' Get the smallest lambda such that the PENSE estimate gives the empty model.
#'
#' Thin wrapper around the C++ routine `C_pense_max_lambda`.
#'
#' @param x predictor matrix, forwarded to the C++ routine.
#' @param y response vector, forwarded to the C++ routine.
#' @param alpha balance between the L1 and L2 penalization; the C++ result is
#'   divided by `max(0.01, alpha)`, so alpha values below 0.01 (including 0)
#'   are floored to avoid division by (near) zero.
#' @param pense_options list of PENSE options, forwarded to the C++ routine.
#' @param penalty_loadings optional vector of penalty loadings; if not `NULL`
#'   it is passed to the C++ routine as the `pen_loadings` optional argument.
#' @return presumably the smallest lambda yielding the empty model for the
#'   given `alpha` — exact semantics are defined by `C_pense_max_lambda`.
.pense_max_lambda <- function (x, y, alpha, pense_options, penalty_loadings = NULL) {
optional_args <- list()
if (!is.null(penalty_loadings)) {
optional_args$pen_loadings <- penalty_loadings
}
# Scale the C++ result by 1 / max(0.01, alpha); the floor guards the
# ridge case (alpha == 0) against division by zero.
.Call(C_pense_max_lambda, x, y, pense_options, optional_args) / max(0.01, alpha)
}
## Perform some final input adjustments and call the internal C++ code.
.pense_internal <- function(x, y, alpha, lambdas, cold_lambda_inds,
additional_initial_estimates,
......
......@@ -18,15 +18,17 @@ tau_size <- function (x) {
#' @param bdp desired breakdown point (between 0 and 0.5).
#' @param cc cutoff value for the bisquare rho function. By default, chosen
#' for a consistent estimate under the Normal model.
#' @param opts a list of options for the M-scale equation, see [mest_options]
#' @param opts a list of options for the M-scale estimation algorithm, see [mscale_algorithm_options]
#' for details.
#' @return the M-scale estimate.
#' @export
mscale <- function (x, bdp = 0.25, cc = consistency_const(bdp, 'bisquare'),
opts = mest_options()) {
opts = mscale_algorithm_options()) {
# No checks for NA values!
opts$delta <- as.numeric(bdp[[1L]])
opts$cc <- as.numeric(cc[[1L]])
if (missing(cc)) {
cc <- NULL
}
opts <- .full_mscale_algo_options(bdp, cc, opts)
.Call(C_mscale, as.numeric(x), opts)
}
......@@ -38,18 +40,19 @@ mscale <- function (x, bdp = 0.25, cc = consistency_const(bdp, 'bisquare'),
#' @param cc value of the tuning constant for the chosen rho function.
#' By default, chosen to achieve 95% efficiency under the Normal
#' model.
#' @param opts a list of options for the M-estimating equation, see
#' [mest_options] for details.
#' @param opts a list of options for the M-estimating algorithm, see
#' [mscale_algorithm_options] for details.
#' @return the M-location estimate.
#' @importFrom stats mad
#' @export
mloc <- function (x, scale = mad(x), rho, cc, opts = mest_options()) {
mloc <- function (x, scale = mad(x), rho, cc, opts = mscale_algorithm_options()) {
# No checks for NA values!
opts$rho <- rho_function(rho)
if (!missing(cc)) {
opts$cc <- as.numeric(cc[[1L]])
if (missing(cc)) {
cc <- NULL
}
.Call(C_mloc, as.numeric(x), scale, opts)
opts <- .full_mscale_algo_options(bdp, cc, opts)
opts$rho <- rho_function(rho)
.Call(C_mloc, .as(x, 'numeric'), scale, opts)
}
#' Compute the M-Location and M-Scale
......@@ -61,20 +64,22 @@ mloc <- function (x, scale = mad(x), rho, cc, opts = mest_options()) {
#' @param cc cutoff value for the bisquare rho function. By default, chosen
#' for a consistent estimate under the Normal model.
#' @param opts a list of options for the M-estimating equation,
#' see [mest_options] for details.
#' see [mscale_algorithm_options] for details.
#' @return a vector with two elements, the M-location and the M-scale estimate.
#' @export
mlocscale <- function (x, bdp = 0.25, location_rho = c('bisquare', 'huber'),
scale_cc = consistency_const(bdp, 'bisquare'),
location_cc, opts = mest_options()) {
location_cc, opts = mscale_algorithm_options()) {
# No checks for NA values!
opts$delta <- as.numeric(bdp[[1L]])
opts$cc <- as.numeric(scale_cc[[1L]])
if (missing(cc)) {
cc <- NULL
}
opts <- .full_mscale_algo_options(bdp, scale_cc, opts)
loc_opts <- list(rho = rho_function(location_rho))
if (!missing(location_cc)) {
loc_opts$cc <- as.numeric(location_cc[[1L]])
loc_opts$cc <- .as(location_cc[[1L]], 'numeric')
}
.Call(C_mlocscale, as.numeric(x), opts, loc_opts)
.Call(C_mlocscale, .as(x, 'numeric'), opts, loc_opts)
}
## Get the Constant for Consistency for the M-Scale Using the Bisquare Rho Function
......
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{.as}
\alias{.as}
\title{A wrapper around \code{methods::as} which raises an error if the conversion results in NA.}
\usage{
.as(object, class, ...)
}
\arguments{
\item{object}{the object to convert.}

\item{class}{the name of the class to convert \code{object} to.}

\item{...}{passed on to \link[methods:as]{methods::as}.}
}
\description{
A wrapper around \code{methods::as} which raises an error if the conversion results in NA.
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{.full_mscale_algo_options}
\alias{.full_mscale_algo_options}
\title{Full options for the M-scale Estimation Algorithm}
\usage{
.full_mscale_algo_options(bdp, cc, mscale_opts)
}
\arguments{
\item{bdp}{the breakdown point, i.e., \code{delta} in the M-estimation equation.}
\item{cc}{the cutoff threshold for the bisquare rho function.}
\item{mscale_opts}{"public" control options created by \link{mscale_algorithm_options}.}
}
\value{
full options for the M-scale estimation algorithm.
}
\description{
Full options for the M-scale Estimation Algorithm
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pense_regression.R
\name{.pense_max_lambda}
\alias{.pense_max_lambda}
\title{Get the smallest lambda such that the PENSE estimate gives the empty model.}
\usage{
.pense_max_lambda(x, y, alpha, pense_options, penalty_loadings = NULL)
}
\description{
Get the smallest lambda such that the PENSE estimate gives the empty model.
}
......@@ -21,7 +21,7 @@ coefficient.}
\item{include_intercept}{include an intercept in the model.}
\item{en_algorithm_opts}{options for the EN algorithm. See \link{en_dal_options}
\item{en_algorithm_opts}{options for the EN algorithm. See \link{en_algorithm_options}
for details.}
}
\description{
......
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{en_algorithm_options}
\alias{en_algorithm_options}
\title{Control the Algorithm to Compute (Weighted) Least-Squares Elastic Net Estimates}
\description{
The package supports multiple different algorithms to compute the EN estimate for weighted LS loss functions.
Each algorithm has certain characteristics that make it useful for some problems.
To select a specific algorithm and set its parameters, use any of the \code{en_***_options} functions.
}
\details{
\itemize{
\item \link{en_admm_options}: Select an iterative ADMM-type algorithm. There are two versions available:
\code{admm_type = "linearized"} needs \emph{O(n p)} operations per iteration and converges linearly, while
\code{admm_type = "var-stepsize"} needs \emph{O(n p^3)} operations per iteration but converges quadratically.
\item \link{en_dal_options}: Select the iterative Dual Augmented Lagrangian (DAL) method. DAL needs O(n^3 p^2) operations
per iteration, but converges exponentially.
}
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{en_ridge_options}
\alias{en_ridge_options}
\title{Ridge optimizer using an Augmented data matrix.
Only available for Ridge problems (`alpha=0`) and selected automatically in this case.}
\usage{
en_ridge_options()
}
\description{
Ridge optimizer using an Augmented data matrix.
Only available for Ridge problems (`alpha=0`) and selected automatically in this case.
}
......@@ -6,7 +6,31 @@
\usage{
enpy_initial_estimates(x, y, alpha, lambdas, bdp = 0.25, cc,
include_intercept = TRUE, enpy_opts = enpy_options(),
mscale_maxit = 200, mscale_eps = 1e-09, en_options)
mscale_opts = mscale_algorithm_options())
}
\arguments{
\item{x}{\code{n} by \code{p} matrix of numeric predictors.}
\item{y}{vector of response values of length \code{n}.}
\item{alpha}{value for the alpha parameter, the balance between L1 and L2
penalization.}
\item{lambdas}{a vector of positive values for the lambda parameter.}
\item{bdp}{the desired breakdown point of the estimator, between 0 and 0.5.}
\item{cc}{consistency constant for the scale estimate. By default, the scale estimate is made consistent for the
given breakdown point under the Normal model.}
\item{include_intercept}{include an intercept in the model.}
\item{mscale_opts}{options for the M-scale estimation. See \link{mscale_algorithm_options} for details.}
\item{penalty_loadings}{a vector of positive penalty loadings
(a.k.a. weights) for different penalization of each coefficient.}
\item{en_algorithm_opts}{options for the EN algorithm. See \link{en_algorithm_options} for details.}
}
\description{
ENPY Initial Estimates
......
......@@ -5,9 +5,9 @@
\title{Options for the ENPY Algorithm}
\usage{
enpy_options(max_it = 10, eps = 1e-06, keep_psc_proportion = 0.5,
keep_residuals_measure = c("threshold", "proportion"),
keep_residuals_proportion = 0.5, keep_residuals_threshold = 2,
retain_best_factor = 1.1)
en_algorithm_opts, keep_residuals_measure = c("threshold",
"proportion"), keep_residuals_proportion = 0.5,
keep_residuals_threshold = 2, retain_best_factor = 1.1)
}
\arguments{
\item{max_it}{maximum number of PY iterations.}
......@@ -16,6 +16,8 @@ enpy_options(max_it = 10, eps = 1e-06, keep_psc_proportion = 0.5,
\item{keep_psc_proportion}{how many observations should be kept based on the Principal Sensitivity Components.}
\item{en_algorithm_opts}{options for the LS-EN algorithm. See \link{en_algorithm_options} for details.}
\item{keep_residuals_measure}{how to determine what observations to keep, based on their residuals.
If \code{proportion}, a fixed number of observations is kept, while if \code{threshold},
only observations with residuals below the threshold are kept.}
......
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{mest_options}
\alias{mest_options}
\title{Options for the M-estimation Algorithm}
\name{mscale_algorithm_options}
\alias{mscale_algorithm_options}
\title{Options for the M-scale Estimation Algorithm}
\usage{
mest_options(max_it = 200, eps = 1e-06)
mscale_algorithm_options(max_it = 200, eps = 1e-06)
}
\arguments{
\item{max_it}{maximum number of iterations.}
......@@ -12,8 +12,8 @@ mest_options(max_it = 200, eps = 1e-06)
\item{eps}{numerical tolerance to check for convergence.}
}
\value{
options for the M-estimation algorithm.
options for the M-scale estimation algorithm.
}
\description{
Options for the M-estimation Algorithm
Options for the M-scale Estimation Algorithm
}
......@@ -5,12 +5,16 @@
\title{Compute the PENSE Regularization Path}
\usage{
pense(x, y, alpha, lambdas, cold_lambdas, penalty_loadings,
additional_initial_estimates, include_intercept = TRUE, max_it = 200,
eps = 1e-06, explore_it = 10, tightening = c("none", "adaptive",
"exponential"), tightening_steps = 10L, en_algorithm_opts,
mest_opts = mest_options(), enpy_opts = enpy_options())
additional_initial_estimates, include_intercept = TRUE, bdp = 0.25,
cc, eps = 1e-06, algorithm_opts, nr_tracks = 10, explore_it = 10,
sparse = TRUE, mscale_opts = mscale_algorithm_options(),
enpy_opts = enpy_options())
}
\arguments{
\item{x}{\code{n} by \code{p} matrix of numeric predictors.}
\item{y}{vector of response values of length \code{n}.}
\item{alpha}{value for the alpha parameter, the balance between L1 and L2
penalization.}
......@@ -31,26 +35,27 @@ coefficients.}
\item{include_intercept}{include an intercept in the model.}
\item{max_it}{maximum number of iterations for the algorithm.}
\item{bdp}{the desired breakdown point of the estimator, between 0 and 0.5.}
\item{cc}{consistency constant for the scale estimate. By default, the scale estimate is made consistent for the
given breakdown point under the Normal model.}
\item{eps}{convergence tolerance for the algorithm.}
\item{algorithm_opts}{options for the PENSE algorithm. See \link{pense_algorithm_options} for details.}
\item{nr_tracks}{number of optima to track in the "cold" and "others" regularization path.}
\item{explore_it}{number of iterations to explore potential candidate
solutions.}
\item{en_algorithm_opts}{options for the EN algorithm. See \link{en_dal_options}
for details.}
\item{sparse}{use sparse coefficient vector.}
\item{mest_opts}{options for the M-scale estimation. See \link{mest_options}
for details.}
\item{mscale_opts}{options for the M-scale estimation. See \link{mscale_algorithm_options} for details.}
\item{enpy_opts}{options for the ENPY initial estimates, created with the
\link{enpy_options} function. See \link{enpy_initial_estimates} for
details.}
\link{enpy_options} function. See \link{enpy_initial_estimates} for details.}
}
\description{
Compute the PENSE Regularization Path
}
\seealso{
\link{pensem} for an M-estimate of regression.
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{pense_admm_options}
\alias{pense_admm_options}
\title{Options for the S-Estimate Algorithm}
\usage{
pense_admm_options(max_it = 5000, tau, sparse = TRUE,
prox_eps = 1e-09, prox_max_it = 200, prox_oscillate_window = 4,
prox_minimum_step_size = 0.01, prox_wolfe_c1 = 1e-04,
prox_wolfe_c2 = 0.9, prox_step_size_adj = 0.9,
prox_max_small_steps = 10)
}
\arguments{
\item{max_it}{maximum number of iterations.}
\item{tau}{step size for the algorithm.}
\item{sparse}{use sparse coefficients.}
\item{prox_eps}{numerical tolerance for computing the proximal operator of the S-loss.}
\item{prox_max_it}{maximum number of iterations for computing the proximal operator of the S-loss.}
\item{prox_oscillate_window}{moving average size to determine oscillation for computing the proximal operator of the
S-loss.}
\item{prox_minimum_step_size}{minimum step size for computing the proximal operator of the S-loss.}
\item{prox_wolfe_c1}{constant to check the first Wolfe condition for computing the proximal operator of the
S-loss.}
\item{prox_wolfe_c2}{constant to check the second Wolfe condition for computing the proximal operator of the
S-loss.}
\item{prox_max_small_steps}{maximum number of consecutive small steps when computing the proximal operator of the
S-loss. After this many steps with minimal step size, a large step is performed to
escape the current neighborhood.}
\item{prox_step_size_adj}{multiplicative factor to decrease the step size if Wolfe's conditions are not satisfied
when computing the proximal operator of the S-loss.}
}
\value{
options for the S-Estimate algorithm.
}
\description{
Options for the S-Estimate Algorithm
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{pense_algorithm_options}
\alias{pense_algorithm_options}
\title{Control the Algorithm to Compute Penalized Elastic Net S-Estimates}
\description{
The package provides different algorithms to compute the PENSE estimate.
To select a specific algorithm and set its parameters, use any of the \code{pense_***_options} functions.
}
\details{
\itemize{
\item \link{pense_admm_options}: Select the ADMM algorithm.
\item \link{pense_mm_options}: Select the MM-algorithm.
}
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{pense_mm_options}
\alias{pense_mm_options}