Commit b9ec1371 authored by davidkep's avatar davidkep

update

parent 6452c290
......@@ -6,3 +6,4 @@
.DS_Store
.Rbuildignore
*.trace
# Generated by roxygen2: do not edit by hand
S3method(print,nsoptim_metrics)
export(consistency_const)
export(elnet)
export(en_admm_options)
export(en_dal_options)
export(enpy_initial_estimates)
export(enpy_options)
......
......@@ -51,6 +51,49 @@ s_algo_options <- function (explore_it = 10, max_it = 500,
cold_explore_it = as.integer(explore_it[[1L]]))
}
#' Options for the ADMM Elastic Net Algorithm
#'
#' @param max_it maximum number of iterations.
#' @param eps numerical tolerance to check for convergence.
#' @param sparse use sparse coefficients.
#' @param admm_type which type of ADMM algorithm to use. If `linearized`,
#'                  uses a linearized version of ADMM which has runtime $O()$
#'                  and converges linearly.
#'                  If `var-stepsize`, uses a variable step-size ADMM
#'                  algorithm which converges quadratically for "true" EN
#'                  penalties (i.e., \eqn{\alpha < 1}) but has runtime $O()$.
#'                  If `auto` (the default), chooses the type based on the
#'                  penalty and the problem size.
#' @param tau step size for the algorithm if using the `linearized` version
#'            and the largest step size if using the `var-stepsize` version.
#' @param tau_lower_mult multiplicative factor determining the smallest step
#'                       size the algorithm may use, relative to `tau`
#'                       (only for the `var-stepsize` version).
#' @param tau_adjustment_lower (smallest) multiplicative factor for the
#' adjustment of the step size
#' `tau = tau_adjustment * tau`
#' (only for the `var-stepsize` version).
#' @param tau_adjustment_upper (largest) multiplicative factor for the
#' adjustment of the step size
#' `tau = tau_adjustment * tau`
#' (only for the `var-stepsize` version).
#'
#' @return options for the ADMM EN algorithm.
#' @family EN algorithms
#' @export
en_admm_options <- function (max_it = 1000, eps = 1e-6, tau, sparse = FALSE,
admm_type = c('auto', 'linearized',
'var-stepsize'),
tau_lower_mult = 0.01, tau_adjustment_lower = 0.98,
tau_adjustment_upper = 0.999) {
list(algorithm = 'admm',
admm_type = match.arg(admm_type),
sparse = isTRUE(sparse[[1L]]),
max_it = as.integer(max_it[[1L]]),
eps = as.numeric(eps[[1L]]),
tau = if (missing(tau)) { -1 } else { as.numeric(tau[[1L]]) },
tau_lower_mult = as.numeric(tau_lower_mult[[1L]]),
tau_adjustment_lower = as.numeric(tau_adjustment_lower[[1L]]),
tau_adjustment_upper = as.numeric(tau_adjustment_upper[[1L]]))
}
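
## Illustrative sketch (not part of the package source): constructing ADMM
## options with the variable step-size variant and a custom largest step size.
admm_opts <- en_admm_options(max_it = 2000, tau = 0.5,
                             admm_type = 'var-stepsize')
str(admm_opts)  # a plain list tagged with algorithm = 'admm'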
#' Options for the DAL Elastic Net Algorithm
#'
#' @param max_it maximum number of (outer) iterations.
......@@ -70,7 +113,8 @@ s_algo_options <- function (explore_it = 10, max_it = 500,
en_dal_options <- function (max_it = 100, max_inner_it = 100, eps = 1e-9, eta_multiplier = 2,
eta_start_conservative = 0.01, eta_start_aggressive = 1,
lambda_relchange_aggressive = 0.25) {
list(en_algorithm = 'dal',
list(algorithm = 'dal',
sparse = TRUE,
max_it = as.integer(max_it[[1L]]),
max_inner_it = as.integer(max_inner_it[[1L]]),
eps = as.numeric(eps[[1L]]),
......@@ -80,9 +124,13 @@ en_dal_options <- function (max_it = 100, max_inner_it = 100, eps = 1e-9, eta_mu
eta_multiplier = as.numeric(eta_multiplier[[1L]]))
}
#' Check if the selected EN algorithm can handle the given penalty.
en_ridge_options <- function () {
list(algorithm = 'augridge', sparse = FALSE)
}
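
## Illustrative sketch: all three constructors return plain lists tagged with
## an `algorithm` field, which `.en_algorithm_id()` below maps to an integer id.
en_admm_options()$algorithm   # "admm"
en_dal_options()$algorithm    # "dal"
en_ridge_options()$algorithm  # "augridge"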
## Check if the selected EN algorithm can handle the given penalty.
.check_en_algorithm <- function (en_algorithm_opts, alpha) {
if (en_algorithm_opts$en_algorithm == 'dal') {
if (en_algorithm_opts$algorithm == 'dal') {
if (!isTRUE(alpha > 0)) {
warning('The DAL algorithm cannot handle a Ridge penalty. ',
'Using the default algorithm as fallback.')
......@@ -92,3 +140,26 @@ en_dal_options <- function (max_it = 100, max_inner_it = 100, eps = 1e-9, eta_mu
return(TRUE)
}
## Choose the appropriate ADMM algorithm type based on the penalty and
## the problem size.
.choose_admm_algorithm <- function (en_algorithm_opts, alpha, x) {
if (isTRUE(en_algorithm_opts$admm_type != 'auto')) {
return(en_algorithm_opts)
}
en_algorithm_opts$admm_type <- if (isTRUE(alpha < 1 && ncol(x) < 1.4 * nrow(x))) {
'var-stepsize'
} else {
'linearized'
}
return(en_algorithm_opts)
}
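
## Illustrative sketch with toy data: for a "true" EN penalty (alpha < 1) and
## p < 1.4 * n, the automatic choice picks the variable step-size variant.
x_toy <- matrix(rnorm(50 * 10), nrow = 50, ncol = 10)
.choose_admm_algorithm(en_admm_options(), alpha = 0.5, x = x_toy)$admm_type
# -> "var-stepsize"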
.en_algorithm_id <- function (en_algorithm_opts) {
switch (en_algorithm_opts$algorithm,
admm = switch(en_algorithm_opts$admm_type, `var-stepsize` = 2L, 1L),
dal = 3L,
augridge = 4L,
1L)
}
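
## Illustrative sketch: the integer ids select the matching C++ backend.
.en_algorithm_id(en_admm_options())   # 1L (linearized or auto ADMM)
.en_algorithm_id(en_dal_options())    # 3L
.en_algorithm_id(en_ridge_options())  # 4L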
#' Compute the Elastic Net Regularization Path
#'
#' Compute the EN estimator for linear regression with optional observation
#' weights and penalty loadings.
#'
#' The elastic net estimator for the linear regression model solves
#' the optimization problem
#'
#' \deqn{argmin_{\mu, \beta}
#'    (1/n) \sum_i w_i (y_i - \mu - x_i' \beta)^2 +
#'    \lambda \sum_j ( 0.5 (1 - \alpha) \beta_j^2 + \alpha l_j |\beta_j| )}
#'
#' with observation weights \eqn{w_i} and penalty loadings \eqn{l_j}.
#'
#' @param alpha value for the alpha parameter, the balance between L1 and L2
#' penalization.
#' @param lambdas a vector of positive values for the lambda parameter.
#' @param penalty_loadings a vector of positive penalty loadings
#' (a.k.a. weights) for different penalization of each
#' coefficient.
#' @param weights a vector of positive observation weights.
#' @param include_intercept include an intercept in the model.
#' @param en_algorithm_opts options for the EN algorithm. See [en_admm_options]
#'                          and [en_dal_options] for details.
#' @seealso [pense] for an S-estimate of regression with elastic net penalty.
#' @export
elnet <- function(x, y, alpha, lambdas, penalty_loadings, weights,
include_intercept = TRUE, en_algorithm_opts) {
optional_args <- list()
# Normalize input
y <- as.numeric(y)
x_dim <- dim(x)
if (length(y) != x_dim[[1L]]) {
stop("Number of observations does not match between `x` and `y`.")
}
alpha <- as.numeric(alpha[[1L]])
lambdas <- sort(as.numeric(lambdas), decreasing = TRUE)
if (alpha < sqrt(.Machine$double.eps)) {
alpha <- 0
}
penalty_loadings <- if (!missing(penalty_loadings)) {
if (alpha == 0) {
stop("Penalty loadings are only supported for alpha > 0.")
} else if (length(penalty_loadings) != x_dim[[2L]]) {
stop("Penalty loadings are not of length `p`.")
}
as.numeric(penalty_loadings)
} else {
NULL
}
weights <- if (!missing(weights)) {
if (length(weights) != x_dim[[1L]]) {
stop("Observation weights are not the same length as `y`.")
}
as.numeric(weights)
} else {
NULL
}
# Check EN algorithm
if (!missing(en_algorithm_opts)) {
if (!.check_en_algorithm(en_algorithm_opts, alpha)) {
en_algorithm_opts <- NULL
}
} else {
en_algorithm_opts <- NULL
}
optional_args$en_options <- if (is.null(en_algorithm_opts)) {
if (alpha > 0) {
en_admm_options()
} else {
en_ridge_options()
}
} else {
en_algorithm_opts
}
optional_args$en_options$algorithm <- .en_algorithm_id(optional_args$en_options)
# Call internal function
res <- .elnet_internal(x, y, alpha, lambdas, penalty_loadings, weights,
include_intercept, optional_args)
if (!is.null(res$metrics)) {
class(res$metrics) <- 'nsoptim_metrics'
}
return(res)
}
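
## Illustrative end-to-end sketch (assumes the compiled package is loaded;
## the data below are simulated and not part of the package):
set.seed(123)
x_sim <- matrix(rnorm(100 * 10), nrow = 100, ncol = 10)
y_sim <- x_sim[, 1] - 2 * x_sim[, 2] + rnorm(100)
fit <- elnet(x_sim, y_sim, alpha = 0.75, lambdas = c(1, 0.1, 0.01))
print(fit$metrics)  # pretty-printed if the backend reports metrics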
#' Perform some final input adjustments and call the internal C++ code.
.elnet_internal <- function(x, y, alpha, lambdas, penalty_loadings = NULL,
weights = NULL, include_intercept = TRUE,
optional_args) {
# Create penalties-list, without sorting the lambda sequence
penalties <- lapply(lambdas, function (l) { list(lambda = l, alpha = alpha) })
include_intercept <- isTRUE(include_intercept)
if (!is.null(penalty_loadings)) {
optional_args$pen_loadings <- penalty_loadings
}
if (!is.null(weights)) {
optional_args$obs_weights <- weights
}
return(.Call(C_lsen_regression, x, y, penalties, include_intercept,
optional_args))
}
#' ENPY Initial Estimates
#'
#' @export
enpy_initial_estimates <- function (x, y, alpha, lambdas, bdp = 0.25, cc, include_intercept = TRUE,
enpy_opts = enpy_options(), mscale_opts = mscale_options(),
s_algo_opts = s_algo_options(), en_options) {
enpy_initial_estimates <- function (x, y, alpha, lambdas, bdp = 0.25, cc,
include_intercept = TRUE,
enpy_opts = enpy_options(),
mscale_maxit, mscale_eps, en_options) {
if (missing(cc)) {
cc <- .bisquare_consistency_const(bdp)
}
alpha <- as.numeric(alpha[[1L]])
if (alpha < sqrt(.Machine$double.eps)) {
alpha <- 0
} else if (alpha > 1) {
stop("`alpha` must be less or equal to 1.")
}
penalties <- make_penalties(alpha, lambdas)
s_loss_params <- c(list(mscale = c(list(delta = bdp, cc = cc), mscale_opts),
intercept = include_intercept), s_algo_opts)
if (isTRUE(alpha == 0)) {
return(.Call(C_ridgepy, x, drop(y), penalties, s_loss_params, enpy_opts))
} else if (isTRUE(alpha > 0) && isTRUE(alpha <= 1)) {
if (missing(en_options)) {
en_options <- en_dal_options()
s_loss_params <- c(list(mscale = list(delta = as.numeric(bdp[[1L]]),
cc = cc,
maxit = as.integer(mscale_maxit[[1L]]),
eps = as.numeric(mscale_eps[[1L]])),
intercept = include_intercept))
# Check EN algorithm
if (!missing(en_options)) {
if (!.check_en_algorithm(en_options, alpha)) {
en_options <- NULL
}
} else {
en_options <- NULL
}
optional_args <- list()
optional_args$en_options <- if (is.null(en_options)) {
if (alpha > 0) {
en_admm_options()
} else {
en_ridge_options()
}
return(.Call(C_enpy_dal, x, drop(y), penalties, s_loss_params, enpy_opts, list(dal_options = en_options)))
} else {
stop("`alpha` must be between 0 and 1.")
en_options
}
if (optional_args$en_options$algorithm == 'admm') {
optional_args$en_options <- .choose_admm_algorithm(optional_args$en_options,
alpha, x)
}
optional_args$en_options$algorithm <- .en_algorithm_id(optional_args$en_options)
res <- .Call(C_penpy, x, drop(y), penalties, s_loss_params, enpy_opts,
optional_args)
lapply(res, function (res) {
if (!is.null(res$metrics)) {
class(res$metrics) <- 'nsoptim_metrics'
}
return(res)
})
}
## Make a list of penalties and ensure that `alpha` and `lambdas` are of
## correct type and order.
make_penalties <- function (alpha, lambdas) {
alpha <- as.numeric(alpha[[1L]])
lapply(sort(as.numeric(lambdas), decreasing = TRUE), function (lambda) {
list(alpha = alpha, lambda = lambda)
})
......
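
## Illustrative sketch of the structure built by `make_penalties()`:
## lambdas are sorted in decreasing order before being paired with alpha.
str(make_penalties(alpha = 0.5, lambdas = c(0.1, 1)))
# -> list(list(alpha = 0.5, lambda = 1), list(alpha = 0.5, lambda = 0.1))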
......@@ -3,6 +3,9 @@
#' @param alpha value for the alpha parameter, the balance between L1 and L2
#' penalization.
#' @param lambdas a vector of positive values for the lambda parameter.
#' @param penalty_loadings a vector of positive penalty loadings
#' (a.k.a. weights) for different penalization of each
#' coefficient.
#' @param cold_lambdas a vector of lambda values at which *cold* initial
#' estimates are computed (see [enpy] for details).
#' @param additional_initial_estimates a list of other initial estimates to try.
......@@ -26,7 +29,7 @@
#' @export
pense <- function(x, y, alpha, lambdas, cold_lambdas, penalty_loadings,
additional_initial_estimates, include_intercept = TRUE,
max_it = 1000, eps = 1e-5, explore_it = 10, en_algorithm_opts,
max_it = 200, eps = 1e-5, explore_it = 10, en_algorithm_opts,
mest_opts = mest_options(), enpy_opts = enpy_options()) {
optional_args <- list()
......@@ -74,13 +77,16 @@ pense <- function(x, y, alpha, lambdas, cold_lambdas, penalty_loadings,
en_algorithm_opts <- NULL
}
if (is.null(en_algorithm_opts)) {
optional_args$en_options <- if (is.null(en_algorithm_opts)) {
if (alpha > 0) {
optional_args$en_options <- en_dal_options()
en_admm_options()
} else {
en_ridge_options()
}
} else {
optional_args$en_options <- en_algorithm_opts
en_algorithm_opts
}
optional_args$en_options$algorithm <- .en_algorithm_id(optional_args$en_options)
cold_lambda_inds <- .approx_match(cold_lambdas, lambdas)
if (anyNA(cold_lambda_inds)) {
......@@ -92,18 +98,25 @@ pense <- function(x, y, alpha, lambdas, cold_lambdas, penalty_loadings,
additional_initial_estimates <- list()
}
init_ests <- .make_initest_list(additional_initial_estimates, lambdas)
init_ests <- .make_initest_list(additional_initial_estimates, lambdas,
sparse = isTRUE(optional_args$en_options$sparse))
# Call internal function
.pense_internal(x, y, alpha, lambdas, cold_lambda_inds, init_ests,
penalty_loadings, pense_opts, enpy_opts, optional_args)
res <- .pense_internal(x, y, alpha, lambdas, cold_lambda_inds, init_ests,
penalty_loadings, pense_opts, enpy_opts, optional_args)
if (!is.null(res$metrics)) {
class(res$metrics) <- 'nsoptim_metrics'
}
return(res)
}
#' Make a list of initial estimates
#'
#' @return a list the same length as `lambdas` with a list of initial estimates
#' for each value in `lambdas`.
.make_initest_list <- function (initial_estimates, lambdas) {
## Make a list of initial estimates
##
## @return a list the same length as `lambdas` with a list of initial estimates
## for each value in `lambdas`.
## @importFrom Matrix sparseVector
## @importClassesFrom Matrix dsparseVector
.make_initest_list <- function (initial_estimates, lambdas, sparse) {
if (length(initial_estimates) == 0L) {
return(list())
}
......@@ -119,13 +132,21 @@ pense <- function(x, y, alpha, lambdas, cold_lambdas, penalty_loadings,
lapply(seq_along(lambdas), function (i) {
matches <- which(i == init_est_inds)
if (length(matches) > 0) {
return(initial_estimates[matches])
return(lapply(initial_estimates[matches], function (est) {
if (isTRUE(sparse) && !is(est$beta, 'dsparseVector')) {
est$beta <- sparseVector(as.numeric(est$beta), seq_along(est$beta),
length(est$beta))
} else if (!isTRUE(sparse) && !is.numeric(est$beta)) {
est$beta <- as.numeric(est$beta)
}
return(est)
}))
}
return(list())
})
}
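
## Illustrative sketch of the sparse conversion above (Matrix is assumed to
## be a dependency of the package; in standalone use, run library(Matrix)):
dense_beta <- c(0, 1.5, 0)
sparse_beta <- sparseVector(as.numeric(dense_beta), seq_along(dense_beta),
                            length(dense_beta))
is(sparse_beta, 'dsparseVector')  # TRUE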
#' Perform some final input adjustments and call the internal C++ code.
## Perform some final input adjustments and call the internal C++ code.
.pense_internal <- function(x, y, alpha, lambdas, cold_lambda_inds,
additional_initial_estimates,
penalty_loadings = NULL,
......@@ -137,16 +158,6 @@ pense <- function(x, y, alpha, lambdas, cold_lambdas, penalty_loadings,
optional_args$pen_loadings <- penalty_loadings
}
if (alpha == 0) {
return(.Call('C_pense_ridge_regression', x, y, penalties,
additional_initial_estimates, cold_lambda_inds,
pense_opts, enpy_opts))
}
if (!is.null(optional_args$en_options) &&
isTRUE(optional_args$en_options$en_algorithm == 'dal')) {
return(.Call('C_pense_en_regression_dal', x, y, penalties,
additional_initial_estimates, cold_lambda_inds,
pense_opts, enpy_opts, optional_args))
}
.Call(C_pense_regression, x, y, penalties, additional_initial_estimates,
cold_lambda_inds, pense_opts, enpy_opts, optional_args)
}
#' Print Metrics
#'
#' Pretty-print a list of metrics from the optimization algorithm.
#'
#' @param x metrics object for printing.
#' @param max_level maximum depth of nested metrics to print.
#' @export
print.nsoptim_metrics <- function (x, max_level = NA, ...) {
.print_metrics(x, max_level, '')
invisible(NULL)
}
.print_metrics <- function (metrics, max_level, prefix) {
cat(prefix, '* ', metrics$name, sep = '')
other_metrics <- setdiff(names(metrics), c('name', 'sub_metrics'))
if (length(other_metrics) > 0L) {
cat(':', sep = '')
}
for (metric_name in other_metrics) {
if (is.numeric(metrics[[metric_name]])) {
cat(sprintf(' %s=%g;', metric_name, metrics[[metric_name]]))
} else if (is.character(metrics[[metric_name]])) {
cat(sprintf(' %s="%s";', metric_name,
sub('(\\s|;)+$', '', metrics[[metric_name]])))
} else {
cat(sprintf(' %s=%s;', metric_name, metrics[[metric_name]]))
}
}
cat('\n', sep = '')
if (!isTRUE(max_level <= 0L) && !is.null(metrics$sub_metrics)) {
lapply(rev(metrics$sub_metrics), .print_metrics, max_level = max_level - 1L,
prefix = paste0(prefix, ' '))
}
invisible(NULL)
}
\ No newline at end of file
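
## Illustrative sketch: printing a hand-built metrics object (the structure
## is assumed from the printer above, not taken from the backend).
m <- structure(list(name = 'admm', iterations = 27, status = 'converged;',
                    sub_metrics = list(list(name = 'inner', rel_change = 1e-7))),
               class = 'nsoptim_metrics')
print(m, max_level = 1)
# * admm: iterations=27; status="converged";
#   * inner: rel_change=1e-07;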
......@@ -77,10 +77,10 @@ mlocscale <- function (x, bdp = 0.25, location_rho = c('bisquare', 'huber'),
.Call(C_mlocscale, as.numeric(x), opts, loc_opts)
}
#' Get the Constant for Consistency for the M-Scale Using the Bisquare Rho Function
#' @param delta desired breakdown point (between 0 and 0.5)
#'
#' @return consistency constant
## Get the Constant for Consistency for the M-Scale Using the Bisquare Rho Function
## @param delta desired breakdown point (between 0 and 0.5)
##
## @return consistency constant
.bisquare_consistency_const <- function (delta) {
##
## Pre-computed values for some delta values
......@@ -145,16 +145,30 @@ rho_function <- function (rho) {
return(match(match.arg(rho, available), available))
}
#' Approximate Value Matching
#'
#' @param x,table see [base::match] for details.
#' @param eps numerical tolerance for matching.
#' @return a vector the same length as `x` with integers giving the position in
#'         `table` of the first match if there is a match, or `NA_integer_`
#'         otherwise.
## Approximate Value Matching
##
## @param x,table see [base::match] for details.
## @param eps numerical tolerance for matching.
## @return a vector the same length as `x` with integers giving the position in
##         `table` of the first match if there is a match, or `NA_integer_`
##         otherwise.
.approx_match <- function(x, table,
eps = min(sqrt(.Machine$double.eps),
0.5 * min(x, table))) {
.Call('C_approx_match', as.numeric(x), as.numeric(table),
as.numeric(eps[[1L]]))
}
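
## Illustrative sketch (requires the compiled package): match user-supplied
## lambda values against a grid, tolerating tiny numerical differences.
.approx_match(c(0.1, 0.55), table = c(1, 0.55, 0.1 + 1e-12))
# -> c(3L, 2L); exact match() would miss the perturbed 0.1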
## Extract the given metric from all matching nodes (by name).
extract_metric <- function (metrics, attr, node) {
matches <- c()
if (!is.null(metrics[[attr]]) && isTRUE(metrics$name == node)) {
matches <- c(matches, metrics[[attr]])
}
if (!is.null(metrics$sub_metrics)) {
matches <- c(matches, unlist(lapply(metrics$sub_metrics, extract_metric,
attr, node),
use.names = FALSE, recursive = FALSE))
}
return (matches)
}
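
## Illustrative sketch: collect every `iterations` entry from nodes named
## 'admm' in a nested metrics object (toy structure).
m <- list(name = 'pense', sub_metrics = list(
  list(name = 'admm', iterations = 12),
  list(name = 'admm', iterations = 31)))
extract_metric(m, 'iterations', 'admm')  # c(12, 31)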
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{.approx_match}
\alias{.approx_match}
\title{Approximate Value Matching}
\usage{
.approx_match(x, table, eps = min(sqrt(.Machine$double.eps), 0.5 * min(x,
table)))
}
\arguments{
\item{x, table}{see \link[base:match]{base::match} for details.}
\item{eps}{numerical tolerance for matching.}
}
\value{
a vector the same length as \code{x} with integers giving the position in
\code{table} of the first match if there is a match, or \code{NA_integer_}
otherwise.
}
\description{
Approximate Value Matching
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{.bisquare_consistency_const}
\alias{.bisquare_consistency_const}
\title{Get the Constant for Consistency for the M-Scale Using the Bisquare Rho Function}
\usage{
.bisquare_consistency_const(delta)
}
\arguments{
\item{delta}{desired breakdown point (between 0 and 0.5)}
}
\value{
consistency constant
}
\description{
Get the Constant for Consistency for the M-Scale Using the Bisquare Rho Function
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{.check_en_algorithm}
\alias{.check_en_algorithm}
\title{Check if the selected EN algorithm can handle the given penalty.}
\usage{
.check_en_algorithm(en_algorithm_opts, alpha)
}
\description{
Check if the selected EN algorithm can handle the given penalty.
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pense_regression.R
\name{.pense_internal}
\alias{.pense_internal}
% Please edit documentation in R/elnet.R
\name{.elnet_internal}
\alias{.elnet_internal}
\title{Perform some final input adjustments and call the internal C++ code.}
\usage{
.pense_internal(x, y, alpha, lambdas, cold_lambda_inds,
additional_initial_estimates, penalty_loadings = NULL, pense_opts,
enpy_opts, optional_args)
.elnet_internal(x, y, alpha, lambdas, penalty_loadings = NULL,
weights = NULL, include_intercept = TRUE, optional_args)
}
\description{
Perform some final input adjustments and call the internal C++ code.
......
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pense_regression.R
\name{.make_initest_list}
\alias{.make_initest_list}
\title{Make a list of initial estimates}
\usage{
.make_initest_list(initial_estimates, lambdas)
}
\value{
a list the same length as \code{lambdas} with a list of initial estimates
for each value in \code{lambdas}.
}
\description{
Make a list of initial estimates
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elnet.R
\name{elnet}
\alias{elnet}
\title{Compute the Elastic Net Regularization Path}
\usage{
elnet(x, y, alpha, lambdas, penalty_loadings, weights,
include_intercept = TRUE, en_algorithm_opts)
}
\arguments{
\item{alpha}{value for the alpha parameter, the balance between L1 and L2
penalization.}
\item{lambdas}{a vector of positive values for the lambda parameter.}
\item{penalty_loadings}{a vector of positive penalty loadings
(a.k.a. weights) for different penalization of each
coefficient.}
\item{weights}{a vector of positive observation weights.}
\item{include_intercept}{include an intercept in the model.}
\item{en_algorithm_opts}{options for the EN algorithm. See \link{en_admm_options}
and \link{en_dal_options} for details.}
}
\description{
Compute the EN estimator for linear regression with optional observation
weights and penalty loadings.
}
\details{
The elastic net estimator for the linear regression model solves
the optimization problem
\deqn{argmin_{\mu, \beta}
   (1/n) \sum_i w_i (y_i - \mu - x_i' \beta)^2 +
   \lambda \sum_j ( 0.5 (1 - \alpha) \beta_j^2 + \alpha l_j |\beta_j| )}
with observation weights \eqn{w_i} and penalty loadings \eqn{l_j}.
}
\seealso{
\link{pense} for an S-estimate of regression with elastic net penalty.
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{en_admm_options}
\alias{en_admm_options}
\title{Options for the ADMM Elastic Net Algorithm}
\usage{
en_admm_options(max_it = 1000, eps = 1e-06, tau, sparse = FALSE,
admm_type = c("auto", "linearized", "var-stepsize"),
tau_lower_mult = 0.01, tau_adjustment_lower = 0.98,
tau_adjustment_upper = 0.999)
}
\arguments{
\item{max_it}{maximum number of iterations.}
\item{eps}{numerical tolerance to check for convergence.}
\item{tau}{step size for the algorithm if using the \code{linearized} version
and the largest step size if using the \code{var-stepsize} version.}
\item{tau_lower_mult}{multiplicative factor determining the smallest step
size the algorithm may use, relative to \code{tau} (only for the
\code{var-stepsize} version).}
\item{sparse}{use sparse coefficients.}
\item{admm_type}{which type of ADMM algorithm to use. If \code{linearized},
uses a linearized version of ADMM which has runtime $O()$
and converges linearly.
If \code{var-stepsize}, uses a variable step-size ADMM
algorithm which converges quadratically for "true" EN
penalties (i.e., \eqn{\alpha < 1}) but has runtime $O()$.
If \code{auto} (the default), chooses the type based on the
penalty and the problem size.}
\item{tau_adjustment_lower}{(smallest) multiplicative factor for the
adjustment of the step size
\code{tau = tau_adjustment * tau}
(only for the \code{var-stepsize} version).}
\item{tau_adjustment_upper}{(largest) multiplicative factor for the
adjustment of the step size
\code{tau = tau_adjustment * tau}
(only for the \code{var-stepsize} version).}
}
\value{
options for the ADMM EN algorithm.
}
\description{
Options for the ADMM Elastic Net Algorithm
}
\seealso{
Other EN algorithms: \code{\link{en_dal_options}}
}
\concept{EN algorithms}
......@@ -32,4 +32,7 @@ options for the DAL EN algorithm.
\description{
Options for the DAL Elastic Net Algorithm
}
\seealso{
Other EN algorithms: \code{\link{en_admm_options}}
}
\concept{EN algorithms}
......@@ -5,9 +5,8 @@
\title{ENPY Initial Estimates}
\usage{