Commit 4ef12688 authored by davidkep's avatar davidkep

Merge branch 'release/2.0.1'

parents 99d7844f 0f7be3b1
......@@ -5,4 +5,5 @@
*.Rproj
.DS_Store
.Rbuildignore
.gitignore
*.trace
Package: pense
Type: Package
Title: Penalized Elastic Net S/MM-Estimator of Regression
Version: 2.0.1
Date: 2019-10-24
Authors@R: c(
person("David", "Kepplinger", , "david.kepplinger@gmail.com",
role = c("aut", "cre")),
person("Matias", "Salibian-Barrera", role = c("aut")),
person("Gabriela", "Cohen Freue", role = "aut"),
person("Derek", "Cho", role = "ctb")
)
Copyright: See the file COPYRIGHTS for copyright details on some of the
functions and algorithms used.
Encoding: UTF-8
Biarch: true
SystemRequirements: C++11
URL: https://gitlab.math.ubc.ca/dakep/pense
BugReports: https://gitlab.math.ubc.ca/dakep/pense/issues
Description: Robust penalized elastic net S and MM estimator for linear
regression. The method is described in detail in
Cohen Freue, G. V., Kepplinger, D., Salibian-Barrera, M., and Smucler, E.
(2017) <https://gcohenfr.github.io/pdfs/PENSE_manuscript.pdf>.
Depends:
R (>= 3.4.0),
Matrix
Imports:
Rcpp,
parallel,
methods,
lifecycle
LinkingTo:
nsoptim,
Rcpp,
RcppArmadillo (>= 0.9.100)
Suggests:
testthat (>= 2.1.0)
License: MIT + file LICENSE
NeedsCompilation: yes
RoxygenNote: 6.1.1
Roxygen: list(markdown = TRUE)
MIT License
Copyright (c) 2019 David Kepplinger
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
YEAR: 2019
COPYRIGHT HOLDER: David Kepplinger
# Generated by roxygen2: do not edit by hand
S3method(print,nsoptim_metrics)
export(consistency_const)
export(elnet)
export(en_admm_options)
export(en_dal_options)
export(enpy_initial_estimates)
export(enpy_options)
export(mloc)
export(mlocscale)
export(mscale)
export(mscale_algorithm_options)
export(pense)
export(pense_admm_options)
export(pense_mm_options)
export(rho_function)
export(tau_size)
importFrom(Rcpp,evalCpp)
importFrom(lifecycle,deprecate_soft)
importFrom(methods,as)
importFrom(stats,mad)
useDynLib(pense, .registration = TRUE)
This diff is collapsed.
#' Compute the Elastic Net Regularization Path
#'
#' Compute the EN estimator for linear regression with optional observation
#' weights and penalty loadings.
#'
#' The elastic net estimator for the linear regression model solves
#' the optimization problem
#'
#' \deqn{argmin_{\mu, \beta}
#'    (1/n) \sum_i w_i (y_i - \mu - x_i' \beta)^2 +
#'    \lambda \sum_j 0.5 (1 - \alpha) \beta_j^2 + \alpha l_j |\beta_j| }
#'
#' with observation weights \eqn{w_i} and penalty loadings \eqn{l_j}.
#'
#' @param x `n` by `p` matrix of numeric predictors.
#' @param y vector of response values of length `n`.
#' @param alpha value for the alpha parameter, the balance between L1 and L2
#'    penalization.
#' @param lambdas a vector of positive values for the lambda parameter.
#' @param penalty_loadings a vector of positive penalty loadings
#'    (a.k.a. weights) for different penalization of each coefficient.
#'    Only supported for `alpha > 0`.
#' @param weights a vector of positive observation weights.
#' @param include_intercept include an intercept in the model.
#' @param en_algorithm_opts options for the EN algorithm. See
#'    [en_algorithm_options] for details.
#' @return the result of the internal C++ routine; any attached optimization
#'    metrics are tagged with class `nsoptim_metrics`.
#' @seealso [pense] for an S-estimate of regression with elastic net penalty.
#' @export
elnet <- function(x, y, alpha, lambdas, penalty_loadings, weights,
                  include_intercept = TRUE, en_algorithm_opts) {
  # Coerce inputs to the expected types and validate dimensions.
  y <- .as(y, 'numeric')
  x_dim <- dim(x)
  if (length(y) != x_dim[[1L]]) {
    stop("Number of observations does not match between `x` and `y`.")
  }
  alpha <- .as(alpha[[1L]], 'numeric')
  # The internal code expects the lambda sequence in decreasing order.
  lambdas <- sort(.as(lambdas, 'numeric'), decreasing = TRUE)
  if (alpha < 0 || alpha > 1) {
    stop("`alpha` is outside 0 and 1.")
  }
  # Snap a numerically tiny alpha to an exact Ridge penalty.
  if (alpha < sqrt(.Machine$double.eps)) {
    alpha <- 0
  }
  pen_loadings <- NULL
  if (!missing(penalty_loadings)) {
    if (alpha == 0) {
      stop("Penalty loadings are only supported for alpha > 0.")
    } else if (length(penalty_loadings) != x_dim[[2L]]) {
      stop("Penalty loadings are not of length `p`.")
    }
    pen_loadings <- .as(penalty_loadings, 'numeric')
  }
  obs_weights <- NULL
  if (!missing(weights)) {
    if (length(weights) != x_dim[[1L]]) {
      stop("Observation weights are not the same length as `y`.")
    }
    obs_weights <- .as(weights, 'numeric')
  }
  if (missing(en_algorithm_opts)) {
    en_algorithm_opts <- NULL
  }
  # Validate the EN algorithm options and pick the algorithm before dispatch.
  optional_args <- list(
    en_options = .select_en_algorithm(en_algorithm_opts, alpha, x))
  res <- .elnet_internal(x, y, alpha, lambdas, pen_loadings, obs_weights,
                         include_intercept, optional_args)
  if (!is.null(res$metrics)) {
    class(res$metrics) <- 'nsoptim_metrics'
  }
  return(res)
}
#' Perform some final input adjustments and call the internal C++ code.
#'
#' @param x,y,alpha,lambdas see [elnet] for details.
#' @param penalty_loadings optional numeric vector of penalty loadings.
#' @param weights optional numeric vector of observation weights.
#' @param include_intercept include an intercept in the model.
#' @param optional_args list of additional arguments forwarded to the C++
#'   routine (e.g. the selected EN algorithm options).
.elnet_internal <- function(x, y, alpha, lambdas, penalty_loadings = NULL,
                            weights = NULL, include_intercept = TRUE,
                            optional_args) {
  # One penalty descriptor per lambda; the given order is kept as-is.
  penalties <- lapply(lambdas, function (lambda) {
    list(lambda = lambda, alpha = alpha)
  })
  intercept_flag <- isTRUE(include_intercept)
  if (!is.null(penalty_loadings)) {
    optional_args$pen_loadings <- penalty_loadings
  }
  if (!is.null(weights)) {
    optional_args$obs_weights <- weights
  }
  .Call(C_lsen_regression, x, y, penalties, intercept_flag, optional_args)
}
#' ENPY Initial Estimates
#'
#' Compute initial estimates for the penalized elastic net S-estimator via
#' the EN-PY procedure.
#'
#' @param x `n` by `p` matrix of numeric predictors.
#' @param y vector of response values of length `n`.
#' @param alpha value for the alpha parameter, the balance between L1 and L2
#'   penalization.
#' @param lambdas a vector of positive values for the lambda parameter.
#' @param bdp the desired breakdown point of the estimator, between 0 and 0.5.
#' @param cc consistency constant for the scale estimate. By default, the scale estimate is made consistent for the
#'   given breakdown point under the Normal model.
#' @param include_intercept include an intercept in the model.
#' @param enpy_opts options for the ENPY algorithm, created with the
#'   [enpy_options] function.
#' @param mscale_opts options for the M-scale estimation. See [mscale_algorithm_options] for details.
#' @return a list with one element per penalty; optimization metrics (when
#'   present) are tagged with class `nsoptim_metrics`.
#' @export
enpy_initial_estimates <- function (x, y, alpha, lambdas, bdp = 0.25, cc, include_intercept = TRUE,
                                    enpy_opts = enpy_options(), mscale_opts = mscale_algorithm_options()) {
  alpha <- .as(alpha[[1L]], 'numeric')
  # A missing `cc` becomes NULL; `.full_mscale_algo_options()` then derives
  # the consistency constant from `bdp`.
  if (missing(cc)) {
    cc <- NULL
  }
  if (alpha < 0 || alpha > 1) {
    stop("`alpha` is outside 0 and 1.")
  }
  # Treat a numerically tiny alpha as an exact Ridge penalty.
  if (alpha < sqrt(.Machine$double.eps)) {
    alpha <- 0
  }
  penalties <- make_penalties(alpha, lambdas)
  s_loss_params <- list(mscale = .full_mscale_algo_options(bdp = bdp, cc = cc, mscale_opts = mscale_opts),
                        intercept = include_intercept)
  # Check EN algorithm for ENPY
  enpy_opts$en_options <- .select_en_algorithm(enpy_opts$en_options, alpha, x)
  res <- .Call(C_penpy, x, drop(y), penalties, s_loss_params, enpy_opts, list())
  # Tag each estimate's metrics so they print via print.nsoptim_metrics().
  lapply(res, function (res) {
    if (!is.null(res$metrics)) {
      class(res$metrics) <- 'nsoptim_metrics'
    }
    return(res)
  })
}
## Make a list of penalties and ensure that `alpha` and `lambdas` are of
## correct type and order.
##
## @param alpha single EN balance parameter shared by all penalties.
## @param lambdas vector of penalty levels; coerced to numeric and sorted
##   in decreasing order.
## @return a list with one `list(alpha, lambda)` element per lambda.
make_penalties <- function (alpha, lambdas) {
  ordered_lambdas <- sort(as.numeric(lambdas), decreasing = TRUE)
  lapply(ordered_lambdas, function (lambda) {
    list(alpha = alpha, lambda = lambda)
  })
}
#' @useDynLib pense, .registration = TRUE
#' @importFrom Rcpp evalCpp
NULL
This diff is collapsed.
#' Print Metrics
#'
#' Pretty-print a list of metrics from the optimization algorithm.
#'
#' @param x metrics object for printing.
#' @param max_level maximum level of printing which is applied for printing
#'   nested metrics.
#' @param ... ignored; present for compatibility with the [print] generic.
#' @export
print.nsoptim_metrics <- function (x, max_level = NA, ...) {
  # Start the recursive printer at the top level with an empty prefix.
  .print_metrics(metrics = x, max_level = max_level, prefix = '')
  invisible(NULL)
}
## Recursively print one metrics node and its sub-metrics.
##
## @param metrics a named list with a `name`, optional scalar entries, and an
##   optional `sub_metrics` list of nested metrics.
## @param max_level remaining nesting levels to print; `NA` means unlimited.
## @param prefix indentation string prepended to each printed line.
.print_metrics <- function (metrics, max_level, prefix) {
  cat(prefix, '* ', metrics$name, sep = '')
  # All entries except the name and the nested metrics are printed inline.
  scalar_names <- setdiff(names(metrics), c('name', 'sub_metrics'))
  if (length(scalar_names) > 0L) {
    cat(':', sep = '')
  }
  for (nm in scalar_names) {
    value <- metrics[[nm]]
    formatted <- if (is.numeric(value)) {
      sprintf(' %s=%g;', nm, value)
    } else if (is.character(value)) {
      # Strip trailing whitespace/semicolons from string-valued metrics.
      sprintf(' %s="%s";', nm, sub('(\\s|;)+$', '', value))
    } else {
      sprintf(' %s=%s;', nm, value)
    }
    cat(formatted)
  }
  cat('\n', sep = '')
  # `!isTRUE(...)` deliberately treats `max_level = NA` as "no limit".
  if (!isTRUE(max_level <= 0L) && !is.null(metrics$sub_metrics)) {
    lapply(rev(metrics$sub_metrics), .print_metrics,
           max_level = max_level - 1L, prefix = paste0(prefix, '  '))
  }
  invisible(NULL)
}
\ No newline at end of file
#' Compute the Tau-Scale of Centered Values
#'
#' Compute the tau-scale without centering the values.
#'
#' @param x numeric values. Assumed to be already centered; `NA` values are
#'   not checked for and are passed straight to the native routine.
#' @return the tau scale.
#' @export
tau_size <- function (x) {
  # No checks for NA values!
  # Delegates directly to the registered native routine.
  .Call(C_tau_size, as.numeric(x))
}
#' Compute the M-Scale of Centered Values
#'
#' Compute the M-scale without centering the values.
#'
#' @param x numeric values. Assumed to be already centered; `NA` values are
#'   not checked for.
#' @param bdp desired breakdown point (between 0 and 0.5).
#' @param cc cutoff value for the bisquare rho function. By default, chosen
#'   for a consistent estimate under the Normal model.
#' @param opts a list of options for the M-scale estimation algorithm, see [mscale_algorithm_options]
#'   for details.
#' @return the m-scale estimate.
#' @export
mscale <- function (x, bdp = 0.25, cc = consistency_const(bdp, 'bisquare'),
                    opts = mscale_algorithm_options()) {
  # No checks for NA values!
  # NOTE(review): the default expression for `cc` in the signature is never
  # evaluated — when the caller omits `cc`, missing(cc) is TRUE and `cc` is
  # replaced with NULL here, leaving `.full_mscale_algo_options()` to derive
  # the consistency constant from `bdp`. Confirm the two paths agree with
  # the documented default.
  if (missing(cc)) {
    cc <- NULL
  }
  opts <- .full_mscale_algo_options(bdp, cc, opts)
  .Call(C_mscale, as.numeric(x), opts)
}
#' Compute the M-estimate of Location
#'
#' @param x numeric values. `NA` values are not checked for.
#' @param scale scale of the `x` values.
#' @param rho the rho function to use.
#' @param cc value of the tuning constant for the chosen rho function.
#'   By default, chosen to achieve 95% efficiency under the Normal
#'   model.
#' @param opts a list of options for the M-estimating algorithm, see
#'   [mscale_algorithm_options] for details.
#' @return the m-scale estimate.
#' @importFrom stats mad
#' @export
mloc <- function (x, scale = mad(x), rho, cc, opts = mscale_algorithm_options()) {
  # No checks for NA values!
  # A missing `cc` becomes NULL; the tuning constant is then determined
  # downstream (presumably by the native code) — see `.full_mscale_algo_options`.
  if (missing(cc)) {
    cc <- NULL
  }
  # The breakdown point (.5) fills the bdp slot of the options; presumably
  # only the rho/cc settings matter for a location estimate — verify.
  opts <- .full_mscale_algo_options(.5, cc, opts)
  # NOTE(review): if `rho` is missing, `rho_function()` returns the full
  # character vector of available rho names instead of a single integer
  # code — confirm the native routine handles (or rejects) this case.
  opts$rho <- rho_function(rho)
  .Call(C_mloc, .as(x, 'numeric'), scale, opts)
}
#' Compute the M-Location and M-Scale
#'
#' Simultaneously estimate the M-Location and the M-Scale.
#'
#' @param x numeric values.
#' @param bdp desired breakdown point (between 0 and 0.5).
#' @param location_rho the rho function for the location estimate; defaults
#'   to the first option, `'bisquare'`.
#' @param scale_cc cutoff value for the bisquare rho function used for the
#'   scale estimate. By default, chosen for a consistent estimate under the
#'   Normal model.
#' @param location_cc tuning constant for the location's rho function. If
#'   omitted, no tuning constant is passed to the native routine.
#' @param opts a list of options for the M-estimating equation,
#'   see [mscale_algorithm_options] for details.
#' @return a vector with two elements, the M-location and the M-scale estimate.
#' @export
mlocscale <- function (x, bdp = 0.25, location_rho = c('bisquare', 'huber'),
                       scale_cc = consistency_const(bdp, 'bisquare'),
                       location_cc, opts = mscale_algorithm_options()) {
  # Scale options carry the breakdown point and scale cutoff.
  opts <- .full_mscale_algo_options(bdp, scale_cc, opts)
  # Location options: integer code of the chosen rho function, plus the
  # optional tuning constant when explicitly supplied.
  loc_opts <- list(rho = rho_function(location_rho))
  if (!missing(location_cc)) {
    loc_opts$cc <- .as(location_cc[[1L]], 'numeric')
  }
  .Call(C_mlocscale, .as(x, 'numeric'), opts, loc_opts)
}
## Get the Constant for Consistency for the M-Scale Using the Bisquare Rho Function
##
## @param delta desired breakdown point (between 0 and 0.5)
##
## @return consistency constant
.bisquare_consistency_const <- function (delta) {
  ## Tolerance used for both the bounds check and the lookup of
  ## pre-computed constants.
  tol <- sqrt(.Machine$double.eps)
  if (!isTRUE(delta < 0.5 + tol && delta > -tol)) {
    stop("`delta` is outside valid bounds")
  }
  ## Pre-computed constants for frequently used breakdown points.
  if (abs(delta - 0.5) < tol) {
    return(1.5476450)
  }
  if (abs(delta - 0.25) < tol) {
    return(2.937015)
  }
  if (abs(delta - 0.1) < tol) {
    return(5.182361)
  }
  if (delta < 0.005) {
    return(50) # ~.1% bdp for bisquare
  }
  ## Bracketing interval for the root search; larger delta implies a
  ## smaller consistency constant.
  search_bounds <- if (delta > 0.1) {
    c(1.5, 5.5)
  } else {
    c(5, 25)
  }
  ## Closed-form expectation of the bisquare rho under the Normal model,
  ## shifted by `delta` so its root is the consistency constant.
  expectation_gap <- function(cc, delta) {
    tail_prob <- 2 * pnorm(-cc)
    1/cc^6 * exp(-(cc^2/2)) * (
      -cc * (15 - 4 * cc^2 + cc^4) * sqrt(2 / pi) +
        3 * (5 - 3 * cc^2 + cc^4) * exp(cc^2/2) * (1 - tail_prob) +
        cc^6 * exp(cc^2/2) * tail_prob
    ) - delta
  }
  uniroot(expectation_gap, interval = search_bounds, delta)$root
}
#' Get the Constant for Consistency for the M-Scale
#'
#' @param delta desired breakdown point (between 0 and 0.5)
#' @param rho the name of the chosen rho function.
#'
#' @return consistency constant
#' @export
consistency_const <- function (delta, rho) {
  # rho_function() maps the name to its integer code (1 = bisquare,
  # 2 = huber) and rejects anything else.
  rho_id <- rho_function(rho)
  if (rho_id == 1L) {
    return(.bisquare_consistency_const(delta))
  }
  stop("Huber's rho function not supported for scale ",
       "estimation!")
}
#' List or check available rho functions.
#'
#' @param rho the name of the rho function to check.
#' @return if `rho` is missing returns a vector of rho function names, otherwise
#'   the integer representation of the rho function.
#' @export
rho_function <- function (rho) {
  known_rho <- c('bisquare', 'huber')
  # Without an argument, act as a listing of the supported names.
  if (missing(rho)) {
    return(known_rho)
  }
  # match.arg() validates `rho` (with partial matching); the position in
  # `known_rho` is the integer code.
  selected <- match.arg(rho, known_rho)
  which(known_rho == selected)
}
## Approximate Value Matching
##
## @param x,table see [base::match] for details.
## @param eps numerical tolerance for matching.
## @return a vector the same length as `x` with integers giving the position in
##         `table` of the first match if there is a match, or `NA_integer_`
##         otherwise.
.approx_match <- function(x, table,
                          eps = min(sqrt(.Machine$double.eps),
                                    0.5 * min(x, table))) {
  # NOTE(review): the default `eps` is non-positive whenever `x` or `table`
  # contain values <= 0 — confirm callers only pass positive values.
  # Use the registered native symbol, consistent with every other .Call()
  # in this package (the string form relies on a routine being findable by
  # that exact character name across loaded DLLs).
  .Call(C_approx_match, as.numeric(x), as.numeric(table),
        as.numeric(eps[[1L]]))
}
## Extract the given metric from all matching nodes (by name).
##
## @param metrics a (possibly nested) metrics node with a `name` entry and
##   an optional `sub_metrics` list.
## @param attr name of the metric entry to collect.
## @param node only nodes whose `name` equals `node` contribute values.
## @return a vector of the collected values, or NULL if none matched.
extract_metric <- function (metrics, attr, node) {
  # Value from this node, if it matches by name and carries the attribute.
  found <- if (!is.null(metrics[[attr]]) && isTRUE(metrics$name == node)) {
    metrics[[attr]]
  } else {
    NULL
  }
  # Depth-first collection over all nested metrics.
  children <- metrics$sub_metrics
  if (!is.null(children)) {
    from_children <- unlist(lapply(children, extract_metric, attr, node),
                            use.names = FALSE, recursive = FALSE)
    found <- c(found, from_children)
  }
  return (found)
}
# New PENSE R package
This R package is a new implementation of the Penalized Elastic Net S-Estimator (PENSE) and M-estimator (PENSEM)
for linear regression.
It also supports the adaptive versions of these two estimates, i.e., adaptive PENSE and adaptive PENSEM.
![Language: R](https://img.shields.io/badge/language-R-blue)
![Lifecycle: Experimental](https://img.shields.io/badge/lifecycle-experimental-orange)
The goal is to maintain a backwards-compatible interface, using a completely rewritten codebase.
This R package is a new implementation of the Penalized Elastic Net S-Estimator (PENSE) for linear regression.
It also supports the adaptive version, i.e., adaptive PENSE.
Currently, the package only supports a subset of the original [pense package](https://cran.r-project.org/package=pense).
Importantly, there is no support for automatic hyper-parameter selection (e.g., cross-validation) or MM-estimators.
At the moment, the interface of the new implementation is not yet backwards-compatible.
## New Codebase
The new package uses the [nsoptim](https://gitlab.math.ubc.ca/dakep/nsoptim) package for optimization routines.
## Old Overview (R)
The main functions in the package are
* `pense()` … to compute a robust elastic net S-estimator for linear regression
* `pensem()` … to compute a robust elastic net MM-estimator either directly from the data matrix or
from an S-estimator previously computed with `pense()`.
Both of these functions perform k-fold cross-validation to choose the optimal penalty level
`lambda`, but the optimal balance between the L1 and the L2 penalties (the `alpha` parameter) needs
to be pre-specified by the user.
The default breakdown point is set to 25%. If the user needs an estimator with a higher breakdown
point, the `delta` argument in the `pense_options()` and `initest_options()` can be set to the
desired breakdown point (e.g., `delta = 0.5`).
The package also exports an efficient classical elastic net algorithm available via the functions
`elnet()` and `elnet_cv()` which chooses an optimal penalty parameter based on cross-validation.
The elastic net solution is computed either by the augmented LARS algorithm
(`en_options_aug_lars()`) or via the Dual Augmented Lagrangian algorithm (Tomioka, et al. 2011)
selected with `en_options_dal()` which is much faster in case of a large number of predictors
(> 500) and a small number of observations (< 200).
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{consistency_const}
\alias{consistency_const}
\title{Get the Constant for Consistency for the M-Scale}
\usage{
consistency_const(delta, rho)
}
\arguments{
\item{delta}{desired breakdown point (between 0 and 0.5)}
\item{rho}{the name of the chosen rho function.}
}
\value{
consistency constant
}
\description{
Get the Constant for Consistency for the M-Scale
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{.as}
\alias{.as}
\title{A wrapper around \code{methods::as} which raises an error if the conversion results in NA.}
\usage{
.as(object, class, ...)
}
\arguments{
\item{...}{passed on to \link[methods:as]{methods::as}.}
}
\description{
A wrapper around \code{methods::as} which raises an error if the conversion results in NA.
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elnet.R
\name{.elnet_internal}
\alias{.elnet_internal}
\title{Perform some final input adjustments and call the internal C++ code.}
\usage{
.elnet_internal(x, y, alpha, lambdas, penalty_loadings = NULL,
weights = NULL, include_intercept = TRUE, optional_args)
}
\description{
Perform some final input adjustments and call the internal C++ code.
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{.full_mscale_algo_options}
\alias{.full_mscale_algo_options}
\title{Full options for the M-scale Estimation Algorithm}
\usage{
.full_mscale_algo_options(bdp, cc, mscale_opts)
}
\arguments{
\item{bdp}{the breakdown point, i.e., \code{delta} in the M-estimation equation.}
\item{cc}{the cutoff threshold for the bisquare rho function.}
\item{mscale_opts}{"public" control options created by \link{mscale_algorithm_options}.}
}
\value{
full options for the M-scale estimation algorithm.
}
\description{
Full options for the M-scale Estimation Algorithm
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pense_regression.R
\name{.pense_max_lambda}
\alias{.pense_max_lambda}
\title{Get the smallest lambda such that the PENSE estimate gives the empty model.}
\usage{
.pense_max_lambda(x, y, alpha, pense_options, penalty_loadings = NULL)
}
\description{
Get the smallest lambda such that the PENSE estimate gives the empty model.
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elnet.R
\name{elnet}
\alias{elnet}
\title{Compute the Elastic Net Regularization Path}
\usage{
elnet(x, y, alpha, lambdas, penalty_loadings, weights,
include_intercept = TRUE, en_algorithm_opts)
}
\arguments{
\item{alpha}{value for the alpha parameter, the balance between L1 and L2
penalization.}
\item{lambdas}{a vector of positive values for the lambda parameter.}
\item{penalty_loadings}{a vector of positive penalty loadings
(a.k.a. weights) for different penalization of each
coefficient.}
\item{weights}{a vector of positive observation weights.}
\item{include_intercept}{include an intercept in the model.}
\item{en_algorithm_opts}{options for the EN algorithm. See \link{en_algorithm_options}
for details.}
}
\description{
Compute the EN estimator for linear regression with optional observation
weights and penalty loadings.
}
\details{
The elastic net estimator for the linear regression model solves
the optimization problem
\deqn{argmin_{\mu, \beta}
(1/n) \sum_i w_i (y_i - \mu - x_i' \beta)^2 +
\lambda \sum_j 0.5 (1 - \alpha) \beta_j^2 + \alpha l_i |\beta_j| }
with observation weights \eqn{w_i} and penalty loadings \eqn{l_i}.
}
\seealso{
\link{pense} for an S-estimate of regression with elastic net penalty.
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{en_admm_options}
\alias{en_admm_options}
\title{Options for the ADMM Elastic Net Algorithm}
\usage{
en_admm_options(max_it = 1000, eps = 1e-08, tau, sparse = FALSE,
admm_type = c("auto", "linearized", "var-stepsize"),
tau_lower_mult = 0.01, tau_adjustment_lower = 0.98,
tau_adjustment_upper = 0.999)
}
\arguments{
\item{max_it}{maximum number of iterations.}
\item{eps}{numerical tolerance to check for convergence.}
\item{tau}{step size for the algorithm if using the \code{linearized} version
and the largest step size if using the \code{var-stepsize} version.}
\item{sparse}{use sparse coefficients.}
\item{admm_type}{what type of the ADMM algorithm to use. If \code{linearized},
uses a linearized version of ADMM which has runtime \eqn{O(np)}
and converges linearly.
If \code{var-stepsize}, uses a variable step-size ADMM
algorithm which converges quadratically for "true" EN
penalties (i.e., \eqn{alpha < 1}) but has runtime \eqn{O(np^3)}.
If \code{auto} (the default), chooses the type based on the
penalty and the problem size.}
\item{tau_adjustment_lower}{(smallest) multiplicative factor for the
adjustment of the step size
\code{tau = tau_adjustment * tau}
(only for the \code{var-stepsize} version).}
\item{tau_adjustment_upper}{(largest) multiplicative factor for the
adjustment of the step size
\code{tau = tau_adjustment * tau}
(only for the \code{var-stepsize} version).}
}
\value{
options for the ADMM EN algorithm.
}
\description{
Options for the ADMM Elastic Net Algorithm
}
\seealso{
Other EN algorithms: \code{\link{en_dal_options}}
}
\concept{EN algorithms}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_options.R
\name{en_algorithm_options}
\alias{en_algorithm_options}
\title{Control the Algorithm to Compute (Weighted) Least-Squares Elastic Net Estimates}
\description{
The package supports multiple different algorithms to compute the EN estimate for weighted LS loss functions.
Each algorithm has certain characteristics that make it useful for some problems.
To select a specific algorithm and set its parameters, use any of the \code{en_***_options} functions.
}
\details{
\itemize{
\item \link{en_admm_options}: Select an iterative ADMM-type algorithm. There are two versions available:
\code{admm_type = "linearized"} needs \emph{O(n p)} operations per iteration and converges linearly, while
\code{admm_type = "var-stepsize"} needs \emph{O(n p^3)} operations per iteration but converges quadratically.
\item \link{en_dal_options}: Select the iterative Dual Augmented Lagrangian (DAL) method. DAL needs O(n^3 p^2) operations
per iteration, but converges exponentially.
}
}