...
 
.Rproj.user
.vscode
*.o
*.so
*.Rproj
.DS_Store
.Rbuildignore
.gitignore
*.trace
......@@ -37,5 +37,5 @@ Suggests:
testthat (>= 2.1.0)
License: MIT + file LICENSE
NeedsCompilation: yes
RoxygenNote: 6.1.1
RoxygenNote: 7.0.2
Roxygen: list(markdown = TRUE)
......@@ -5,6 +5,7 @@ export(consistency_const)
export(elnet)
export(en_admm_options)
export(en_dal_options)
export(en_lars_options)
export(enpy_initial_estimates)
export(enpy_options)
export(mloc)
......@@ -14,6 +15,7 @@ export(mscale_algorithm_options)
export(pense)
export(pense_admm_options)
export(pense_mm_options)
export(pscs)
export(rho_function)
export(tau_size)
importFrom(Rcpp,evalCpp)
......
This diff is collapsed.
......@@ -20,12 +20,14 @@
#' coefficient.
#' @param weights a vector of positive observation weights.
#' @param include_intercept include an intercept in the model.
#' @param sparse use sparse coefficient vectors.
#' @param en_algorithm_opts options for the EN algorithm. See [en_algorithm_options]
#' for details.
#' @param eps numerical tolerance.
#' @seealso [pense] for an S-estimate of regression with elastic net penalty.
#' @export
elnet <- function(x, y, alpha, lambdas, penalty_loadings, weights,
include_intercept = TRUE, en_algorithm_opts) {
elnet <- function(x, y, alpha, lambdas, penalty_loadings, weights, include_intercept = TRUE, en_algorithm_opts,
sparse = FALSE, eps = 1e-6) {
optional_args <- list()
# Normalize input
......@@ -73,7 +75,7 @@ elnet <- function(x, y, alpha, lambdas, penalty_loadings, weights,
}
# Check EN algorithm
optional_args$en_options <- .select_en_algorithm(en_algorithm_opts, alpha, x)
optional_args$en_options <- .select_en_algorithm(en_algorithm_opts, alpha, x, sparse, eps)
# Call internal function
res <- .elnet_internal(x, y, alpha, lambdas, penalty_loadings, weights,
......
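For reference, a minimal sketch of calling elnet() with the new sparse and eps arguments on simulated data; the data, lambda grid, and algorithm choice are illustrative only, and the handling of the omitted penalty_loadings and weights arguments is assumed to follow the internal defaults shown above.
set.seed(1)
x <- matrix(rnorm(100 * 10), ncol = 10)
y <- x[, 1] - 2 * x[, 2] + rnorm(100)
# exact LARS solutions, sparse coefficient storage, tighter tolerance
fit <- elnet(x, y, alpha = 0.75, lambdas = c(1, 0.5, 0.1),
             en_algorithm_opts = en_lars_options(), sparse = TRUE, eps = 1e-8)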
......@@ -11,11 +11,15 @@
#' @param bdp the desired breakdown point of the estimator, between 0 and 0.5.
#' @param cc consistency constant for the scale estimate. By default, the scale estimate is made consistent for the
#' given breakdown point under the Normal model.
#' @param en_algorithm_opts options for the EN algorithm. See [en_algorithm_options] for details.
#' @param sparse use sparse coefficient vectors.
#' @param eps numerical tolerance.
#' @param enpy_opts options for the EN-PY procedure. See [enpy_options] for details.
#' @param mscale_opts options for the M-scale estimation. See [mscale_algorithm_options] for details.
#' @param ncores number of CPU cores to use in parallel. By default, only one CPU core is used.
#' @export
enpy_initial_estimates <- function (x, y, alpha, lambdas, bdp = 0.25, cc, include_intercept = TRUE,
enpy_opts = enpy_options(), mscale_opts = mscale_algorithm_options()) {
enpy_initial_estimates <- function (x, y, alpha, lambdas, bdp = 0.25, cc, include_intercept = TRUE, penalty_loadings,
enpy_opts = enpy_options(), mscale_opts = mscale_algorithm_options(),
eps = 1e-6, sparse = FALSE, ncores = 1L) {
alpha <- .as(alpha[[1L]], 'numeric')
if (missing(cc)) {
cc <- NULL
......@@ -33,10 +37,23 @@ enpy_initial_estimates <- function (x, y, alpha, lambdas, bdp = 0.25, cc, includ
s_loss_params <- list(mscale = .full_mscale_algo_options(bdp = bdp, cc = cc, mscale_opts = mscale_opts),
intercept = include_intercept)
optional_args <- list()
if (!missing(penalty_loadings) && !is.null(penalty_loadings)) {
if(alpha == 0) {
stop("Penalty loadings are only supported for alpha > 0.")
} else if (length(penalty_loadings) != x_dim[[2L]]) {
stop("Penalty loadings are not of length `p`.")
}
optional_args$penalty_loadings <- .as(penalty_loadings, 'numeric')
}
# Check EN algorithm for ENPY
enpy_opts$en_options <- .select_en_algorithm(enpy_opts$en_options, alpha, x)
enpy_opts$num_threads <- max(1L, .as(ncores[[1L]], 'integer'))
enpy_opts$eps <- .as(eps[[1L]], 'numeric')
enpy_opts$en_options <- .select_en_algorithm(enpy_opts$en_options, alpha, x, sparse, enpy_opts$eps)
res <- .Call(C_penpy, x, drop(y), penalties, s_loss_params, enpy_opts, list())
res <- .Call(C_penpy, x, drop(y), penalties, s_loss_params, enpy_opts, optional_args)
lapply(res, function (res) {
if (!is.null(res$metrics)) {
......@@ -46,6 +63,59 @@ enpy_initial_estimates <- function (x, y, alpha, lambdas, bdp = 0.25, cc, includ
})
}
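A corresponding sketch for enpy_initial_estimates(), reusing the simulated x and y from the elnet sketch above; the penalty loadings are arbitrary values chosen only to illustrate the new argument.
loadings <- c(rep(1, 5), rep(2, 5))  # penalize the last five coefficients twice as strongly
init <- enpy_initial_estimates(x, y, alpha = 0.75, lambdas = c(0.5, 0.1), bdp = 0.25,
                               penalty_loadings = loadings, sparse = FALSE, ncores = 2L)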
#' Compute Principal Sensitivity Components
#'
#' @param x `n` by `p` matrix of numeric predictors.
#' @param y vector of response values of length `n`.
#' @param alpha value for the alpha parameter, the balance between L1 and L2
#' penalization.
#' @param lambdas a vector of positive values for the lambda parameter.
#' @param penalty_loadings a vector of positive penalty loadings
#' (a.k.a. weights) for different penalization of each coefficient.
#' @param include_intercept include an intercept in the model.
#' @param sparse use sparse coefficient vectors.
#' @param eps numerical tolerance.
#' @param en_algorithm_opts options for the EN algorithm. See [en_algorithm_options] for details.
#' @param ncores number of CPU cores to use in parallel. By default, only one CPU core is used.
#' @export
pscs <- function (x, y, alpha, lambdas, include_intercept = TRUE, penalty_loadings, en_algorithm_opts,
eps = 1e-6, sparse = FALSE, ncores = 1L) {
alpha <- .as(alpha[[1L]], 'numeric')
if (missing(en_algorithm_opts)) {
en_algorithm_opts <- NULL
}
if (alpha < 0 || alpha > 1) {
stop("`alpha` is outside 0 and 1.")
}
if (alpha < sqrt(.Machine$double.eps)) {
alpha <- 0
}
penalties <- make_penalties(alpha, lambdas)
eps <- .as(eps[[1L]], 'numeric')
optional_args <- list(
intercept = .as(include_intercept[[1L]], 'logical'),
num_threads = max(1L, .as(ncores[[1L]], 'integer')))
en_algorithm_opts <- .select_en_algorithm(en_algorithm_opts, alpha, x, sparse, eps)
if (!missing(penalty_loadings) && !is.null(penalty_loadings)) {
if (alpha == 0) {
stop("Penalty loadings are only supported for alpha > 0.")
} else if (length(penalty_loadings) != ncol(x)) {
stop("Penalty loadings are not of length `p`.")
}
optional_args$penalty_loadings <- .as(penalty_loadings, 'numeric')
}
res <- .Call(C_pscs, x, drop(y), penalties, en_algorithm_opts, optional_args)
return(res)
}
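A sketch of the new pscs() function on the same simulated data; the structure of the returned object is whatever the C_pscs routine produces and is not documented here.
psc_list <- pscs(x, y, alpha = 0.75, lambdas = c(0.5, 0.1), ncores = 2L)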
## Make a list of penalties and ensure that `alpha` and `lambdas` are of
## correct type and order.
make_penalties <- function (alpha, lambdas) {
......
This diff is collapsed.
#!/bin/sh
rm -Rf autom4te.cache
rm config.log
rm config.status
rm src/autoconfig.hpp
rm src/Makevars
rm src/*.o
rm src/*.so
This diff is collapsed.
AC_INIT(pense, 2.0.0)
AC_PREREQ(2.61)
: ${R_HOME=`R RHOME`}
if test -z "${R_HOME}"; then
echo "could not determine R_HOME"
exit 1
fi
RBIN="${R_HOME}/bin/R"
CC=`"${RBIN}" CMD config CC`
CPP="$CC -E"
CXX=`"${RBIN}" CMD config CXX`
CXXCPP="$CXX -E"
CXXFLAGS=`"${RBIN}" CMD config CXXFLAGS`
CPPFLAGS=`"${RBIN}" CMD config CPPFLAGS`
LDFLAGS=`"${RBIN}" CMD config LDFLAGS`
AC_PROG_CC
AC_PROG_CPP
AC_PROG_CXX
################################################################################
##
## Check for necessary support by C++ compiler
##
################################################################################
AC_LANG(C++)
## Check for strict aliasing support
STRICT_ALIASING_FLAG="-fstrict-aliasing"
AC_MSG_CHECKING([whether _AC_LANG compiler accepts $STRICT_ALIASING_FLAG])
ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS
_AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS -Werror $STRICT_ALIASING_FLAG -Wstrict-aliasing"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM()],
[AC_MSG_RESULT([yes])],
[
AC_MSG_RESULT([no])
STRICT_ALIASING_FLAG=""
])
_AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags
AC_SUBST(STRICT_ALIASING_FLAG)
## Check for general OpenMP support
AC_OPENMP
# Some systems have broken OMP libraries, so also check that the actual package will work
ac_pkg_openmp_cxx=no
if test -n "${OPENMP_CXXFLAGS}"; then
AC_MSG_CHECKING([C++ support for OpenMP])
AC_LANG_CONFTEST([
AC_LANG_PROGRAM([[#include <omp.h>]], [[ return omp_get_num_threads (); ]])
])
PKG_CXXFLAGS="${OPENMP_CXXFLAGS}" PKG_LIBS="${OPENMP_CXXFLAGS}" "$RBIN" CMD SHLIB conftest.cpp 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD && "$RBIN" --vanilla -q -e "dyn.load(paste('conftest',.Platform\$dynlib.ext,sep=''))" 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD && ac_pkg_openmp_cxx=yes
AC_MSG_RESULT([${ac_pkg_openmp_cxx}])
fi
# Check whether OpenMP supports tasks and custom reductions, and whether constant member variables need to be shared
LDFLAGS="$LDFLAGS $OPENMP_CXXFLAGS"
ax_check_save_flags="$LDFLAGS"
if test "${ac_pkg_openmp_cxx}" = yes; then
AC_MSG_CHECKING([OpenMP support for tasks])
# Check if tasks are supported with constant member not shared
AC_LINK_IFELSE([AC_LANG_PROGRAM([[
#include <omp.h>
class Foo {
public:
Foo(const int x) : x_(x) {}
int Test(const int y) {
int sum = 0;
#pragma omp parallel num_threads(2) default(none) shared(sum)
{
#pragma omp single nowait
for (int i = 0; i < y; ++i) {
#pragma omp task default(none) firstprivate(i) shared(sum)
{
sum += x_ * i;
}
}
}
return sum;
}
private:
const int x_;
};
]],
[[
const int mult = omp_get_num_threads();
Foo f(mult);
return f.Test(2 * mult);
]])],
[
ac_pkg_openmp_cxx_tasks=yes
],
[
ac_pkg_openmp_cxx_tasks=no
])
if test "${ac_pkg_openmp_cxx_tasks}" = no; then
# Maybe constant member variables need to be shared
AC_LINK_IFELSE([AC_LANG_PROGRAM([[
#include <omp.h>
class Foo {
public:
Foo(const int x) : x_(x) {}
int Test(const int y) {
int sum = 0;
#pragma omp parallel num_threads(2) default(none) shared(sum)
{
#pragma omp single nowait
for (int i = 0; i < y; ++i) {
#pragma omp task default(none) firstprivate(i) shared(sum, x_)
{
sum += x_ * i;
}
}
}
return sum;
}
private:
const int x_;
};
]],
[[
const int mult = omp_get_num_threads();
Foo f(mult);
return f.Test(2 * mult);
]])],
[
AC_DEFINE([PENSE_OPENMP_ADD_CONST_SHARED], [1], [
Set to 1 if OpenMP requires constant class members to be shared
])
ac_pkg_openmp_cxx_tasks="yes, with shared constant members"
],
[
ac_pkg_openmp_cxx_tasks=no
ac_pkg_openmp_cxx=no
])
fi
AC_MSG_RESULT([$ac_pkg_openmp_cxx_tasks])
fi
if test "${ac_pkg_openmp_cxx}" = yes; then
AC_MSG_CHECKING([OpenMP support for custom reductions])
AC_LINK_IFELSE([AC_LANG_PROGRAM([[
#include <omp.h>
void CustomReduction(int* single, int* combined) { *combined += *single; }
]],
[[
#pragma omp declare reduction(c:int:CustomReduction(&omp_in, &omp_out))
int sum = 0;
const int y = omp_get_num_threads();
#pragma omp parallel num_threads(2) default(none) firstprivate(y) shared(sum)
{
#pragma omp for reduction(c:sum)
for (int i = 0; i < y; ++i) {
sum = i * y;
}
}
]])],
[ac_pkg_openmp_cxx=yes],
[ac_pkg_openmp_cxx=no])
AC_MSG_RESULT([$ac_pkg_openmp_cxx])
fi
LDFLAGS=$ax_check_save_flags
# If ${ac_pkg_openmp_cxx} is "yes", OpenMP can be used; otherwise it is disabled below.
if test "${ac_pkg_openmp_cxx}" = no; then
OPENMP_CXXFLAGS=''
AC_DEFINE([PENSE_DISABLE_OPENMP], [1], [Set to 1 if the compiler does not support OpenMP])
else
AC_DEFINE([PENSE_ENABLE_OPENMP], [1], [Set to 1 if the compiler supports a recent version of OpenMP])
fi
AC_SUBST(OPENMP_CXXFLAGS)
##
## Write output
##
AC_CONFIG_HEADERS([src/autoconfig.hpp])
AC_CONFIG_FILES([src/Makevars])
AC_OUTPUT
cp src/autoconfig.hpp.win src/autoconfig.hpp
......@@ -4,8 +4,16 @@
\alias{.elnet_internal}
\title{Perform some final input adjustments and call the internal C++ code.}
\usage{
.elnet_internal(x, y, alpha, lambdas, penalty_loadings = NULL,
weights = NULL, include_intercept = TRUE, optional_args)
.elnet_internal(
x,
y,
alpha,
lambdas,
penalty_loadings = NULL,
weights = NULL,
include_intercept = TRUE,
optional_args
)
}
\description{
Perform some final input adjustments and call the internal C++ code.
......
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pense_regression.R
\name{.pense_lambda_grid}
\alias{.pense_lambda_grid}
\title{Generate a log-spaced grid of decreasing lambda values}
\usage{
.pense_lambda_grid(
x,
y,
alpha,
nlambda,
lambda_min_ratio,
pense_options,
penalty_loadings = NULL
)
}
\description{
Generate a log-spaced grid of decreasing lambda values
}
......@@ -4,8 +4,18 @@
\alias{elnet}
\title{Compute the Elastic Net Regularization Path}
\usage{
elnet(x, y, alpha, lambdas, penalty_loadings, weights,
include_intercept = TRUE, en_algorithm_opts)
elnet(
x,
y,
alpha,
lambdas,
penalty_loadings,
weights,
include_intercept = TRUE,
en_algorithm_opts,
sparse = FALSE,
eps = 1e-06
)
}
\arguments{
\item{alpha}{value for the alpha parameter, the balance between L1 and L2
......@@ -23,6 +33,10 @@ coefficient.}
\item{en_algorithm_opts}{options for the EN algorithm. See \link{en_algorithm_options}
for details.}
\item{sparse}{use sparse coefficient vectors.}
\item{eps}{numerical tolerance.}
}
\description{
Compute the EN estimator for linear regression with optional observation
......
......@@ -4,21 +4,21 @@
\alias{en_admm_options}
\title{Options for the ADMM Elastic Net Algorithm}
\usage{
en_admm_options(max_it = 1000, eps = 1e-08, tau, sparse = FALSE,
en_admm_options(
max_it = 1000,
tau,
admm_type = c("auto", "linearized", "var-stepsize"),
tau_lower_mult = 0.01, tau_adjustment_lower = 0.98,
tau_adjustment_upper = 0.999)
tau_lower_mult = 0.01,
tau_adjustment_lower = 0.98,
tau_adjustment_upper = 0.999
)
}
\arguments{
\item{max_it}{maximum number of iterations.}
\item{eps}{numerical tolerance to check for convergence.}
\item{tau}{step size for the algorithm if using the \code{linearized} version
and the largest step size if using the \code{var-stepsize} version.}
\item{sparse}{use sparse coefficients.}
\item{admm_type}{which type of ADMM algorithm to use. If \code{linearized},
uses a linearized version of ADMM which needs \emph{O(n p)} operations per iteration
and converges linearly.
......@@ -45,6 +45,7 @@ options for the ADMM EN algorithm.
Options for the ADMM Elastic Net Algorithm
}
\seealso{
Other EN algorithms: \code{\link{en_dal_options}}
Other EN algorithms:
\code{\link{en_dal_options}()}
}
\concept{EN algorithms}
......@@ -2,14 +2,21 @@
% Please edit documentation in R/control_options.R
\name{en_algorithm_options}
\alias{en_algorithm_options}
\alias{en_lars_options}
\title{Control the Algorithm to Compute (Weighted) Least-Squares Elastic Net Estimates}
\usage{
en_lars_options()
}
\description{
The package supports multiple different algorithms to compute the EN estimate for weighted LS loss functions.
The package supports different algorithms to compute the EN estimate for weighted LS loss functions.
Each algorithm has certain characteristics that make it useful for some problems.
To select a specific algorithm and set its parameters, use any of the \code{en_***_options} functions.
To select a specific algorithm and set its parameters, use any of the \verb{en_***_options} functions.
}
\details{
\itemize{
\item \link{en_lars_options}: Select the tuning-free LARS algorithm. This computes \emph{exact} (up to numerical errors) solutions
to the EN-LS problem. It is not iterative and therefore cannot benefit from approximate
solutions, but in turn guarantees that a solution will be found.
\item \link{en_admm_options}: Select an iterative ADMM-type algorithm. There are two versions available:
\code{admm_type = "linearized"} needs \emph{O(n p)} operations per iteration and converges linearly, while
\code{admm_type = "var-stepsize"} needs \emph{O(n p^3)} operations per iteration but converges quadratically.
......
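A short sketch of selecting one of these algorithms; the tau value for the ADMM variant is purely illustrative.
lars_opts <- en_lars_options()                                      # tuning-free, exact solutions
admm_opts <- en_admm_options(admm_type = "linearized", tau = 0.5)   # iterative, O(n p) per iteration
# either object can then be passed as `en_algorithm_opts`, e.g.
# elnet(x, y, alpha = 0.5, lambdas = 0.1, en_algorithm_opts = lars_opts)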
......@@ -4,17 +4,20 @@
\alias{en_dal_options}
\title{Options for the DAL Elastic Net Algorithm}
\usage{
en_dal_options(max_it = 100, max_inner_it = 100, eps = 1e-06,
eta_multiplier = 2, eta_start_conservative = 0.01,
eta_start_aggressive = 1, lambda_relchange_aggressive = 0.25)
en_dal_options(
max_it = 100,
max_inner_it = 100,
eta_multiplier = 2,
eta_start_conservative = 0.01,
eta_start_aggressive = 1,
lambda_relchange_aggressive = 0.25
)
}
\arguments{
\item{max_it}{maximum number of (outer) iterations.}
\item{max_inner_it}{maximum number of (inner) iterations in each outer iteration.}
\item{eps}{numerical tolerance to check for convergence.}
\item{eta_multiplier}{multiplier for the barrier parameter. In each iteration, the barrier must be more restrictive
(i.e., the multiplier must be > 1).}
......@@ -33,6 +36,7 @@ options for the DAL EN algorithm.
Options for the DAL Elastic Net Algorithm
}
\seealso{
Other EN algorithms: \code{\link{en_admm_options}}
Other EN algorithms:
\code{\link{en_admm_options}()}
}
\concept{EN algorithms}
......@@ -4,9 +4,21 @@
\alias{enpy_initial_estimates}
\title{ENPY Initial Estimates}
\usage{
enpy_initial_estimates(x, y, alpha, lambdas, bdp = 0.25, cc,
include_intercept = TRUE, enpy_opts = enpy_options(),
mscale_opts = mscale_algorithm_options())
enpy_initial_estimates(
x,
y,
alpha,
lambdas,
bdp = 0.25,
cc,
include_intercept = TRUE,
penalty_loadings,
enpy_opts = enpy_options(),
mscale_opts = mscale_algorithm_options(),
eps = 1e-06,
sparse = FALSE,
ncores = 1L
)
}
\arguments{
\item{x}{\code{n} by \code{p} matrix of numeric predictors.}
......@@ -25,12 +37,18 @@ given breakdown point under the Normal model.}
\item{include_intercept}{include an intercept in the model.}
\item{mscale_opts}{options for the M-scale estimation. See \link{mscale_algorithm_options} for details.}
\item{penalty_loadings}{a vector of positive penalty loadings
(a.k.a. weights) for different penalization of each coefficient.}
\item{en_algorithm_opts}{options for the EN algorithm. See \link{en_algorithm_options} for details.}
\item{enpy_opts}{options for the EN-PY procedure. See \link{enpy_options} for details.}
\item{mscale_opts}{options for the M-scale estimation. See \link{mscale_algorithm_options} for details.}
\item{eps}{numerical tolerance.}
\item{sparse}{use sparse coefficient vectors.}
\item{ncores}{number of CPU cores to use in parallel. By default, only one CPU core is used.}
}
\description{
ENPY Initial Estimates
......
......@@ -4,16 +4,19 @@
\alias{enpy_options}
\title{Options for the ENPY Algorithm}
\usage{
enpy_options(max_it = 10, eps = 1e-06, keep_psc_proportion = 0.5,
en_algorithm_opts, keep_residuals_measure = c("threshold",
"proportion"), keep_residuals_proportion = 0.5,
keep_residuals_threshold = 2, retain_best_factor = 1.1)
enpy_options(
max_it = 10,
keep_psc_proportion = 0.5,
en_algorithm_opts,
keep_residuals_measure = c("threshold", "proportion"),
keep_residuals_proportion = 0.5,
keep_residuals_threshold = 2,
retain_best_factor = 2
)
}
\arguments{
\item{max_it}{maximum number of PY iterations.}
\item{eps}{numerical tolerance to check for convergence.}
\item{keep_psc_proportion}{proportion of observations to keep based on the Principal Sensitivity Components.}
\item{en_algorithm_opts}{options for the LS-EN algorithm. See \link{en_algorithm_options} for details.}
......@@ -26,8 +29,8 @@ only observations with residuals below the threshold are kept.}
\item{keep_residuals_threshold}{only observations with (standardized) residuals less than this threshold are kept.}
\item{retain_best_factor}{in addition to the candidates from the last iteration, also keep candidates
that are within this factor of the best candidate.}
\item{retain_best_factor}{only keep candidates that are within this factor of the best candidate. If \verb{<= 1}, only
keep candidates from the last iteration.}
}
\value{
options for the ENPY algorithm.
......
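A sketch of adjusting the ENPY options documented above; the values are illustrative and not recommendations.
opts <- enpy_options(max_it = 20, keep_psc_proportion = 0.4,
                     keep_residuals_measure = "proportion",
                     keep_residuals_proportion = 0.5,
                     retain_best_factor = 1.5)
# passed on via enpy_initial_estimates(..., enpy_opts = opts) or pense(..., enpy_opts = opts)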
......@@ -14,7 +14,7 @@ mloc(x, scale = mad(x), rho, cc, opts = mscale_algorithm_options())
\item{rho}{the rho function to use.}
\item{cc}{value of the tuning constant for the chosen rho function.
By default, chosen to achieve 95% efficiency under the Normal
By default, chosen to achieve 95\% efficiency under the Normal
model.}
\item{opts}{a list of options for the M-estimating algorithm, see
......
......@@ -4,9 +4,14 @@
\alias{mlocscale}
\title{Compute the M-Location and M-Scale}
\usage{
mlocscale(x, bdp = 0.25, location_rho = c("bisquare", "huber"),
scale_cc = consistency_const(bdp, "bisquare"), location_cc,
opts = mscale_algorithm_options())
mlocscale(
x,
bdp = 0.25,
location_rho = c("bisquare", "huber"),
scale_cc = consistency_const(bdp, "bisquare"),
location_cc,
opts = mscale_algorithm_options()
)
}
\arguments{
\item{x}{numeric values.}
......
......@@ -4,8 +4,12 @@
\alias{mscale}
\title{Compute the M-Scale of Centered Values}
\usage{
mscale(x, bdp = 0.25, cc = consistency_const(bdp, "bisquare"),
opts = mscale_algorithm_options())
mscale(
x,
bdp = 0.25,
cc = consistency_const(bdp, "bisquare"),
opts = mscale_algorithm_options()
)
}
\arguments{
\item{x}{numeric values.}
......
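A small self-contained sketch of the M-scale and M-location estimators documented above, applied to a contaminated sample; the contamination setup is illustrative only.
set.seed(2)
z <- c(rnorm(95), rnorm(5, mean = 10))      # 5% gross outliers
mscale(z - median(z), bdp = 0.25)           # M-scale of the centered values
mlocscale(z, bdp = 0.25)                    # joint M-location and M-scale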
......@@ -4,13 +4,33 @@
\alias{pense}
\title{Compute the PENSE Regularization Path}
\usage{
pense(x, y, alpha, lambdas, enpy_lambdas, penalty_loadings,
strategies = "ZEsEiOsOi", other_starts, include_intercept = TRUE,
bdp = 0.25, cc, eps = 1e-06, algorithm_opts,
explore_solutions = 10, explore_tol = 0.1, max_solutions = 10,
comparison_tol = sqrt(eps), sparse = TRUE,
mscale_opts = mscale_algorithm_options(), enpy_opts = enpy_options(),
...)
pense(
x,
y,
alpha,
nlambda = 50,
lambda_min_ratio,
nlambda_enpy = 10,
lambdas,
enpy_lambdas,
penalty_loadings,
strategies = "ZEsEiOsOi",
other_starts,
include_intercept = TRUE,
bdp = 0.25,
cc,
eps = 1e-06,
algorithm_opts = pense_mm_options(),
explore_solutions = 10,
explore_tol = 0.1,
max_solutions = 10,
comparison_tol = sqrt(eps),
sparse = FALSE,
ncores = 1,
mscale_opts = mscale_algorithm_options(),
enpy_opts = enpy_options(),
...
)
}
\arguments{
\item{x}{\code{n} by \code{p} matrix of numeric predictors.}
......@@ -20,10 +40,18 @@ pense(x, y, alpha, lambdas, enpy_lambdas, penalty_loadings,
\item{alpha}{value for the alpha parameter, the balance between L1 and L2
penalization.}
\item{lambdas}{a vector of positive values for the lambda parameter.}
\item{nlambda}{number of lambda values to compute the estimate for.}
\item{enpy_lambdas}{a vector of lambda values at which \emph{cold} initial
estimates are computed (see \link{enpy} for details).}
\item{lambda_min_ratio}{smallest value of lambda as a fraction of \emph{lambda_max} (the smallest lambda such that the
0-vector is a local optimum).
If \code{n < p}, the default is \code{alpha * 1e-3}, otherwise the default is \code{alpha * 1e-2}.}
\item{nlambda_enpy}{number of lambda values where the ENPY initial estimate is computed.}
\item{lambdas}{a vector of positive values for the lambda parameter. If specified, takes precedence over \code{nlambda}.}
\item{enpy_lambdas}{a vector of lambda values at which \emph{cold} initial estimates are computed
(see \link{enpy} for details). If specified, takes precedence over \code{nlambda_enpy}.}
\item{penalty_loadings}{a vector of positive penalty loadings
(a.k.a. weights) for different penalization of each
......@@ -45,7 +73,7 @@ will be used only at the specified \code{lambda} value.}
\item{cc}{consistency constant for the scale estimate. By default, the scale estimate is made consistent for the
given breakdown point under the Normal model.}
\item{eps}{convergence tolerance for the algorithm.}
\item{eps}{numerical tolerance.}
\item{algorithm_opts}{options for the PENSE algorithm. See \link{pense_algorithm_options} for details.}
......@@ -64,7 +92,9 @@ If this is less than \code{comparison_tol}, two optima are deemed equal if the s
of the intercepts is less than \code{comparison_tol} and the squared L_2 norm of the difference
vector is less than \code{comparison_tol}.}
\item{sparse}{use sparse coefficient vector.}
\item{sparse}{use sparse coefficient vectors.}
\item{ncores}{number of CPU cores to use in parallel. By default, only one CPU core is used.}
\item{mscale_opts}{options for the M-scale estimation. See \link{mscale_algorithm_options} for details.}
......
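A sketch of fitting a PENSE path with the new nlambda / nlambda_enpy defaults on simulated data; all values are illustrative.
set.seed(3)
x <- matrix(rnorm(100 * 10), ncol = 10)
y <- x[, 1] - 2 * x[, 2] + rnorm(100)
fit <- pense(x, y, alpha = 0.75, nlambda = 25, nlambda_enpy = 5,
             bdp = 0.25, sparse = FALSE, ncores = 2)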
......@@ -4,19 +4,24 @@
\alias{pense_admm_options}
\title{Options for the S-Estimate Algorithm}
\usage{
pense_admm_options(max_it = 5000, tau, sparse = TRUE,
prox_eps = 1e-08, prox_max_it = 200, prox_oscillate_window = 4,
prox_minimum_step_size = 0.01, prox_wolfe_c1 = 1e-04,
prox_wolfe_c2 = 0.9, prox_step_size_adj = 0.9,
prox_max_small_steps = 10)
pense_admm_options(
max_it = 5000,
tau,
prox_eps = 1e-08,
prox_max_it = 200,
prox_oscillate_window = 4,
prox_minimum_step_size = 0.01,
prox_wolfe_c1 = 1e-04,
prox_wolfe_c2 = 0.9,
prox_step_size_adj = 0.9,
prox_max_small_steps = 10
)
}
\arguments{
\item{max_it}{maximum number of iterations.}
\item{tau}{step size for the algorithm.}
\item{sparse}{use sparse coefficients.}
\item{prox_eps}{numerical tolerance for computing the proximal operator of the S-loss.}
\item{prox_max_it}{maximum number of iterations for computing the proximal operator of the S-loss.}
......
......@@ -5,7 +5,7 @@
\title{Control the Algorithm to Compute Penalized Elastic Net S-Estimates}
\description{
The package provides different algorithms to compute the PENSE estimate.
To select a specific algorithm and set its parameters, use any of the \code{pense_***_options} functions.
To select a specific algorithm and set its parameters, use any of the \verb{pense_***_options} functions.
}
\details{
\itemize{
......
......@@ -4,8 +4,12 @@
\alias{pense_mm_options}
\title{MM-algorithm to Compute Penalized Elastic Net S-Estimates}
\usage{
pense_mm_options(max_it = 500, tightening = c("adaptive",
"exponential", "none"), tightening_steps = 10, en_algorithm_opts)
pense_mm_options(
max_it = 500,
tightening = c("adaptive", "exponential", "none"),
tightening_steps = 10,
en_algorithm_opts
)
}
\arguments{
\item{max_it}{maximum number of iterations.}
......
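A sketch combining the MM-algorithm options with a specific EN algorithm; the number of tightening steps and the EN choice are illustrative.
mm_opts <- pense_mm_options(max_it = 200, tightening = "adaptive", tightening_steps = 5,
                            en_algorithm_opts = en_lars_options())
# then passed to pense(..., algorithm_opts = mm_opts)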
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enpy.R
\name{pscs}
\alias{pscs}
\title{Compute Principal Sensitivity Components}
\usage{
pscs(
x,
y,
alpha,
lambdas,
include_intercept = TRUE,
penalty_loadings,
en_algorithm_opts,
eps = 1e-06,
sparse = FALSE,
ncores = 1L
)
}
\arguments{
\item{x}{\code{n} by \code{p} matrix of numeric predictors.}
\item{y}{vector of response values of length \code{n}.}
\item{alpha}{value for the alpha parameter, the balance between L1 and L2
penalization.}
\item{lambdas}{a vector of positive values for the lambda parameter.}
\item{include_intercept}{include an intercept in the model.}
\item{penalty_loadings}{a vector of positive penalty loadings
(a.k.a. weights) for different penalization of each coefficient.}
\item{en_algorithm_opts}{options for the EN algorithm. See \link{en_algorithm_options} for details.}
\item{eps}{numerical tolerance.}
\item{sparse}{use sparse coefficient vectors.}
\item{ncores}{number of CPU cores to use in parallel. By default, only one CPU core is used.}
}
\description{
Compute Principal Sensitivity Components
}
CXX_STD = CXX11
PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS)
PKG_CXXFLAGS= -fstrict-aliasing -Wstrict-aliasing
PKG_OPTFLAGS= -g -Os -flto=thin
# -DNSOPTIM_METRICS_DISABLED -DNSOPTIM_DETAILED_METRICS
PKG_CPPFLAGS= -D__STDC_LIMIT_MACROS -DHAVE_RCPP -DTESTTHAT_DISABLED
CXX_STD = CXX11
PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) @OPENMP_CXXFLAGS@
PKG_CXXFLAGS= @STRICT_ALIASING_FLAG@ @OPENMP_CXXFLAGS@
PKG_CPPFLAGS= -D__STDC_LIMIT_MACROS -DHAVE_RCPP -DTESTTHAT_DISABLED -DNSOPTIM_METRICS_DISABLED
CXX_STD = CXX11
PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) $(SHLIB_OPENMP_CXXFLAGS)
PKG_CXXFLAGS= $(SHLIB_OPENMP_CXXFLAGS) -fstrict-aliasing
PKG_CPPFLAGS= -D__STDC_LIMIT_MACROS -DHAVE_RCPP -DTESTTHAT_DISABLED -DNSOPTIM_METRICS_DISABLED
......@@ -223,33 +223,15 @@ vec PenseProximalOperator::operator()(const vec& u, const vec& v_prev, const dou
return v;
}
//! Compute the intercept.
double PenseProximalOperator::ComputeIntercept(const vec& fitted) const {
// Use the loss's M-scale functor because the internal M-scale functor `mscale_` is not used for the actual
// residuals but for the "proximal residuals" `y - v`.
return MLocationScale(loss_->data().cy() - fitted, loss_->mscale(), loss_->mscale().rho()).location;
}
double PenseProximalOperator::StepSize(const nsoptim::RidgePenalty& penalty, const double norm_x) const {
return StepSize(nsoptim::EnPenalty(penalty), norm_x);
}
double PenseProximalOperator::StepSize(const nsoptim::LassoPenalty& penalty, const double norm_x) const {
return StepSize(nsoptim::EnPenalty(penalty), norm_x);
}
double PenseProximalOperator::StepSize(const nsoptim::EnPenalty& penalty, const double norm_x) const {
double PenseProximalOperator::OperatorScaling() const {
if (config_.tau < 0) {
return 1.;
}
return config_.tau;
}
double PenseProximalOperator::StepSize(const nsoptim::AdaptiveEnPenalty& penalty, const double norm_x) const {
if (config_.tau < 0) {
return 1.;
}
return config_.tau;
double PenseProximalOperator::PenaltyScaling() const {
return 1;
}
double PenseProximalOperator::UpdateGradient(const vec& v, const vec& u, const double lambda) {
......
......@@ -97,32 +97,15 @@ class PenseProximalOperator {
return operator()(u, arma::zeros(u.n_elem), intercept, lambda, metrics);
}
//! Compute the intercept.
double ComputeIntercept(const arma::vec& residuals) const;
//! Compute the step size for the currently set loss.
//!
//! @param penalty the current penalty value.
//! @return the loss-specific step size.
double StepSize(const nsoptim::RidgePenalty& penalty, const double norm_x) const;
//! Compute the step size for the currently set loss.
//!
//! @param penalty the current penalty value.
//! @return the loss-specific step size.
double StepSize(const nsoptim::LassoPenalty& penalty, const double norm_x) const;
//! Compute the step size for the currently set loss.
//! Compute a good proximal operator scaling (i.e., 1 over step size) for the currently set loss.
//!
//! @param penalty the current penalty value.
//! @return the loss-specific step size.
double StepSize(const nsoptim::EnPenalty& penalty, const double norm_x) const;
//! @return Loss-specific operator scaling.
double OperatorScaling() const;
//! Compute the step size for the currently set loss.
//! Compute the scaling constant of the penalty parameter for the currently set loss.
//!
//! @param penalty the current penalty value.
//! @return the loss-specific step size.
double StepSize(const nsoptim::AdaptiveEnPenalty& penalty, const double norm_x) const;
//! @return Loss-specific penalty scaling.
double PenaltyScaling() const;
private:
//! Update the gradient of the M-scale, the gradient of the M-scale and the scaling factor at `v` and return the
......
//
// autoconfig.hpp
// pense
//
// Created by David Kepplinger on 2019-04-03.
// Copyright © 2019 David Kepplinger. All rights reserved.
//
#ifndef AUTOCONFIG_HPP_
#define AUTOCONFIG_HPP_
#undef PENSE_ENABLE_OPENMP
#undef PENSE_DISABLE_OPENMP
#undef PENSE_OPENMP_ADD_CONST_SHARED
#endif // AUTOCONFIG_HPP_
//
// autoconfig.hpp
// pense
//
// Created by David Kepplinger on 2019-04-03.
// Copyright © 2019 David Kepplinger. All rights reserved.
//
#ifndef AUTOCONFIG_HPP_
#define AUTOCONFIG_HPP_
#define PENSE_ENABLE_OPENMP 1
/* #undef PENSE_DISABLE_OPENMP */
/* #undef PENSE_OPENMP_ADD_CONST_SHARED */
#endif // AUTOCONFIG_HPP_
//
// config.hpp
// pense
//
// Created by David Kepplinger on 2019-01-30.
// Copyright © 2019 David Kepplinger. All rights reserved.
//
#ifndef CONFIG_HPP_
#define CONFIG_HPP_
#endif // CONFIG_HPP_
......@@ -28,7 +28,8 @@ enum class EnAlgorithm {
kLinearizedAdmm = 1,
kVarStepAdmm = 2,
kDal = 3,
kRidge = 4
kRidge = 4,
kLars = 5
};
//! Integer IDs for supported EN algorithms
......
//
// container_utility.hpp
// pense
//
// Created by David Kepplinger on 2019-11-04.
// Copyright © 2019 David Kepplinger. All rights reserved.
//
#ifndef CONTAINER_UTILITY_HPP_
#define CONTAINER_UTILITY_HPP_
#include <functional>
#include <forward_list>
namespace pense {
namespace utility {
//! A std::forward_list whose items are kept ordered according to an associated ordering key.
template<typename T1, typename T2, typename Comparator = std::less<T1>>
class OrderedList {
using ListType = std::forward_list<T2>;
public:
//! Create an empty ordered list.
OrderedList() noexcept {}
//! Create an empty ordered list using *comp* for comparisons.
explicit OrderedList(const Comparator& comp) noexcept : comp_(comp) {}
//! Insert an item at the position given by *order_item*.
//!
//! @return Iterator pointing to the inserted element.
typename ListType::iterator insert(const T1& order_item, const T2& item) {
auto order_it = order_items_.begin();
auto insert_order_it = order_items_.before_begin();
const auto order_end = order_items_.end();
auto insert_item_it = items_.before_begin();
while (order_it != order_end && comp_(*order_it, order_item)) {
++insert_item_it;
++insert_order_it;
++order_it;
}
order_items_.insert_after(insert_order_it, order_item);
return items_.insert_after(insert_item_it, item);
}
//! Emplace an item at the position given by *order_item*.
//!
//! @return Iterator pointing to the inserted element.
template<typename... Args>
typename ListType::iterator emplace(const T1& order_item, Args&&... args) {
auto order_it = order_items_.begin();
auto insert_order_it = order_items_.before_begin();
const auto order_end = order_items_.end();
auto insert_item_it = items_.before_begin();
while (order_it != order_end && comp_(*order_it, order_item)) {
++insert_item_it;
++insert_order_it;
++order_it;
}
order_items_.insert_after(insert_order_it, order_item);
return items_.emplace_after(insert_item_it, std::forward<Args>(args)...);
}
//! Get the list of actual items.
const std::forward_list<T2>& items() const noexcept {
return items_;
}
//! Get the list of actual items.
std::forward_list<T2>& items() noexcept {
return items_;
}
typename ListType::iterator begin() noexcept {
return items_.begin();
}
typename ListType::iterator end() noexcept {
return items_.end();
}
typename ListType::const_iterator begin() const noexcept {
return items_.begin();
}
typename ListType::const_iterator end() const noexcept {
return items_.end();
}
typename ListType::const_iterator cbegin() const noexcept {
return items_.cbegin();
}
typename ListType::const_iterator cend() const noexcept {
return items_.cend();
}
private:
Comparator comp_;
std::forward_list<T1> order_items_;
std::forward_list<T2> items_;
};
} // namespace utility
} // namespace pense
#endif // CONTAINER_UTILITY_HPP_
......@@ -33,6 +33,8 @@ constexpr double kKeepResidualsThreshold = 2; //!< Fixed threshold to keep obse
//!< on the residuals.
constexpr double kRetainBestFactor = 1.1; //!< Retain not only the candidates from the last iteration,
//!< but also those that are within this factor of the best candidate.
constexpr int kDefaultNumThreads = 1; //!< Default number of threads.
inline uword HashUpdate(const uword hash, const uword value) noexcept;
......@@ -93,7 +95,8 @@ PyConfiguration ParseConfiguration(const Rcpp::List& config) noexcept {
GetFallback(config, "use_residual_threshold", kUseResidualThreshold),
GetFallback(config, "keep_residuals_proportion", kKeepResidualsProportion),
GetFallback(config, "keep_residuals_threshold", kKeepResidualsThreshold),
GetFallback(config, "retain_best_factor", kRetainBestFactor)
GetFallback(config, "retain_best_factor", kRetainBestFactor),
GetFallback(config, "num_threads", kDefaultNumThreads)
};
}
......
This diff is collapsed.
......@@ -18,10 +18,139 @@ using arma::find;
namespace pense {
namespace enpy_psc_internal {
//! Compute the Principal Sensitivity Components using identities for the Ridge penalty
//! if OpenMP is enabled and necessary.
//!
//! @param loss the LS regression loss object to compute the PSCs for.
//! @param penalties a list of Ridge penalties for which the PSCs should be computed at once.
//! @param optim Optimizer to use to compute the estimate on the full data.
//! @param num_threads number of OpenMP threads.
//! @return A list of PSC structures, one for each given penalty, in the same order as `penalties`.
alias::FwdList<pense::PscResult<DirectRidgeOptimizer>> ComputeRidgePscs(const nsoptim::LsRegressionLoss& loss,
const alias::FwdList<nsoptim::RidgePenalty>& penalties, const DirectRidgeOptimizer& optim, int num_threads) {
const nsoptim::PredictorResponseData& data = loss.data();
// A list of PscResult objects, one per penalty.
pense::utility::OrderedList<double, pense::PscResult<DirectRidgeOptimizer>, std::greater<double>> psc_results;
arma::mat x_int;
const arma::mat& x = loss.IncludeIntercept() ? x_int : data.cx();
arma::mat ridge_gram;
double gram_diag_int = 0;
if (loss.IncludeIntercept()) {
x_int = arma::join_rows(arma::ones(data.n_obs()), data.cx());
ridge_gram = x_int.t() * x_int;
gram_diag_int = ridge_gram.at(0, 0);
} else {
ridge_gram = data.cx().t() * data.cx();
}
// Computing PSCs can be done in parallel for each penalty.
#pragma omp parallel num_threads(num_threads) default(none) \
shared(psc_results, penalties, loss, data, x, optim, ridge_gram) firstprivate(gram_diag_int)
{
#pragma omp single nowait
{
for (auto pen_it = penalties.cbegin(), pen_end = penalties.cend(); pen_it != pen_end; ++pen_it) {
// Compute optimum on full data.
#pragma omp task default(none) firstprivate(pen_it, ridge_gram, gram_diag_int) \
shared(psc_results, loss, data, x, optim)
{
auto optimizer = optim;
optimizer.loss(loss);
optimizer.penalty(*pen_it);
auto psc_result_it = psc_results.begin();
#pragma omp critical(emplace_psc_result)
psc_result_it = psc_results.emplace(pen_it->lambda(), optimizer.Optimize());
// Compute hat matrix for LOO residuals manually
ridge_gram.diag() += (data.n_obs() - 1) * pen_it->lambda();
if (loss.IncludeIntercept()) {
ridge_gram.at(0, 0) = gram_diag_int;
}
arma::mat hat = x * arma::solve(ridge_gram, x.t());
// Fitted y from all data:
const arma::vec y_hat = data.cx() * psc_result_it->optimum.coefs.beta +
psc_result_it->optimum.coefs.intercept;
// Fitted y from LOO
const arma::vec y_hat_loo = hat * data.cy();
// Compute sensitivity matrix
hat.each_row() %= arma::trans((data.cy() - y_hat_loo) / (1 - hat.diag()));
hat.each_col() += y_hat - y_hat_loo;
enpy_psc_internal::FinalizePSC(hat, &(*psc_result_it));
}
}
}
}
return psc_results.items();
}
//! Compute the Principal Sensitivity Components using identities for the Ridge penalty
//! if OpenMP is disabled.
//!
//! @param loss the LS regression loss object to compute the PSCs for.
//! @param penalties a list of Ridge penalties for which the PSCs should be computed at once.
//! @param optimizer Optimizer to use to compute leave-one-out residuals.
//! @return A list of PSC structures, one for each given penalty, in the same order as `penalties`.
alias::FwdList<pense::PscResult<DirectRidgeOptimizer>> ComputeRidgePscs(const nsoptim::LsRegressionLoss& loss,
const alias::FwdList<nsoptim::RidgePenalty>& penalties, DirectRidgeOptimizer optimizer) {
const nsoptim::PredictorResponseData& data = loss.data();
// A list of PscResult objects, one per penalty.
alias::FwdList<pense::PscResult<DirectRidgeOptimizer>> psc_results;
auto psc_result_it = psc_results.before_begin();
arma::mat x_int;
const arma::mat& x = loss.IncludeIntercept() ? x_int : data.cx();
arma::mat ridge_gram;
double gram_diag_int = 0;
if (loss.IncludeIntercept()) {
x_int = arma::join_rows(arma::ones(data.n_obs()), data.cx());
ridge_gram = x_int.t() * x_int;
gram_diag_int = ridge_gram.at(0, 0);
} else {
ridge_gram = data.cx().t() * data.cx();
}
double diagonal_offset = 0;
// First optimize with respect to the full data set and compute the predictions.
optimizer.loss(loss);
for (auto&& penalty : penalties) {
// Compute optimum on full data.
optimizer.penalty(penalty);
psc_result_it = psc_results.emplace_after(psc_result_it, optimizer.Optimize());
// Compute hat matrix for LOO residuals manually
const double prev_diagonal_offset = diagonal_offset;
diagonal_offset = (data.n_obs() - 1) * penalty.lambda();
ridge_gram.diag() += (diagonal_offset - prev_diagonal_offset);
if (loss.IncludeIntercept()) {
ridge_gram.at(0, 0) = gram_diag_int;
}
arma::mat hat = x * arma::solve(ridge_gram, x.t());
// Fitted y from all data:
const arma::vec y_hat = data.cx() * psc_result_it->optimum.coefs.beta + psc_result_it->optimum.coefs.intercept;
// Fitted y from LOO
const arma::vec y_hat_loo = hat * data.cy();
// Compute sensitivity matrix
hat.each_row() %= arma::trans((data.cy() - y_hat_loo) / (1 - hat.diag()));
hat.each_col() += y_hat - y_hat_loo;
enpy_psc_internal::FinalizePSC(hat, &(*psc_result_it));
}
return psc_results;
}
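For clarity, the leave-one-out shortcut used by both ComputeRidgePscs overloads can be checked with a few lines of R (not part of the package): for a fixed ridge penalty k added to the Gram matrix, the LOO residual of observation i equals (y_i - yhat_i) / (1 - H_ii), where H = X (X'X + k I)^{-1} X'. Unlike the C++ code above, this sketch also penalizes the intercept column and uses a generic k rather than (n_obs - 1) * lambda.
set.seed(4)
n <- 20; p <- 3; k <- 2
X <- cbind(1, matrix(rnorm(n * (p - 1)), n))
y <- rnorm(n)
H <- X %*% solve(crossprod(X) + k * diag(p), t(X))
loo_direct <- sapply(seq_len(n), function(i) {
  beta_i <- solve(crossprod(X[-i, , drop = FALSE]) + k * diag(p),
                  crossprod(X[-i, , drop = FALSE], y[-i]))
  y[i] - drop(X[i, ] %*% beta_i)
})
loo_shortcut <- drop((y - H %*% y) / (1 - diag(H)))
all.equal(loo_direct, loo_shortcut)   # TRUE up to numerical error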
void FinalizePSC(const mat& sensitivity_matrix, PscResult* psc_result) {
if (psc_result->loo_warnings > 0) {
psc_result->status = PscStatus::kWarning;
psc_result->status_message.append("Some LOO residuals are unreliable; ");
if (psc_result->warnings > 0) {
psc_result->status = PscStatusCode::kWarning;
psc_result->message.append("Some LOO residuals are unreliable; ");
}
// Get the Eigen decomposition of the outer product of the sensitivity vectors, i.e., the eigenvectors
......@@ -30,8 +159,8 @@ void FinalizePSC(const mat& sensitivity_matrix, PscResult* psc_result) {
const bool success = eig_sym(eigenvalues, psc_result->pscs, sensitivity_matrix * sensitivity_matrix.t());
if (!success) {
psc_result->pscs.reset();
psc_result->status = PscStatus::kError;
psc_result->status_message.append("Eigendecomposition failed");
psc_result->status = PscStatusCode::kError;
psc_result->message.append("Eigendecomposition failed");
return;
}
......@@ -41,8 +170,8 @@ void FinalizePSC(const mat& sensitivity_matrix, PscResult* psc_result) {
if (eigenvalues[cutoff_index] < kNumericZero) {
psc_result->pscs.reset();
psc_result->status = PscStatus::kError;
psc_result->status_message.append("all Eigenvalues are zero");
psc_result->status = PscStatusCode::kError;
psc_result->message.append("All Eigenvalues are zero");
return;
}
......@@ -56,5 +185,35 @@ void FinalizePSC(const mat& sensitivity_matrix, PscResult* psc_result) {
psc_result->pscs.shed_cols(0, cutoff_index);
}
}
//! Concatenate two lists containing LooStatus objects, one for each penalty.
//! The metrics and status from *single* are concatenated to the metrics and the status from *combined*.
//!
//! @param single List to concatenate to the other list.
//! @param combined Output of the concatenated lists.
void ConcatenateLooStatus(alias::FwdList<LooStatus>* single, alias::FwdList<LooStatus>* combined) noexcept {
auto in_it = single->begin();
auto out_it = combined->before_begin();
const auto in_end = single->end();
const auto out_end = combined->end();
while (in_it != in_end) {
auto out_it_insert = out_it++;
if (out_it == out_end) {
out_it = combined->emplace_after(out_it_insert, std::move(*in_it));
out_it->warnings += in_it->warnings;
} else {
out_it->warnings += in_it->warnings;
out_it->metrics.splice_after(out_it->metrics.before_begin(), in_it->metrics);
if (in_it->status == PscStatusCode::kError) {
out_it->status = PscStatusCode::kError;
} else if (in_it->status == PscStatusCode::kWarning && out_it->status != PscStatusCode::kError) {
out_it->status = PscStatusCode::kWarning;
}
}
++in_it;
}
}
} // namespace enpy_psc_internal
} // namespace pense
This diff is collapsed.
//
// pense_forward.hpp
// enpy_types.hpp
// pense
//
// Created by David Kepplinger on 2019-01-30.
// Copyright © 2019 David Kepplinger. All rights reserved.
//
#ifndef PENSE_FORWARD_HPP_
#define PENSE_FORWARD_HPP_
#ifndef ENPY_TYPES_HPP_
#define ENPY_TYPES_HPP_
#include <nsoptim_forward.hpp>
#include <nsoptim.hpp>
#include "alias.hpp"
namespace pense {
......@@ -17,10 +17,13 @@ namespace pense {
//! Contains a list of initial estimates and the associated metrics.
template<typename T>
struct PyResult {
nsoptim::Metrics metrics = nsoptim::Metrics("enpy_initest");
PyResult() noexcept : metrics("enpy_initest") {}
explicit PyResult(nsoptim::Metrics&& _metrics) noexcept : metrics(std::move(_metrics)) {}
nsoptim::Metrics metrics;
alias::Optima<T> initial_estimates;
};
} // namespace pense
#endif // PENSE_FORWARD_HPP_
#endif // ENPY_TYPES_HPP_
......@@ -23,7 +23,7 @@ class MLoss : public nsoptim::LossFunction<nsoptim::PredictorResponseData> {
using ConstDataPtr = std::shared_ptr<const nsoptim::PredictorResponseData>;
public:
using ConvexSurrogateType = nsoptim::WeightedLsLoss;
using ConvexSurrogateType = nsoptim::WeightedLsRegressionLoss;
MLoss(ConstDataPtr data, const RhoFunction& rho, const double scale, bool include_intercept = true) noexcept
: include_intercept_(include_intercept), data_(data), rho_(rho), scale_(scale),
......
//
// omp_utils.hpp
// pense
//
// Created by David Kepplinger on 2019-11-02.
// Copyright © 2019 David Kepplinger. All rights reserved.
//
#ifndef OMP_UTILS_HPP_
#define OMP_UTILS_HPP_
#include "autoconfig.hpp"
#ifdef PENSE_DISABLE_OPENMP
# undef PENSE_ENABLE_OPENMP
#endif
#ifdef PENSE_ENABLE_OPENMP
# undef PENSE_DISABLE_OPENMP
#endif
#define const_shared(list)
#ifdef PENSE_ENABLE_OPENMP
#include <omp.h>
namespace pense {
namespace omp {
#ifdef PENSE_OPENMP_ADD_CONST_SHARED
# undef const_shared
# define const_shared(list) shared(list)
#endif
//! Returns ``true`` if OpenMP parallelization should be used, i.e., more than one thread is requested.
inline bool Enabled(const int nr_threads) noexcept {
return nr_threads > 1;
}
//! A conditional lock.
//! The lock is only active if it is constructed as such.
class Lock {
public:
//! A lock which is only activated if the first argument is set to ``true``.
inline explicit Lock(const bool enabled = true) noexcept : enabled_(enabled) {
if (enabled_) {
omp_init_lock(&lock_);
}
}
//! A lock cannot be copied, moved, or assigned to!
Lock(const Lock&) = delete;
Lock(Lock&&) = delete;
Lock& operator=(const Lock&) = delete;
Lock& operator=(Lock&&) = delete;
virtual ~Lock() {
if (enabled_) {
omp_destroy_lock(&lock_);
}
}
//! Acquire the lock.
inline void Acquire() noexcept {
if (enabled_) {
omp_set_lock(&lock_);
}
}
//! Release the lock.
inline void Release() noexcept {
if (enabled_) {
omp_unset_lock(&lock_);
}
}
private:
const bool enabled_;
omp_lock_t lock_;
};
//! An implicit guard which locks the given lock on construction and unlocks the lock when the guard goes out of scope.