...
 
Commits (56)
.Rproj.user
.vscode
*.o
*.so
*.Rproj
.DS_Store
.Rbuildignore
.gitignore
*.trace
Package: pense
Type: Package
Title: Penalized Elastic Net S/MM-Estimator of Regression
Version: 2.0.1
Date: 2019-10-24
Authors@R: c(
person("David", "Kepplinger", , "david.kepplinger@gmail.com",
role = c("aut", "cre")),
person("Matias", "Salibian-Barrera", role = c("aut")),
person("Gabriela", "Cohen Freue", role = "aut"),
person("Derek", "Cho", role = "ctb")
)
Copyright: See the file COPYRIGHTS for copyright details on some of the
functions and algorithms used.
BugReports: https://gitlab.math.ubc.ca/dakep/pense/issues
Description: Robust penalized elastic net S and MM estimator for linear
regression. The method is described in detail in
Cohen Freue, G. V., Kepplinger, D., Salibian-Barrera, M., and Smucler, E.
(2019) <https://projecteuclid.org/euclid.aoas/1574910036>.
Depends:
R (>= 3.5.0),
Matrix
Imports:
Rcpp,
methods,
parallel,
lifecycle (>= 0.2.0),
rlang (>= 0.4.0)
LinkingTo:
nsoptim,
Rcpp,
RcppArmadillo (>= 0.9.600)
Suggests:
testthat (>= 2.1.0)
License: MIT + file LICENSE
NeedsCompilation: yes
RoxygenNote: 7.1.0
Roxygen: list(markdown = TRUE, load = "source")
RdMacros: lifecycle
# Generated by roxygen2: do not edit by hand
S3method(as_starting_point,enpy_starting_points)
S3method(as_starting_point,pense_cvfit)
S3method(as_starting_point,pense_fit)
S3method(as_starting_point,starting_point)
S3method(c,starting_point)
S3method(c,starting_points)
S3method(coef,pense_cvfit)
S3method(coef,pense_fit)
S3method(pensem_cv,default)
S3method(pensem_cv,pense_cvfit)
S3method(plot,pense_cvfit)
S3method(plot,pense_fit)
S3method(predict,pense_cvfit)
S3method(predict,pense_fit)
S3method(print,nsoptim_metrics)
S3method(residuals,pense_cvfit)
S3method(residuals,pense_fit)
export(adamest_cv)
export(adapense_cv)
export(as_starting_point)
export(consistency_const)
export(elnet)
export(elnet_cv)
export(en_admm_options)
export(en_dal_options)
export(en_lars_options)
export(en_options_aug_lars)
export(en_options_dal)
export(enpy)
export(enpy_initial_estimates)
export(enpy_options)
export(initest_options)
export(mloc)
export(mlocscale)
export(mm_algorithm_options)
export(mscale)
export(mscale_algorithm_options)
export(mstep_options)
export(pense)
export(pense_admm_options)
export(pense_mm_options)
export(pense_cv)
export(pense_options)
export(pensem)
export(pensem_cv)
export(prinsens)
export(regmest)
export(regmest_cv)
export(rho_function)
export(starting_point)
export(tau_size)
importFrom(Matrix,drop)
importFrom(Matrix,sparseMatrix)
importFrom(Matrix,sparseVector)
importFrom(Rcpp,evalCpp)
importFrom(lifecycle,deprecate_soft)
importFrom(graphics,abline)
importFrom(graphics,lines)
importFrom(graphics,plot)
importFrom(graphics,segments)
importFrom(graphics,text)
importFrom(lifecycle,deprecate_stop)
importFrom(lifecycle,deprecate_warn)
importFrom(lifecycle,deprecated)
importFrom(lifecycle,is_present)
importFrom(methods,as)
importFrom(methods,is)
importFrom(parallel,clusterApplyLB)
importFrom(parallel,clusterCall)
importFrom(parallel,clusterEvalQ)
importFrom(rlang,abort)
importFrom(rlang,warn)
importFrom(stats,mad)
useDynLib(pense, .registration = TRUE)
## Utility functions interfacing with configuration determined by autoconfig
.k_multithreading_support <- TRUE
## Utility functions interfacing with configuration determined by autoconfig
.k_multithreading_support <- @R_HAVE_OPENMP@
## Utility functions interfacing with configuration determined by autoconfig
.k_multithreading_support <- TRUE
#' Extract Coefficient Estimates
#'
#' Extract coefficients from a PENSE (or LS-EN) regularization path fitted by [pense()] or [elnet()].
#'
#' @param object PENSE regularization path to extract coefficients from.
#' @param lambda a single value of the penalty parameter. If the value is not part of the fitted sequence,
#'   coefficients are interpolated (with a warning).
#' @param sparse should coefficients be returned as sparse or dense vectors? Defaults to the `sparse` argument
#'   supplied to [pense()]. Can also be set to `sparse = 'matrix'`, in which case a sparse matrix
#'   is returned instead of a sparse vector.
#' @param exact deprecated. Always gives a warning if `lambda` is not part of the fitted sequence and hence
#'   coefficients are interpolated.
#' @param correction defunct.
#' @param ... currently not used.
#' @return either a numeric vector or a sparse vector of type [dsparseVector][Matrix::sparseVector-class]
#'   of size \eqn{p + 1}, depending on the `sparse` argument.
#'   Note: prior to version 2.0.0 sparse coefficients were returned as sparse matrix of type *dgCMatrix*.
#'   To get a sparse matrix, use `sparse = 'matrix'`.
#'
#' @seealso [coef.pense_cvfit()] for extracting coefficients from a PENSE fit with hyper-parameters chosen by
#'   cross-validation
#' @example examples/pense_fit.R
#' @export
#' @importFrom lifecycle deprecate_warn deprecate_stop deprecated is_present
#' @importFrom rlang warn
coef.pense_fit <- function (object, lambda, sparse = NULL, exact = deprecated(), correction = deprecated(), ...) {
  # `exact` is deprecated (warning only); `correction` is defunct (error).
  if (is_present(exact)) {
    deprecate_warn('2.0.0', 'coef(exact=)')
  }
  if (is_present(correction)) {
    deprecate_stop('2.0.0', 'coef(correction=)')
  }
  if (length(lambda) > 1L) {
    warn("Only first element in `lambda` is used.")
  }
  # `concat` is an undocumented internal switch: if the caller supplies `concat = FALSE`,
  # the raw estimate (intercept + beta) is returned without name/format handling.
  cl <- match.call(expand.dots = TRUE)
  concat <- !isFALSE(cl$concat)
  lambda <- .as(lambda[[1L]], 'numeric')
  # If the requested penalty is above the largest fitted level and the most penalized
  # estimate is already all-zero, return that estimate directly instead of interpolating.
  if (isTRUE(lambda > object$lambda[[1L]]) && isTRUE(sum(abs(object$estimates[[1L]]$beta)) < .Machine$double.eps)) {
    return(.concat_coefs(object$estimates[[1L]], object$call, sparse, parent.frame(), concat))
  }
  lambda_match <- .approx_match(lambda, object$lambda)
  if (is.na(lambda_match)) {
    warn("Requested penalization level not part of the sequence. Returning interpolated coefficients.")
    return(.interpolate_coefs(object, lambda, object$lambda, sparse = sparse, envir = parent.frame(), concat = concat))
  } else {
    return(.concat_coefs(object$estimates[[lambda_match]], object$call, sparse, parent.frame(), concat))
  }
}
#' Extract Coefficient Estimates
#'
#' Extract coefficients from a PENSE (or LS-EN) regularization path with hyper-parameters chosen by cross-validation.
#'
#' If `lambda = "se"` and `object` contains fitted estimates for every penalization level in the sequence, extract the
#' coefficients of the most parsimonious model with prediction performance statistically indistinguishable from the best
#' model. This is determined to be the model with prediction performance within `se_mult * cv_se` from the best model.
#'
#' @param object PENSE with cross-validated hyper-parameters to extract coefficients from.
#' @param lambda either a string specifying which penalty level to use or a single numeric value of the penalty
#'   parameter. See details.
#' @param se_mult If `lambda = "se"`, the multiple of standard errors to tolerate.
#' @param sparse should coefficients be returned as sparse or dense vectors? Defaults to the `sparse` argument
#'   supplied to [pense_cv()]. Can also be set to `sparse = 'matrix'`, in which case a sparse matrix
#'   is returned instead of a sparse vector.
#' @param exact deprecated. Always gives a warning if `lambda` is not part of the fitted sequence and coefficients
#'   are interpolated.
#' @param correction defunct.
#' @param ... currently not used.
#' @return either a numeric vector or a sparse vector of type [dsparseVector][Matrix::sparseVector-class]
#'   of size \eqn{p + 1}, depending on the `sparse` argument.
#'   Note: prior to version 2.0.0 sparse coefficients were returned as sparse matrix of type *dgCMatrix*.
#'   To get a sparse matrix, use `sparse = 'matrix'`.
#'
#' @example examples/pense_fit.R
#' @export
#' @importFrom lifecycle deprecate_warn deprecate_stop deprecated is_present
#' @importFrom rlang warn
coef.pense_cvfit <- function (object, lambda = c('min', 'se'), se_mult = 2, sparse = NULL, exact = deprecated(),
                              correction = deprecated(), ...) {
  # `exact` is deprecated (warning only); `correction` is defunct (error).
  if (is_present(exact)) {
    deprecate_warn('2.0.0', 'coef(exact=)')
  }
  if (is_present(correction)) {
    deprecate_stop('2.0.0', 'coef(correction=)')
  }
  if (is.character(lambda)) {
    lambda <- match.arg(lambda)
  }
  # `concat` is an undocumented internal switch: with `concat = FALSE` the raw estimate is returned.
  cl <- match.call(expand.dots = TRUE)
  concat <- !isFALSE(cl$concat)
  # With `fit_all = FALSE` only a single estimate is stored; return it regardless of `lambda`.
  if (isFALSE(object$call$fit_all)) {
    if (!isTRUE(lambda == 'min')) {
      warn(paste("`object` was created with `fit_all = FALSE`. Only the estimate at the minimum is available and will",
                 "be returned."))
    }
    return(.concat_coefs(object$estimates[[1L]], object$call, sparse, parent.frame(), concat))
  }
  lambda_seq <- object$cvres$lambda
  # Resolve `lambda` to a numeric penalization level.
  lambda <- if (is.character(lambda)) {
    if (!isTRUE(ncol(object$cv_replications) > 1L) && isTRUE(lambda == 'se') && isTRUE(se_mult > 0)) {
      warn(paste("Only a single cross-validation replication was performed. Standard error not available.",
                 "Using minimum lambda."))
    }
    # `lambda = 'min'` is implemented as the SE rule with a multiplier of 0.
    if (isTRUE(lambda == 'min')) {
      lambda <- 'se'
      se_mult <- 0
    }
    se_selection <- .cv_se_selection(object$cvres$cvavg, object$cvres$cvse, se_mult)
    lambda_seq[[which(se_selection == 'se_fact')]]
  } else {
    if (length(lambda) > 1L) {
      warn("Only first element in `lambda` is used.")
    }
    .as(lambda[[1L]], 'numeric')
  }
  # If the requested penalty is above the largest fitted level and the most penalized
  # estimate is already all-zero, return that estimate directly instead of interpolating.
  if (isTRUE(lambda > lambda_seq[[1L]]) && isTRUE(sum(abs(object$estimates[[1L]]$beta)) < .Machine$double.eps)) {
    return(.concat_coefs(object$estimates[[1L]], object$call, sparse, parent.frame(), concat))
  }
  lambda_match <- .approx_match(lambda, lambda_seq)
  if (is.na(lambda_match)) {
    warn("Requested penalization level not part of the sequence. Returning interpolated coefficients.")
    return(.interpolate_coefs(object, lambda, lambda_seq, sparse = sparse, envir = parent.frame(), concat = concat))
  } else {
    return(.concat_coefs(object$estimates[[lambda_match]], object$call, sparse, parent.frame(), concat))
  }
}
## Interpolate the coefficients at `lambda` using estimates from `object` at `lambda_seq`.
##
## `lambda_seq` is assumed to be sorted in decreasing order. If `lambda` lies outside the
## range of `lambda_seq`, the estimate at the closest end of the sequence is returned.
## Otherwise, the coefficients are the inverse-distance weighted average of the two
## estimates bracketing `lambda`.
##
## @param ... passed on to `.concat_coefs()` (`sparse`, `envir`, and `concat`).
.interpolate_coefs <- function (object, lambda, lambda_seq, ...) {
  if (lambda < lambda_seq[[length(lambda_seq)]]) {
    # Below the smallest penalty level: return the least penalized estimate.
    # (Forward `...` here; previously `sparse`/`envir` were dropped, causing an
    # error in `.concat_coefs()` which has no defaults for these arguments.)
    return(.concat_coefs(object$estimates[[length(lambda_seq)]], object$call, ...))
  } else if (lambda > lambda_seq[[1L]]) {
    # Above the largest penalty level: return the most penalized estimate.
    return(.concat_coefs(object$estimates[[1L]], object$call, ...))
  }
  lambda_diff <- lambda_seq - lambda
  lambda_diff_abs <- abs(lambda_diff)
  # Take the inverse-distance weighted average of the two closest solutions.
  lambda_ind_left <- min(which(lambda_diff < 0))
  lambda_ind_right <- max(which(lambda_diff > 0))
  interp_w <- 1 / lambda_diff_abs[c(lambda_ind_left, lambda_ind_right)]
  interp_beta <- (interp_w[[1L]] * object$estimates[[lambda_ind_left]]$beta +
                    interp_w[[2L]] * object$estimates[[lambda_ind_right]]$beta) / sum(interp_w)
  interp_int <- (interp_w[[1L]] * object$estimates[[lambda_ind_left]]$intercept +
                   interp_w[[2L]] * object$estimates[[lambda_ind_right]]$intercept) / sum(interp_w)
  return(.concat_coefs(list(intercept = interp_int, beta = interp_beta), object$call, ...))
}
## Combine the intercept and the slope coefficients into a single object of length p + 1
## (element 1 is the intercept) in the format requested via `sparse`.
##
## @param coef list with elements `intercept` (scalar) and `beta` (numeric vector or dsparseVector).
## @param call the call used to create the fit; `call$x` is evaluated in `envir` to recover
##   the column names of the predictor matrix.
## @param sparse TRUE/FALSE/'matrix', or NULL to keep the type of `coef$beta`.
## @param envir environment in which `call$x` is evaluated.
## @param concat if FALSE, return `coef` unchanged (internal use).
#' @importFrom methods is
#' @importFrom Matrix sparseVector sparseMatrix
#' @importFrom rlang abort
.concat_coefs <- function (coef, call, sparse, envir, concat = TRUE) {
  if (!concat) {
    return(coef)
  }
  # Determine names; fall back to X1, ..., Xp if `x` has no column names (or cannot be evaluated).
  var_names <- tryCatch(colnames(eval(call$x, envir = envir)), error = function (e) NULL)
  if (is.null(var_names)) {
    var_names <- paste('X', seq_len(length(coef$beta)), sep = '')
  }
  var_names <- c('(Intercept)', var_names)
  if (!is.null(sparse) && pmatch(sparse[[1L]], 'matrix', nomatch = 0L) == 1L) {
    # `sparse = 'matrix'`: return a one-column sparse matrix; the intercept is always stored explicitly.
    if (is(coef$beta, 'dsparseVector')) {
      return(sparseMatrix(x = c(coef$intercept, coef$beta@x), i = c(1L, 1L + coef$beta@i),
                          j = rep.int(1L, length(coef$beta@i) + 1L),
                          dims = c(coef$beta@length + 1L, 1L), dimnames = list(var_names, NULL)))
    } else {
      # Dense beta: keep only entries that are numerically non-zero.
      nz_ind <- which(abs(coef$beta) > .Machine$double.eps)
      return(sparseMatrix(x = c(coef$intercept, coef$beta[nz_ind]), i = c(1L, 1L + nz_ind),
                          j = rep.int(1L, length(nz_ind) + 1L),
                          dims = c(length(coef$beta) + 1L, 1L), dimnames = list(var_names, NULL)))
    }
  } else if (isTRUE(sparse) || (is.null(sparse) && is(coef$beta, 'dsparseVector'))) {
    # Sparse vector output (sparse vectors carry no names).
    if (is(coef$beta, 'dsparseVector')) {
      return(sparseVector(c(coef$intercept, coef$beta@x), i = c(1L, coef$beta@i + 1L), length = coef$beta@length + 1L))
    } else {
      return(sparseVector(c(coef$intercept, coef$beta), i = seq_len(length(coef$beta) + 1L),
                          length = length(coef$beta) + 1L))
    }
  } else if (isFALSE(sparse) || (is.null(sparse) && is.numeric(coef$beta))) {
    # Dense named numeric vector output.
    coefvec <- c(coef$intercept, as.numeric(coef$beta))
    names(coefvec) <- var_names
    return(coefvec)
  } else {
    abort("argument `sparse` must be TRUE/FALSE or \"matrix\".")
  }
}
This diff is collapsed.
#' Deprecated Options for Initial Estimates
#'
#' \Sexpr[results=rd, stage=render]{lifecycle::badge('deprecated')}
#'
#' Superseded by [enpy_options()].
#'
#' Arguments without an equivalent in the new interface (e.g., `maxit_pense_refinement`,
#' `eps`, `mscale_eps`, `mscale_maxit`) are silently ignored.
#'
#' @export
#' @keywords internal
#' @importFrom lifecycle deprecate_warn
initest_options <- function (keep_solutions = 5, psc_method = c("exact", "rr"), maxit = 10, maxit_pense_refinement = 5,
                             eps = 1e-6, psc_keep = 0.5, resid_keep_method = c("proportion", "threshold"),
                             resid_keep_prop = 0.6, resid_keep_thresh = 2, mscale_eps = 1e-8, mscale_maxit = 200) {
  deprecate_warn('2.0.0', 'initest_options()', with = 'enpy_options()')
  # Match both enumerated arguments against their allowed values. Previously only
  # `resid_keep_method` was matched, so with the default, `pscMethod` contained the
  # full vector c("exact", "rr") instead of a single choice.
  psc_method <- match.arg(psc_method)
  resid_keep_method <- match.arg(resid_keep_method)
  list(keepSolutions = .as(keep_solutions[[1L]], 'integer'), pscMethod = psc_method,
       maxit = .as(maxit[[1L]], 'integer'), keepResidualsMethod = resid_keep_method,
       keepResidualsThreshold = .as(resid_keep_thresh[[1L]], 'numeric'),
       keepResidualsProportion = .as(resid_keep_prop[[1L]], 'numeric'),
       keepPSCProportion = .as(psc_keep[[1L]], 'numeric'))
}
#' Deprecated Options for the EN Algorithms
#'
#' \Sexpr[results=rd, stage=render]{lifecycle::badge('deprecated')}
#'
#' @name deprecated_en_options
#' @keywords internal
NULL
#' @export
#' @describeIn deprecated_en_options Superseded by [en_lars_options()].
#' @importFrom lifecycle deprecate_warn
en_options_aug_lars <- function (use_gram = c("auto", "yes", "no"), eps = 1e-12) {
  # All arguments are ignored: the replacement is called with its defaults only.
  deprecate_warn('2.0.0', 'en_options_aug_lars()', with = 'en_lars_options()')
  en_lars_options()
}
#' @export
#' @describeIn deprecated_en_options Superseded by [en_dal_options()]
#' @importFrom lifecycle deprecate_warn
en_options_dal <- function (maxit = 100, eps = 1e-8, eta_mult = 2, eta_start_numerator = 1e-2,
                            eta_start, preconditioner = c("approx", "none", "diagonal"), verbosity = 0) {
  deprecate_warn('2.0.0', 'en_options_dal()', with = 'en_dal_options()')
  # Only a subset of the old options is translated; `eps`, `eta_start`, `preconditioner`,
  # and `verbosity` are ignored. The old `eta_start_numerator` is used for both the
  # conservative and the aggressive starting value in the new interface.
  en_dal_options(max_it = .as(maxit[[1L]], 'integer'), eta_multiplier = .as(eta_mult[[1L]], 'numeric'),
                 eta_start_conservative = .as(eta_start_numerator[[1L]], 'numeric'),
                 eta_start_aggressive = .as(eta_start_numerator[[1L]], 'numeric'))
}
#' Deprecated Additional Options for the Penalized EN S-estimator
#'
#' \Sexpr[results=rd, stage=render]{lifecycle::badge('deprecated')}
#'
#' Superseded by [mm_algorithm_options()] and options supplied directly to [pense()].
#'
#' Arguments without an equivalent in the new interface (`verbosity`, `cc`,
#' `en_correction`) are silently ignored.
#'
#' @export
#' @keywords internal
#' @importFrom lifecycle deprecate_warn
pense_options <- function (delta = 0.25, maxit = 1000, eps = 1e-6, mscale_eps = 1e-8, mscale_maxit = 200,
                           verbosity = 0, cc = NULL, en_correction = TRUE) {
  deprecate_warn('2.0.0', 'pense_options()')
  list(maxit = .as(maxit[[1L]], 'integer'), bdp = .as(delta[[1L]], 'numeric'), eps = .as(eps[[1L]], 'numeric'),
       # Use `mscale_maxit` for the M-scale iteration limit. Previously `maxit` (the
       # algorithm iteration limit) was passed here, leaving `mscale_maxit` unused.
       mscale_opts = mscale_algorithm_options(max_it = mscale_maxit, eps = mscale_eps))
}
#' Deprecated Additional Options for the Penalized EN MM-estimator
#'
#' \Sexpr[results=rd, stage=render]{lifecycle::badge('deprecated')}
#'
#' Superseded by [mm_algorithm_options()] and options supplied directly to [pense()].
#'
#' @export
#' @keywords internal
#' @importFrom lifecycle deprecate_warn
mstep_options <- function (cc = 3.44, maxit = 1000, eps = 1e-6, adjust_bdp = FALSE, verbosity = 0,
                           en_correction = TRUE) {
  deprecate_warn('2.0.0', 'mstep_options()')
  # Only `maxit` carries over to the new interface; all other arguments are ignored.
  max_iterations <- .as(maxit[[1L]], 'integer')
  list(maxit = max_iterations)
}
#' Deprecated: ENPY Initial Estimates for EN S-Estimators
#'
#' \Sexpr[results=rd, stage=render]{lifecycle::badge('deprecated')}
#'
#' Superseded by [enpy_initial_estimates()].
#'
#' @export
#' @keywords internal
#' @importFrom lifecycle deprecate_warn
enpy <- function(x, y, alpha, lambda, delta, cc, options, en_options) {
  deprecate_warn('2.0.0', 'enpy()', with = 'enpy_initial_estimates()')
  # Delegate to the new implementation and translate the result back to the old format:
  # a matrix of coefficients (one column per estimate) and a vector of objective values.
  estimates <- enpy_initial_estimates(x, y, alpha = alpha, lambda = lambda, bdp = delta, cc = cc)
  coef_columns <- lapply(estimates, function (est) {
    c(est$intercept, as.numeric(est$beta))
  })
  coef_matrix <- matrix(unlist(coef_columns, recursive = FALSE, use.names = FALSE), ncol = length(estimates))
  objf_values <- unlist(lapply(estimates, `[[`, 'objf_value'), recursive = FALSE, use.names = FALSE)
  list(coeff = coef_matrix, objf = objf_values)
}
This diff is collapsed.
#' ENPY Initial Estimates
#'
#' @param x `n` by `p` matrix of numeric predictors.
#' @param y vector of response values of length `n`.
#' @param alpha value for the alpha parameter, the balance between L1 and L2
#' penalization.
#' @param lambdas a vector of positive values for the lambda parameter.
#' @param penalty_loadings a vector of positive penalty loadings
#' (a.k.a. weights) for different penalization of each coefficient.
#' @param include_intercept include an intercept in the model.
#' @param bdp the desired breakdown point of the estimator, between 0 and 0.5.
#' @param cc consistency constant for the scale estimate. By default, the scale estimate is made consistent for the
#' given breakdown point under the Normal model.
#' @param en_algorithm_opts options for the EN algorithm. See [en_algorithm_options] for details.
#' @param mscale_opts options for the M-scale estimation. See [mscale_algorithm_options] for details.
#' ENPY Initial Estimates for EN S-Estimators
#'
#' Compute initial estimates for the EN S-estimator using the EN-PY procedure.
#'
#' If these manually computed initial estimates are intended as starting points for [pense()], they are by default
#' *shared* for all penalization levels.
#' To restrict the use of the initial estimates to the penalty level they were computed for, use
#' `enpy_starting_point(..., specific = TRUE)`. See [enpy_starting_point()] for details.
#'
#' @inheritParams pense
#' @param enpy_opts options for the EN-PY algorithm, created with the [enpy_options()] function.
#' @param lambda a vector of positive values of penalization levels.
#' @export
enpy_initial_estimates <- function (x, y, alpha, lambdas, bdp = 0.25, cc, include_intercept = TRUE,
enpy_opts = enpy_options(), mscale_opts = mscale_algorithm_options()) {
alpha <- .as(alpha[[1L]], 'numeric')
#'
#' @references Cohen Freue, G.V.; Kepplinger, D.; Salibián-Barrera, M.; Smucler, E.
#' Robust elastic net estimators for variable selection and identification of proteomic biomarkers.
#' *Ann. Appl. Stat.* **13** (2019), no. 4, 2065--2090 <https://doi.org/10.1214/19-AOAS1269>
#'
#' @importFrom lifecycle deprecate_warn deprecate_stop deprecated is_present
#' @importFrom rlang abort
enpy_initial_estimates <- function (x, y, alpha, lambda, bdp = 0.25, cc, intercept = TRUE, penalty_loadings,
enpy_opts = enpy_options(), mscale_opts = mscale_algorithm_options(),
eps = 1e-6, sparse = FALSE, ncores = 1L) {
y <- .as(y, 'numeric')
x_dim <- dim(x)
if (length(y) != x_dim[[1L]]) {
abort("Number of observations in `x` and `y` does not match.")
} else if (x_dim[[2L]] <= 1L) {
abort("`x` must be a matrix with at least 1 column.")
}
if (missing(cc)) {
cc <- NULL
}
alpha <- .as(alpha[[1L]], 'numeric')
if (alpha < 0 || alpha > 1) {
stop("`alpha` is outside 0 and 1.")
}
if (alpha < sqrt(.Machine$double.eps)) {
abort("`alpha` is outside 0 and 1.")
} else if (alpha < sqrt(.Machine$double.eps)) {
alpha <- 0
}
penalties <- make_penalties(alpha, lambdas)
s_loss_params <- list(mscale = .full_mscale_algo_options(bdp = bdp, cc = cc, mscale_opts = mscale_opts),
intercept = include_intercept)
sparse <- .as(sparse[[1L]], 'logical')
# Check EN algorithm for ENPY
enpy_opts$en_options <- .select_en_algorithm(enpy_opts$en_options, alpha, x)
enpy_opts$num_threads <- max(1L, .as(ncores[[1L]], 'integer'))
enpy_opts$eps <- .as(eps[[1L]], 'numeric')
enpy_opts$en_options <- .select_en_algorithm(enpy_opts$en_options, alpha, sparse, enpy_opts$eps)
sparse <- enpy_opts$en_options$sparse
res <- .Call(C_penpy, x, drop(y), penalties, s_loss_params, enpy_opts, list())
if (enpy_opts$num_threads > 1L && !isTRUE(.k_multithreading_support)) {
warn("Multithreading not supported. Using only 1 core.")
}
lapply(res, function (res) {
if (!is.null(res$metrics)) {
class(res$metrics) <- 'nsoptim_metrics'
}
return(res)
optional_args <- list()
if (!missing(penalty_loadings) && !is.null(penalty_loadings)) {
checked_pls <- .prepare_penalty_loadings(penalty_loadings, x, alpha, sparse = sparse, stop_all_infinite = TRUE)
optional_args$penalty_loadings <- checked_pls$loadings
restore_coef_length <- checked_pls$restore_fun
x <- checked_pls$trimmed_x
} else {
restore_coef_length <- function (coef) coef
optional_args$penalty_loadings <- NULL
}
lambda <- sort(.as(lambda, 'numeric'), decreasing = TRUE)
penalties <- lapply(lambda, function (lambda) { list(alpha = alpha, lambda = lambda) })
s_loss_params <- list(mscale = .full_mscale_algo_options(bdp = bdp, cc = cc, mscale_opts = mscale_opts),
intercept = intercept)
res <- .Call(C_penpy, x, drop(y), penalties, s_loss_params, enpy_opts, optional_args)
res <- lapply(res, function (lambda_res) {
lapply(.metrics_attrib(lambda_res$estimates, lambda_res$metrics), function (est) {
structure(restore_coef_length(est), class = c('shared_starting_point', 'starting_point'))
})
})
structure(unlist(res, recursive = FALSE, use.names = FALSE), class = c('enpy_starting_points', 'starting_points'))
}
## Make a list of penalties and ensure that `alpha` and `lambdas` are of
## correct type and order.
##
## @param alpha single numeric value of the elastic net mixing parameter.
## @param lambdas vector of penalization levels; coerced to numeric and sorted
##   in decreasing order.
## @return a list with one element per penalization level, each a list with
##   components `alpha` and `lambda`.
## (The closing brace was missing in the truncated source.)
make_penalties <- function (alpha, lambdas) {
  lapply(sort(as.numeric(lambdas), decreasing = TRUE), function (lambda) {
    list(alpha = alpha, lambda = lambda)
  })
}
#' Principal Sensitivity Components
#'
#' Compute Principal Sensitivity Components for Elastic Net Regression
#'
#' @inheritParams pense
#' @param en_algorithm_opts options for the LS-EN algorithm. See [en_algorithm_options] for details.
#' @param method defunct. PSCs are always computed for EN estimates. For the PY procedure for unpenalized estimation
#'   use package [pyinit](https://cran.r-project.org/package=pyinit).
#'
#' @return a list of principal sensitivity components, one per element in `lambda`. Each PSC is itself a list
#'   with items `lambda`, `alpha`, and `pscs`.
#'
#' @export
#'
#' @references Cohen Freue, G.V.; Kepplinger, D.; Salibián-Barrera, M.; Smucler, E.
#'   Robust elastic net estimators for variable selection and identification of proteomic biomarkers.
#'   *Ann. Appl. Stat.* **13** (2019), no. 4, 2065--2090 <https://doi.org/10.1214/19-AOAS1269>
#' @references Pena, D., and Yohai, V.J.
#'   A Fast Procedure for Outlier Diagnostics in Large Regression Problems.
#'   *J. Amer. Statist. Assoc.* **94** (1999). no. 446, 434--445. <https://doi.org/10.2307/2670164>
#'
#' @importFrom lifecycle deprecate_warn deprecate_stop deprecated is_present
#' @importFrom rlang abort warn
prinsens <- function (x, y, alpha, lambda, intercept = TRUE, penalty_loadings, en_algorithm_opts,
                      eps = 1e-6, sparse = FALSE, ncores = 1L, method = deprecated()) {
  # Validate the dimensions of the input data.
  y <- .as(y, 'numeric')
  x_dim <- dim(x)
  if (length(y) != x_dim[[1L]]) {
    abort("Number of observations in `x` and `y` does not match.")
  } else if (x_dim[[2L]] <= 1L) {
    abort("`x` must be a matrix with at least 1 column.")
  }
  # `method = 'en'` (the only remaining behavior) is deprecated; any other value is defunct.
  if (is_present(method)) {
    if (method == 'en') {
      deprecate_warn('2.0.0', 'prinsens(method = )')
    } else {
      deprecate_stop('2.0.0', 'prinsens(method = )', details = 'For unpenalized estimates use the pyinit package.')
    }
  }
  sparse <- .as(sparse[[1L]], 'logical')
  if (missing(en_algorithm_opts)) {
    en_algorithm_opts <- NULL
  }
  alpha <- .as(alpha[[1L]], 'numeric')
  if (alpha < 0 || alpha > 1) {
    abort("`alpha` is outside 0 and 1.")
  } else if (alpha < sqrt(.Machine$double.eps)) {
    # Treat numerically tiny alpha as pure Ridge penalty.
    alpha <- 0
  }
  # The selected EN algorithm may override the requested sparsity of the coefficients.
  en_algorithm_opts <- .select_en_algorithm(en_algorithm_opts, alpha, sparse, eps)
  sparse <- en_algorithm_opts$sparse
  optional_args <- list(intercept = .as(intercept[[1L]], 'logical'),
                        num_threads = max(1L, .as(ncores[[1L]], 'integer')))
  # Penalty loadings (if given) may remove predictors with infinite loadings from `x`.
  if (!missing(penalty_loadings) && !is.null(penalty_loadings)) {
    checked_pls <- .prepare_penalty_loadings(penalty_loadings, x, alpha, sparse = sparse, stop_all_infinite = TRUE)
    optional_args$penalty_loadings <- checked_pls$loadings
    x <- checked_pls$trimmed_x
  } else {
    optional_args$penalty_loadings <- NULL
  }
  # Penalization levels are processed in decreasing order.
  lambda <- sort(.as(lambda, 'numeric'), decreasing = TRUE)
  penalties <- lapply(lambda, function (lambda) { list(alpha = alpha, lambda = lambda) })
  eps <- .as(eps[[1L]], 'numeric')
  if (optional_args$num_threads > 1L && !isTRUE(.k_multithreading_support)) {
    warn("Multithreading not supported. Using only 1 core.")
  }
  res <- .Call(C_pscs, x, drop(y), penalties, en_algorithm_opts, optional_args)
  # Attach the computed PSCs to the corresponding penalty description.
  mapply(penalties, res, FUN = function (pen, pscs) {
    pen$pscs <- pscs
    return(pen)
  }, SIMPLIFY = FALSE, USE.NAMES = FALSE)
}
This diff is collapsed.
#' Compute Penalized Elastic Net M-Estimates from PENSE
#'
#' This is a convenience wrapper around [pense_cv()] and [regmest_cv()], for the common use-case of computing
#' a highly-robust S-estimate followed by a more efficient M-estimate using the scale of the residuals from the
#' S-estimate.
#'
#' @param x either a numeric matrix of predictor values, or a cross-validated PENSE fit from [pense_cv()].
#' @param ... arguments passed on to the methods.
#' @importFrom lifecycle deprecate_warn
#' @export
pensem_cv <- function (x, ...) {
  UseMethod('pensem_cv')
}
#' @description
#' [pensem()] is a deprecated alias for `pensem_cv()`.
#'
#' @rdname pensem_cv
#' @importFrom lifecycle deprecate_warn
#' @export
pensem <- function (x, ...) {
  # Warn about the deprecated alias, then dispatch on the `pensem_cv` generic.
  deprecate_warn('2.0.0', 'pensem()', 'pensem_cv()')
  UseMethod('pensem_cv')
}
#'
#' @inheritParams pense
#' @param cc cutoff constant for Tukey's bisquare \eqn{\rho} function in the M-estimation objective function.
#' @param lambda_m,lambda_s optional user-supplied sequence of penalization levels for the S- and M-estimates.
#'   If given and not `NULL`, `nlambda` and `lambda_min_ratio` are ignored for the respective estimate (S and/or M).
#' @template cv_params
#'
#' @return an object of cross-validated regularized M-estimates as returned from [regmest_cv()].
#'
#' @export
#'
#' @rdname pensem_cv
#' @importFrom rlang abort
#' @importFrom methods is
pensem_cv.default <- function (x, y, alpha = 0.5, nlambda = 50, lambda_min_ratio, lambda_m, lambda_s,
                               standardize = TRUE, penalty_loadings, intercept = TRUE, bdp = 0.25, ncores = 1,
                               sparse = FALSE, eps = 1e-6, cc = 4.7, cv_k = 5, cv_repl = 1, cl = NULL,
                               cv_metric = c('tau_size', 'mape', 'rmspe'), add_zero_based = TRUE,
                               explore_solutions = 10, explore_tol = 0.1, max_solutions = 10,
                               comparison_tol = sqrt(eps), algorithm_opts = mm_algorithm_options(),
                               mscale_opts = mscale_algorithm_options(), nlambda_enpy = 10, enpy_opts = enpy_options(),
                               ...) {
  if (is(x, 'pense_fit')) {
    abort("A `pense()` fit can not be used for `pensem()`. Use `pense_cv()` instead.")
  }
  # Both steps are carried out by rewriting the matched call and evaluating it in the
  # caller's frame, so that the arguments are re-evaluated in the correct environment.
  cl <- match.call(expand.dots = TRUE)
  # Make the default for `cv_k` explicit in the forwarded calls.
  if (missing(cv_k)) {
    cl$cv_k <- cv_k
  }
  ## Compute the PENSE fit
  pense_call <- cl
  pense_call[[1L]] <- quote(pense::pense_cv)
  pense_call$fit_all <- FALSE
  pense_call$lambda_m <- NULL
  if (!missing(lambda_s)) {
    pense_call$lambda <- lambda_s
    pense_call$lambda_s <- NULL
  }
  pense_fit <- eval.parent(pense_call)
  ## Compute the PENSEM fit
  # Determine the scale of the residuals from the best PENSE fit
  pense_coefs <- coef(pense_fit, lambda = 'min')
  resids <- as.numeric(y - x %*% pense_coefs[-1L] - pense_coefs[[1L]])
  resid_scale <- mscale(resids, bdp = bdp, opts = mscale_opts)
  regmest_call <- cl
  regmest_call[[1L]] <- quote(pense::regmest_cv)
  regmest_call$fit_all <- TRUE
  # The M-step starts from the CV-optimal S-estimate and uses its residual scale.
  regmest_call$starting_points <- as_starting_point(pense_fit, lambda = 'min')
  regmest_call$scale <- resid_scale
  regmest_call$mscale_bdp <- bdp
  regmest_call$lambda_s <- NULL
  if (!missing(lambda_m)) {
    regmest_call$lambda <- lambda_m
    regmest_call$lambda_m <- NULL
  }
  regmest_fit <- eval.parent(regmest_call)
  # Retain the initial S-estimate for reference.
  regmest_fit$initial <- pense_fit
  return(regmest_fit)
}
#' @rdname pensem_cv
#'
#' @param scale initial scale estimate to use in the M-estimation. By default the S-scale from the PENSE fit is used.
#' @param x_train,y_train override arguments `x` and `y` as provided in the call to `pense_cv()`. This is useful if
#'   the arguments in the `pense_cv()` call are not available in the current environment.
#' @export
#'
#' @seealso [pense_cv()] to compute the starting S-estimate.
pensem_cv.pense_cvfit <- function (x, scale, alpha, nlambda = 50, lambda_min_ratio, lambda_m,
                                   standardize = TRUE, penalty_loadings, intercept = TRUE, bdp = 0.25, ncores = 1,
                                   sparse = FALSE, eps = 1e-6, cc = 4.7, cv_k = 5, cv_repl = 1, cl = NULL,
                                   cv_metric = c('tau_size', 'mape', 'rmspe'), add_zero_based = TRUE,
                                   explore_solutions = 10, explore_tol = 0.1, max_solutions = 10,
                                   comparison_tol = sqrt(eps), algorithm_opts = mm_algorithm_options(),
                                   mscale_opts = mscale_algorithm_options(), x_train, y_train, ...) {
  # The M-step is carried out by rewriting the matched call and evaluating it in the
  # caller's frame (same approach as `pensem_cv.default()`).
  cl <- match.call(expand.dots = TRUE)
  # Make the default for `cv_k` explicit in the forwarded call.
  if (missing(cv_k)) {
    cl$cv_k <- cv_k
  }
  # Extract information from pense fit
  if (missing(alpha)) {
    alpha <- x$estimates[[1L]]$alpha
  }
  # Recover the training data from the original `pense_cv()` call unless overridden.
  if (missing(x_train)) {
    x_train <- eval.parent(x$call$x)
  }
  if (missing(y_train)) {
    y_train <- eval.parent(x$call$y)
  }
  ## Compute the PENSEM fit
  # Determine the scale of the residuals from the best PENSE fit
  if (missing(scale)) {
    pense_coefs <- coef(x, lambda = 'min')
    resids <- as.numeric(y_train - x_train %*% pense_coefs[-1L] - pense_coefs[[1L]])
    scale <- mscale(resids, bdp = bdp, opts = mscale_opts)
  }
  regmest_call <- cl
  regmest_call[[1L]] <- quote(pense::regmest_cv)
  regmest_call$fit_all <- TRUE
  regmest_call$x <- x_train
  regmest_call$y <- y_train
  regmest_call$alpha <- alpha
  # The M-step starts from the CV-optimal S-estimate and uses its residual scale.
  regmest_call$starting_points <- as_starting_point(x, lambda = 'min')
  regmest_call$scale <- scale
  regmest_call$mscale_bdp <- bdp
  regmest_call$lambda_s <- NULL
  # `x_train`/`y_train` are not arguments of `regmest_cv()`; remove them from the call.
  regmest_call$x_train <- NULL
  regmest_call$y_train <- NULL
  if (!missing(lambda_m)) {
    regmest_call$lambda <- lambda_m
    regmest_call$lambda_m <- NULL
  }
  regmest_fit <- eval.parent(regmest_call)
  return(regmest_fit)
}
#' Plot Method for Penalized Estimates
#'
#' Plot the coefficient path for fitted penalized elastic net S- or LS-estimates of regression.
#'
#' @param x fitted estimates.
#' @param ... currently ignored.
#'
#' @example examples/pense_fit.R
#' @export
plot.pense_fit <- function (x, ...) {
  # Delegate to the internal path-plotting helper with the fit's own penalty sequence.
  lambda_sequence <- x$lambda
  .plot_coef_path(x, lambda_sequence, envir = parent.frame())
}
#' Plot Method for Penalized Estimates With Cross-Validation
#'
#' Plot the cross-validation performance or the coefficient path for fitted penalized elastic net S- or LS-estimates
#' of regression.
#'
#' @param x fitted estimates with cross-validation information.
#' @param what plot either the CV performance or the coefficient path.
#' @param se_mult if plotting CV performance, multiplier of the estimated SE.
#' @param ... currently ignored.
#'
#' @example examples/pense_fit.R
#' @export
plot.pense_cvfit <- function(x, what = c('cv', 'coef.path'), se_mult, ...) {
  what <- match.arg(what)
  if (what == 'coef.path' && isFALSE(x$call$fit_all)) {
    stop("`x` was created with `fit_all = FALSE`. Coefficient path not available.")
  } else if (!isTRUE(ncol(x$cv_replications) > 1L)) {
    # With a single CV replication no standard errors are available; force the SE multiple to 0.
    if (what == 'cv' && !missing(se_mult) && isTRUE(se_mult > 0)) {
      warning("Only a single cross-validation replication was performed. Standard error not available.")
    }
    se_mult <- 0
  }
  # Default SE multiple (the assignment above makes `se_mult` non-missing, so this
  # branch is skipped when it was already forced to 0).
  if (missing(se_mult)) {
    se_mult <- 2
  }
  se_sel <- .cv_se_selection(x$cvres$cvavg, x$cvres$cvse, se_mult)
  lambda_opt <- x$cvres$lambda[[which(se_sel == 'se_fact')]]
  switch(what, coef.path = .plot_coef_path(x, x$cvres$lambda, lambda_opt, envir = parent.frame()),
         cv = .plot_cv_res(x, se_mult, se_sel))
}
## Plot the cross-validation prediction performance with error bars and highlight the
## penalty levels selected by the "minimum" and the "SE" rule.
##
## @param object fit with a `cvres` component (`lambda`, `cvavg`, `cvse`) and `cv_measure`.
## @param se_mult multiple of the CV standard error for the error bars / SE rule.
## @param se_sel factor marking each penalty level as 'min'/'se_fact'/other.
#' @importFrom graphics plot segments abline
#' @importFrom rlang warn
.plot_cv_res <- function (object, se_mult, se_sel) {
  measure_label <- switch(object$cv_measure, mape = "Median absolute prediction error",
                          rmspe = "Root mean square prediction error",
                          tau_size = expression(paste(tau, "-scale of the prediction error")),
                          "Prediction performance")
  colors <- c('gray20', '#0072B2', '#56B4E9')
  with(object$cvres, {
    min_ind <- which(se_sel == 'min')
    if (length(min_ind) == 0L) {
      # minimum and SE rule coincide
      min_ind <- which(se_sel == 'se_fact')
    }
    cols <- colors[as.integer(se_sel)]
    xrange <- range(lambda)
    # Ensure errorbars don't extend into the negative (the y axis uses a log scale)!
    errorbar_ymin <- cvavg - se_mult * cvse
    neg_errorbars <- which(errorbar_ymin <= 0)
    if (length(neg_errorbars) > 0L) {
      warn("Error bars extending into the negative range are truncated.")
      if (length(neg_errorbars) < length(cvavg)) {
        # There are errorbars that don't extend to the negative. Use the smallest positive value.
        errorbar_ymin[neg_errorbars] <- min(errorbar_ymin[-neg_errorbars])
      } else {
        # All errorbars extend to the negative. Use an arbitrary positive value below each point.
        # (Was `rep.int(cvavg * 0.95, length(errorbar_ymin))`, which replicates the whole
        # vector and yields a vector of length n^2, drawing spurious segments.)
        errorbar_ymin <- cvavg * 0.95
      }
    }
    plot(lambda, cvavg, log = 'xy', col = cols,
         ylim = range(cvavg + se_mult * cvse, errorbar_ymin),
         pch = 20L, xlab = expression(lambda), ylab = measure_label, main = "CV prediction performance")
    if (isTRUE(se_mult > 0)) {
      segments(lambda, errorbar_ymin, lambda, cvavg + se_mult * cvse, col = cols)
      abline(h = cvavg[[min_ind]] + se_mult * cvse[[min_ind]], col = colors[[3L]], lty = '22')
    }
  })
}
#' Plot the coefficient paths over the sequence of penalization levels.
#'
#' @param object fitted estimates; uses `object$estimates` (each with `beta` and `lambda`)
#'   and `object$call$x` to recover predictor names.
#' @param lambda_seq the full sequence of penalization levels of the path.
#' @param lambda_opt optional penalization level to highlight with a dashed vertical line.
#' @param envir environment in which `object$call$x` is re-evaluated.
#' @importFrom graphics plot lines text abline
.plot_coef_path <- function(object, lambda_seq, lambda_opt, envir) {
  # Try to recover predictor names from the original `x` argument; fall back to X1, X2, ...
  var_names <- tryCatch(colnames(eval(object$call$x, envir = envir)), error = function (e) NULL)
  if (is.null(var_names)) {
    var_names <- paste('X', seq_len(length(object$estimates[[1]]$beta)), sep = '')
  }
  # Build one row per (active variable, lambda) combination.
  # Fix: `.active_indices_and_values(est)` was computed twice per estimate; reuse `actives`.
  active_vars <- do.call(rbind, lapply(object$estimates, function (est) {
    actives <- .active_indices_and_values(est)
    if (length(actives$var_index) > 0L) {
      return(data.frame(actives, lambda = est$lambda))
    } else {
      return(NULL)
    }
  }))
  active_vars$var <- factor(var_names[active_vars$var_index],
                            levels = var_names[sort(unique(active_vars$var_index))])
  # Cycle through at most 10 distinct colors.
  var_colors <- (seq_len(nlevels(active_vars$var)) - 1L) %% 10L + 1L
  names(var_colors) <- levels(active_vars$var)
  # NOTE(review): assumes `lambda_seq` is sorted in decreasing order, so the reversed
  # sequence is increasing and matches `active_vars` ordered by lambda -- confirm.
  lambda_seq <- rev(lambda_seq)
  active_vars <- active_vars[order(active_vars$lambda), ]
  plot(1, 0, xlim = range(lambda_seq), ylim = range(0, active_vars$value), type = "n", log = "x",
       xlab = expression(lambda), ylab = "Coefficient estimate")
  for (i in seq_len(nlevels(active_vars$var))) {
    sel_var <- levels(active_vars$var)[[i]]
    with(active_vars, {
      act_values <- value[var == sel_var]
      act_lambda <- lambda[var == sel_var]
      matching_lambdas <- match(act_lambda, lambda_seq)
      lambda_bound <- range(matching_lambdas, na.rm = TRUE)
      # Pad the path with zero coefficients where the variable is inactive, so lines
      # visibly drop to 0 just outside the variable's active range.
      pad_0s <- 0L
      if (lambda_bound[[1L]] > 1L) {
        # NOTE(review): pads the lower end with `min(lambda)` (smallest lambda over *all*
        # active variables) rather than `min(lambda_seq)` -- confirm asymmetry with the
        # upper-end padding below is intended.
        act_lambda <- c(act_lambda, lambda_seq[[lambda_bound[[1L]] - 1L]], min(lambda))
        pad_0s <- 2L
      }
      if (lambda_bound[[2L]] < length(lambda_seq)) {
        act_lambda <- c(act_lambda, lambda_seq[[lambda_bound[[2L]] + 1L]], max(lambda_seq))
        pad_0s <- pad_0s + 2L
      }
      lines(x = act_lambda, y = c(act_values, numeric(pad_0s)), col = var_colors[[sel_var]])
      # Label each path at its smallest penalization level.
      text(x = min(act_lambda), y = act_values[which.min(act_lambda)], labels = sel_var,
           adj = 1, pos = 2, cex = 0.9, col = "#4f4f4f")
    })
  }
  if (!missing(lambda_opt)) {
    abline(v = lambda_opt, lty = 2, col = "#4f4f4f")
  }
}
#' Extract the indices and values of the non-zero (active) coefficients.
#'
#' @param est a single estimate with element `beta` (dense numeric vector or a
#'   sparse `dsparseVector`).
#' @return a list with components `var_index` and `value`.
#' @importFrom methods is
.active_indices_and_values <- function (est) {
  beta <- est$beta
  if (is(beta, 'dsparseVector')) {
    # Sparse vectors already store the active indices (@i) and their values (@x).
    return(list(var_index = beta@i, value = beta@x))
  }
  # Dense vector: anything within machine precision of 0 counts as inactive.
  nonzero_ind <- which(abs(beta) > .Machine$double.eps)
  list(var_index = nonzero_ind, value = beta[nonzero_ind])
}
#' Print Metrics
#'
#' Pretty-print a list of metrics from the optimization algorithm (if `pense` was built with metrics enabled).
#'
#' @param x metrics object for printing.
#' @param max_level maximum level of printing which is applied for printing nested metrics.
#' @keywords internal
#' @export
#' @method print nsoptim_metrics
print.nsoptim_metrics <- function (x, max_level = NA, ...) {
.print_metrics(x, max_level, '')
invisible(NULL)
......@@ -37,4 +37,4 @@ print.nsoptim_metrics <- function (x, max_level = NA, ...) {
prefix = paste0(prefix, ' '))
}
invisible(NULL)
}
\ No newline at end of file
}
This diff is collapsed.
#' Predict Method for PENSE Fits
#'
#' Predict response values using a PENSE (or LS-EN) regularization path fitted by [pense()] or [elnet()].
#'
#' @param object PENSE regularization path to predict from.
#' @param newdata an optional matrix of new predictor values. If missing, the fitted values are computed.
#' @param lambda a single value of the penalty parameter.
#' @param exact deprecated. Always gives a warning if `lambda` is not part of the fitted sequence and coefficients
#'    need to be interpolated.
#' @param correction defunct.
#' @param ... currently not used.
#' @return a numeric vector of predictions for the given penalization level.
#'
#' @importFrom Matrix drop
#' @importFrom lifecycle deprecate_warn deprecate_stop deprecated is_present
#' @importFrom rlang abort
#'
#' @example examples/residuals.R
#' @export
predict.pense_fit <- function(object, newdata, lambda, exact = deprecated(), correction = deprecated(), ...) {
  if (is_present(exact)) {
    # Fix: the deprecation message previously referenced `residuals(exact=)`.
    deprecate_warn('2.0.0', 'predict(exact=)')
  }
  if (is_present(correction)) {
    # Fix: `deprecate_stop` was missing from the lifecycle imports and the
    # message previously referenced `residuals(correction=)`.
    deprecate_stop('2.0.0', 'predict(correction=)')
  }
  # Coefficients at the requested penalization level.
  coef_est <- coef(object, lambda = lambda, concat = FALSE)
  if (missing(newdata)) {
    # Re-evaluate the original `x` argument from the fitting call to compute fitted values.
    newdata <- eval.parent(object$call$x)
  } else {
    newdata <- data.matrix(newdata)
    x_dim <- dim(newdata)
    if (x_dim[[2L]] != length(coef_est$beta)) {
      abort(sprintf("`newdata` is of invalid size (contains %d predictors instead of the required %d)",
                    x_dim[[2L]], length(coef_est$beta)))
    }
  }
  return(drop(newdata %*% coef_est$beta) + coef_est$intercept)
}
#' Predict Method for PENSE Fits With Cross-Validated Hyper-Parameters
#'
#' Predict response values using a PENSE (or LS-EN) regularization path with hyper-parameters chosen by
#' cross-validation.
#'
#' If `lambda = "se"` and `object` contains fitted estimates for every penalization level in the sequence, use the
#' most parsimonious model with prediction performance statistically indistinguishable from the best
#' model. This is determined to be the model with prediction performance within `se_mult * cv_se` from the best model.
#'
#' @param object PENSE with cross-validated hyper-parameters to predict from.
#' @param newdata an optional matrix of new predictor values. If missing, the fitted values are computed.
#' @param lambda either a string specifying which penalty level to use or a single numeric value of the penalty
#'    parameter. See details.
#' @param se_mult If `lambda = "se"`, the multiple of standard errors to tolerate.
#' @param exact deprecated. Always gives a warning if `lambda` is not part of the fitted sequence and coefficients
#'    are interpolated.
#' @param correction defunct.
#' @param ... currently not used.
#' @return a numeric vector of predictions for the given penalization level.
#'
#' @importFrom Matrix drop
#' @importFrom lifecycle deprecate_warn deprecate_stop deprecated is_present
#' @importFrom rlang abort
#'
#' @example examples/residuals.R
#' @export
predict.pense_cvfit <- function(object, newdata, lambda = c('min', 'se'), se_mult = 2, exact = deprecated(),
                                correction = deprecated(), ...) {
  if (is_present(exact)) {
    # Fix: the deprecation message previously referenced `coef(exact=)`.
    deprecate_warn('2.0.0', 'predict(exact=)')
  }
  if (is_present(correction)) {
    # Fix: `deprecate_stop` was missing from the lifecycle imports and the
    # message previously referenced `coef(correction=)`.
    deprecate_stop('2.0.0', 'predict(correction=)')
  }
  if (is.character(lambda)) {
    lambda <- match.arg(lambda)
  }
  # Coefficients at the CV-selected (or explicitly requested) penalization level.
  coef_est <- coef(object, lambda = lambda, se_mult = se_mult, concat = FALSE)
  if (missing(newdata)) {
    # Re-evaluate the original `x` argument from the fitting call to compute fitted values.
    newdata <- eval.parent(object$call$x)
  } else {
    newdata <- data.matrix(newdata)
    x_dim <- dim(newdata)
    if (x_dim[[2L]] != length(coef_est$beta)) {
      # Fix: `abort` is used here, but the roxygen block previously imported only `rlang::warn`.
      abort(sprintf("`newdata` is of invalid size (contains %d predictors instead of the required %d)",
                    x_dim[[2L]], length(coef_est$beta)))
    }
  }
  return(drop(newdata %*% coef_est$beta) + coef_est$intercept)
}
#' Extract Residuals
#'
#' Extract residuals from a PENSE (or LS-EN) regularization path fitted by [pense()] or [elnet()].
#'
#' @param object PENSE regularization path to extract residuals from.
#' @param lambda a single value of the penalty parameter.
#' @param exact deprecated. Always gives a warning if `lambda` is not part of the fitted sequence and
#'    coefficients need to be interpolated.
#' @param correction defunct.
#' @param ... currently not used.
#' @return a numeric vector of residuals for the given penalization level.
#'
#' @example examples/residuals.R
#' @export
residuals.pense_fit <- function(object, lambda, exact = deprecated(), correction = deprecated(), ...) {
  # Recover the observed response from the original fitting call.
  observed <- eval.parent(object$call$y)
  # Forward the matched call (including deprecated arguments) to the predict
  # method and evaluate it in the caller's frame.
  pred_call <- match.call(expand.dots = FALSE)
  pred_call[[1L]] <- quote(pense:::predict.pense_fit)
  observed - eval.parent(pred_call)
}
#' Extract Residuals
#'
#' Extract residuals from a PENSE (or LS-EN) regularization path with hyper-parameters chosen by cross-validation.
#'
#' If `lambda = "se"` and `object` contains fitted estimates for every penalization level in the sequence, extract the
#' residuals of the most parsimonious model with prediction performance statistically indistinguishable from the best
#' model. This is determined to be the model with prediction performance within `se_mult * cv_se` from the best model.
#'
#' @param object PENSE with cross-validated hyper-parameters to extract residuals from.
#' @param lambda either a string specifying which penalty level to use or a single numeric value of the penalty
#'    parameter. See details.
#' @param se_mult If `lambda = "se"`, the multiple of standard errors to tolerate.
#' @param exact deprecated. Always gives a warning if `lambda` is not part of the fitted sequence and
#'    coefficients are interpolated.
#' @param correction defunct.
#' @param ... currently not used.
#' @return a numeric vector of residuals for the given penalization level.
#'
#' @example examples/residuals.R
#' @export
residuals.pense_cvfit <- function(object, lambda = c('min', 'se'), se_mult = 2, exact = deprecated(),
                                  correction = deprecated(), ...) {
  # Recover the observed response from the original fitting call.
  observed <- eval.parent(object$call$y)
  # Forward the matched call (including deprecated arguments) to the predict
  # method and evaluate it in the caller's frame.
  pred_call <- match.call(expand.dots = FALSE)
  pred_call[[1L]] <- quote(pense:::predict.pense_cvfit)
  observed - eval.parent(pred_call)
}
This diff is collapsed.
This diff is collapsed.
#!/bin/sh
# Remove all files generated by autoconf and the package build.
# Fix: use `rm -f` so the script does not emit errors when a file is already
# gone or a glob (src/*.o, src/*.so) matches nothing.
rm -Rf autom4te.cache
rm -f config.log
rm -f config.status
rm -f src/autoconfig.hpp
rm -f R/autoconfig.R
rm -f src/Makevars
rm -f src/*.o
rm -f src/*.so
This diff is collapsed.
AC_INIT(pense, 2.0.0)
AC_PREREQ(2.62)

# Locate the R installation this package is being configured against.
: ${R_HOME=`R RHOME`}
if test -z "${R_HOME}"; then
  echo "could not determine R_HOME"
  exit 1
fi
RBIN="${R_HOME}/bin/R"

# Use the same compilers and flags that R itself was configured with.
CC=`"${RBIN}" CMD config CC`
CPP="$CC -E"
CXX=`"${RBIN}" CMD config CXX`
CXXCPP="$CXX -E"
CXXFLAGS=`"${RBIN}" CMD config CXXFLAGS`
CPPFLAGS=`"${RBIN}" CMD config CPPFLAGS`
LDFLAGS=`"${RBIN}" CMD config LDFLAGS`
AC_PROG_CC
AC_PROG_CPP
AC_PROG_CXX

################################################################################
##
## Check for necessary support by C++ compiler
##
################################################################################
AC_LANG(C++)

## Check for strict aliasing support
# Temporarily add the flag together with -Werror and try to compile an empty
# program; on failure the flag is cleared so it is never passed to the compiler.
STRICT_ALIASING_FLAG="-fstrict-aliasing"
AC_MSG_CHECKING([whether _AC_LANG compiler accepts $STRICT_ALIASING_FLAG])
ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS
_AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS -Werror $STRICT_ALIASING_FLAG -Wstrict-aliasing"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM()],
[AC_MSG_RESULT([yes])],
[
AC_MSG_RESULT([no])
STRICT_ALIASING_FLAG=""
])
# Restore the original compiler flags after the probe.
_AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags
AC_SUBST(STRICT_ALIASING_FLAG)

## Check for OpenMP support
R_HAVE_OPENMP='FALSE'
AC_OPENMP
# Some systems have broken OMP libraries, so also check that the actual package will work
# by building a conftest shared library via R CMD SHLIB and dyn.load-ing it.
ac_pkg_openmp_cxx=no
AC_MSG_CHECKING([C++ support for OpenMP])
AC_LANG_CONFTEST([
AC_LANG_PROGRAM([[#include <omp.h>]], [[ return omp_get_num_threads (); ]])
])
# NOTE(review): the current autoconf language is C++, yet R CMD SHLIB is invoked on
# conftest.c with PKG_CFLAGS -- confirm the conftest file extension and flag
# variable match what AC_LANG_CONFTEST actually wrote.
PKG_CFLAGS="${OPENMP_CXXFLAGS}" PKG_LIBS="${OPENMP_CXXFLAGS}" "$RBIN" CMD SHLIB conftest.c 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD && \
"$RBIN" --vanilla -q -e "dyn.load(paste('conftest',.Platform\$dynlib.ext,sep=''))" 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD && ac_pkg_openmp_cxx=yes
AC_MSG_RESULT([${ac_pkg_openmp_cxx}])

# Check if OpenMP supports tasks, reductions, and if constant member variables need to be shared
# NOTE(review): LDFLAGS is modified *before* being saved into ax_check_save_flags,
# so a later restore from that variable would keep the OpenMP flag appended --
# confirm this ordering is intended (the restore happens further down, outside this view).
LDFLAGS="$LDFLAGS $OPENMP_CXXFLAGS"
ax_check_save_flags="$LDFLAGS"
if test "${ac_pkg_openmp_cxx}" = yes; then
AC_MSG_CHECKING([OpenMP support for tasks])