Commit aede43e4 authored by davidkep

update docs

parent 479404f0
......@@ -929,7 +929,7 @@ EXCLUDE_SYMLINKS = NO
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
EXCLUDE_PATTERNS =
EXCLUDE_PATTERNS = *__* # Exclude paths which contain a double underscore.
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
......
......@@ -13,7 +13,7 @@ General Optimizer interface
``nsoptim::Optimizer``
======================
.. cpp:class:: template<typename _LossFunction, typename _PenaltyFunction, typename _Coefficients> class Optimizer
.. cpp:class:: template<typename _LossFunction, typename _PenaltyFunction, typename _Coefficients> Optimizer
Base class for all optimizers. Its only purpose is to ensure that ``_LossFunction`` is a valid loss function class, ``_PenaltyFunction`` is a valid penalty function class, and that both of them can handle coefficients
of type ``_Coefficients``.
......@@ -37,6 +37,17 @@ General Optimizer interface
Optimum type as returned by this optimizer.
.. rubric:: Public methods
.. cpp:function:: Optimum Optimize()
Find an optimum of the objective function.
.. cpp:function:: Optimum Optimize(const Coefficients& start)
Find an optimum of the objective function, starting the optimization at *start*.
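As an illustration of how client code typically uses these two overloads, the helper below is a sketch (not part of nsoptim); it only assumes the ``Optimum`` and ``Coefficients`` member types that appear in the signatures above.

.. code-block:: cpp

   // Dispatch to the warm-start overload only when a starting point is given;
   // otherwise let the optimizer determine its own starting point.
   template <typename Optimizer>
   typename Optimizer::Optimum OptimizeMaybeWarm(
       Optimizer& optimizer,
       const typename Optimizer::Coefficients* start = nullptr) {
     return start ? optimizer.Optimize(*start) : optimizer.Optimize();
   }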
************
MM Optimizer
************
......@@ -54,7 +65,7 @@ The MM optimizer requires an *inner optimizer*, i.e., an optimizer which can opt
The MM optimizer has several configuration parameters that are set on construction by supplying a :cpp:class:`nsoptim::MMConfiguration` object.
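For reference, the standard majorization-minimization (MM) argument underlying this type of optimizer: if the surrogate :math:`g(\cdot \mid x^{(k)})` majorizes the objective :math:`f`, i.e. :math:`g(x \mid x^{(k)}) \geq f(x)` for all :math:`x` with equality at :math:`x^{(k)}`, then minimizing the surrogate never increases the objective:

.. math::

   x^{(k+1)} = \operatorname{arg\,min}_x \, g(x \mid x^{(k)})
   \quad\Longrightarrow\quad
   f(x^{(k+1)}) \leq g(x^{(k+1)} \mid x^{(k)}) \leq g(x^{(k)} \mid x^{(k)}) = f(x^{(k)}).

The convex surrogate described below plays the role of :math:`g`.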
.. _ref-optim-mm-convex-surrogate::
.. _ref-optim-mm-convex-surrogate:
Convex Surrogate
================
......@@ -78,7 +89,7 @@ Penalty functions can provide a similar member to return a convex surrogate.
Linearized Alternating Direction Method of Multipliers (ADMM) Optimizer
********************************************************************************
* Supported loss functions: :cpp:class:`LsLoss`, :cpp:class:`WeightedLsLoss`
* Supported loss functions: :cpp:class:`LsRegressionLoss`, :cpp:class:`WeightedLsRegressionLoss`
* Supported penalty functions: :cpp:class:`EnPenalty`, :cpp:class:`AdaptiveEnPenalty`
Linearized ADMM works for objective functions that can be written as :math:`l(A x) + p(x)` and solves the problem
......@@ -134,7 +145,7 @@ Proximal Operator
Alternating Direction Method of Multipliers (ADMM) Optimizer with Variable Step-Size
******************************************************************************************
* Supported loss functions: :cpp:class:`LsLoss`, :cpp:class:`WeightedLsLoss`
* Supported loss functions: :cpp:class:`LsRegressionLoss`, :cpp:class:`WeightedLsRegressionLoss`
* Supported penalty functions: :cpp:class:`EnPenalty`, :cpp:class:`AdaptiveEnPenalty`
This implementation operates directly on the objective function :math:`l(x) + p(x)`, but adjusts the step size
......
......@@ -20,8 +20,8 @@
namespace nsoptim {
namespace ls_loss {
//! Compute the minimum of the 1- and infinity norm of the weighted matrix W X, where W is a diagonal matrix with
//! entries `sqrt_weights`.
//! Compute the minimum of the 1- and infinity norm of the weighted matrix :math:`W X`, where :math:`W` is a diagonal
//! matrix with entries `sqrt_weights`.
inline double TwoNormUpper(const arma::mat& x, const arma::vec& sqrt_weights) {
double norm_1 = 0, norm_inf = 0;
......@@ -49,6 +49,7 @@ inline double TwoNormUpper(const arma::mat& x, const arma::vec& sqrt_weights) {
} // namespace ls_loss
//! A regression loss function implementing the weighted least-squares loss defined as
//!
//! :math:`\frac{1}{2n} \sum_{i=1}^n w_i (y_i - \hat{\mu} - x_i' \beta)^2`
class WeightedLsLoss : public LossFunction<PredictorResponseData>,
public ConvexFunction<WeightedLsLoss> {
......
......@@ -162,6 +162,8 @@ class LsProximalOperator {
using LossFunction = LsLoss;
//! Initialize the proximal operator with fixed step size `1 / tau`.
//!
//! @param tau Determines the fixed step size `1 / tau`. If negative (the default), the step size is computed from the data.
explicit LsProximalOperator(const double tau = -1) noexcept : config_tau_(tau) {}
//! Set the loss function for the proximal operator.
......@@ -174,11 +176,11 @@ class LsProximalOperator {
//! Compute the proximal operator `v` for the given input parameters.
//!
//! @param u
//! @param v_prev ignored.
//! @param intercept
//! @param lambda
//! @param metrics optional metrics object to collect metrics of the proximal operator
//! @param u Current value of the other ADMM coefficient.
//! @param v_prev Previous value of the input coefficient. Ignored.
//! @param intercept Current value of the intercept.
//! @param lambda Scaling factor for the proximal operator.
//! @param metrics Optional metrics object to collect metrics of the proximal operator.
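//!
//! For reference, the proximal operator of a function :math:`f` with parameter
//! :math:`\lambda` is defined as
//! :math:`\operatorname{prox}_{\lambda f}(u) = \arg\min_v f(v) + \frac{1}{2 \lambda} \| v - u \|_2^2`.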
inline arma::vec operator()(const arma::vec& u, const arma::vec&, const double intercept, const double lambda,
Metrics * const = nullptr) {
return this->operator()(u, intercept, lambda);
......@@ -186,10 +188,10 @@ class LsProximalOperator {
//! Compute the proximal operator `v` for the given input parameters.
//!
//! @param u
//! @param intercept
//! @param lambda
//! @param metrics optional metrics object to collect metrics of the proximal operator
//! @param u Current value of the other ADMM coefficient.
//! @param intercept Current value of the intercept.
//! @param lambda Scaling factor for the proximal operator.
//! @param metrics Optional metrics object to collect metrics of the proximal operator.
inline arma::vec operator()(const arma::vec& u, const double intercept, const double lambda,
Metrics * const = nullptr) {
const int n = loss_->data().n_obs();
......@@ -202,14 +204,18 @@ class LsProximalOperator {
}
//! Compute the intercept.
//!
//! @param fitted Current fitted values.
//! @return The intercept minimizing the objective for the current fitted values.
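//!
//! For the unweighted LS loss the minimizing intercept has the closed form
//! :math:`\hat{\mu} = \frac{1}{n} \sum_{i=1}^n (y_i - \hat{y}_i)`, i.e. the mean of the
//! residuals at the current fitted values.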
inline double ComputeIntercept(const arma::vec& fitted) const noexcept {
return arma::mean(loss_->data().cy() - fitted);
}
//! Compute the step size for the currently set loss.
//!
//! @param penalty the current penalty value.
//! @return the loss-specific step size.
//! @param penalty Current penalty.
//! @param norm_x Norm of the predictor matrix.
//! @return Loss-specific step size.
double StepSize(const EnPenalty& penalty, const double norm_x) const {
if (config_tau_ < 0) {
const PredictorResponseData& data = loss_->data();
......@@ -230,8 +236,9 @@ class LsProximalOperator {
//! Compute the step size for the currently set loss.
//!
//! @param penalty the current penalty value.
//! @return the loss-specific step size.
//! @param penalty Current penalty.
//! @param norm_x Norm of the predictor matrix.
//! @return Loss-specific step size.
double StepSize(const AdaptiveEnPenalty& penalty, const double norm_x) const {
if (config_tau_ < 0) {
const PredictorResponseData& data = loss_->data();
......@@ -252,16 +259,18 @@ class LsProximalOperator {
//! Compute the step size for the currently set loss.
//!
//! @param penalty the current penalty value.
//! @return the loss-specific step size.
//! @param penalty Current penalty.
//! @param norm_x Norm of the predictor matrix.
//! @return Loss-specific step size.
inline double StepSize(const RidgePenalty& penalty, const double norm_x) const {
return StepSize(EnPenalty(penalty), norm_x);
}
//! Compute the step size for the currently set loss.
//!
//! @param penalty the current penalty value.
//! @return the loss-specific step size.
//! @param penalty Current penalty.
//! @param norm_x Norm of the predictor matrix.
//! @return Loss-specific step size.
inline double StepSize(const LassoPenalty& penalty, const double norm_x) const {
return StepSize(EnPenalty(penalty), norm_x);
}
......@@ -277,6 +286,8 @@ class WeightedLsProximalOperator {
using LossFunction = WeightedLsLoss;
//! Initialize the proximal operator with fixed step size `1 / tau`.
//!
//! @param tau Determines the fixed step size `1 / tau`. If negative (the default), the step size is computed from the data.
explicit WeightedLsProximalOperator(const double tau = -1) noexcept : config_tau_(tau) {}
//! Set the loss function for the proximal operator.
......@@ -290,11 +301,11 @@ class WeightedLsProximalOperator {
//! Compute the proximal operator `v` for the given input parameters.
//!
//! @param u
//! @param v_prev ignored.
//! @param intercept
//! @param lambda
//! @param metrics optional metrics object to collect metrics of the proximal operator
//! @param u Current value of the other ADMM coefficient.
//! @param v_prev Previous value of the input coefficient. Ignored.
//! @param intercept Current value of the intercept.
//! @param lambda Scaling factor for the proximal operator.
//! @param metrics Optional metrics object to collect metrics of the proximal operator.
inline arma::vec operator()(const arma::vec& u, const arma::vec&, const double intercept, const double lambda,
Metrics * const = nullptr) const {
return this->operator()(u, intercept, lambda);
......@@ -302,10 +313,10 @@ class WeightedLsProximalOperator {
//! Compute the proximal operator `v` for the given input parameters.
//!
//! @param u
//! @param intercept
//! @param lambda
//! @param metrics optional metrics object to collect metrics of the proximal operator
//! @param u Current value of the other ADMM coefficient.
//! @param intercept Current value of the intercept.
//! @param lambda Scaling factor for the proximal operator.
//! @param metrics Optional metrics object to collect metrics of the proximal operator.
inline arma::vec operator()(const arma::vec& u, const double intercept, const double lambda,
Metrics * const = nullptr) const {
const auto n = loss_->data().n_obs();
......@@ -318,14 +329,18 @@ class WeightedLsProximalOperator {
}
//! Compute the intercept.
//!
//! @param fitted Current fitted values.
//! @return The intercept minimizing the objective for the current fitted values.
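//!
//! For the weighted LS loss the minimizing intercept is the weighted mean of the
//! residuals, :math:`\hat{\mu} = \sum_{i=1}^n w_i (y_i - \hat{y}_i) / \sum_{i=1}^n w_i`,
//! computed here as the mean of the weighted residuals divided by the mean weight.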
inline double ComputeIntercept(const arma::vec& fitted) const noexcept {
return arma::mean((loss_->data().cy() - fitted) % weights_) / loss_->mean_weight();
}
//! Compute the step size for the currently set loss.
//!
//! @param penalty the current penalty value.
//! @return the loss-specific step size.
//! @param penalty Current penalty.
//! @param norm_x Norm of the predictor matrix.
//! @return Loss-specific step size.
double StepSize(const EnPenalty& penalty, const double norm_x) const {
if (config_tau_ < 0) {
const PredictorResponseData& data = loss_->data();
......@@ -345,8 +360,9 @@ class WeightedLsProximalOperator {
//! Compute the step size for the currently set loss.
//!
//! @param penalty the current penalty value.
//! @return the loss-specific step size.
//! @param penalty Current penalty.
//! @param norm_x Norm of the predictor matrix.
//! @return Loss-specific step size.
double StepSize(const AdaptiveEnPenalty& penalty, const double norm_x) const {
if (config_tau_ < 0) {
const PredictorResponseData& data = loss_->data();
......@@ -366,16 +382,18 @@ class WeightedLsProximalOperator {
//! Compute the step size for the currently set loss.
//!
//! @param penalty the current penalty value.
//! @return the loss-specific step size.
//! @param penalty Current penalty.
//! @param norm_x Norm of the predictor matrix.
//! @return Loss-specific step size.
inline double StepSize(const RidgePenalty& penalty, const double norm_x) const {
return StepSize(EnPenalty(penalty), norm_x);
}
//! Compute the step size for the currently set loss.
//!
//! @param penalty the current penalty value.
//! @return the loss-specific step size.
//! @param penalty Current penalty.
//! @param norm_x Norm of the predictor matrix.
//! @return Loss-specific step size.
inline double StepSize(const LassoPenalty& penalty, const double norm_x) const {
return StepSize(EnPenalty(penalty), norm_x);
}
......
......@@ -20,13 +20,12 @@ namespace nsoptim {
namespace traits {
namespace internal {
//! Test if the loss function T uses data type U
template<typename>
template<typename, typename>
static auto test_loss_supports_data(double) -> std::false_type;
//! Test if the loss function T uses data type U
template<typename T>
static auto test_loss_supports_data(int) -> sfinae_method_type<decltype(std::declval<T>().data()),
typename T::DataType>;
template<typename T, typename U>
static auto test_loss_supports_data(int) -> sfinae_method_type<decltype(std::declval<T>().data()), U>;
//! Test if the loss function T can create a "zero" coefficient object of type U.
template<typename, typename>
......@@ -40,8 +39,8 @@ static auto test_loss_supports_zero_coefs(int) -> sfinae_method_type<
//! Type trait for a loss function.
//! Tests if the given type implements the LossFunction interface.
template<typename T>
struct has_data_member : decltype(internal::test_loss_supports_data<T>(0))::type {};
template<typename T, typename U = typename T::DataType>
struct has_data_member : decltype(internal::test_loss_supports_data<T, U>(0))::type {};
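// Stand-alone illustration (not part of nsoptim) of the same detection idiom
// with two template parameters: overload resolution prefers the `int` overload,
// but SFINAE removes it unless `T` has a `data()` member whose return type is
// exactly `U`. `ExampleLoss` is a toy type used only here; std::false_type,
// std::is_same, and std::declval are already in use above.
template<typename, typename>
static auto example_test_has_data(double) -> std::false_type;

template<typename T, typename U>
static auto example_test_has_data(int)
    -> std::is_same<decltype(std::declval<T>().data()), U>;

template<typename T, typename U>
struct example_has_data : decltype(example_test_has_data<T, U>(0))::type {};

struct ExampleLoss { int data() const { return 0; } };
static_assert(example_has_data<ExampleLoss, int>::value,
              "ExampleLoss::data() returns int");
static_assert(!example_has_data<ExampleLoss, double>::value,
              "ExampleLoss::data() does not return double");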
//! Type trait to check whether a loss function supports evaluation of coefficients of type `U`.
template<typename T, typename U>
......