Commit 479404f0 authored by davidkep

remove unneeded Copy/Clone methods

parent c5a467c2
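Context for the change (editorial note, not part of the commit): on the penalty classes below, the removed `Copy()` and `Clone()` members did nothing beyond forwarding to the copy constructor, so plain copy construction is an exact replacement. A minimal sketch with a hypothetical stand-in class; the real penalty constructors are not reproduced here.

```cpp
// Minimal sketch, not from the repository: a stand-in with the same copy
// semantics as the penalty classes (plain value members, default copy ctor).
#include <iostream>

struct PenaltyLike {
  double alpha;
  double lambda;
  // Before this commit the classes also offered:
  //   PenaltyLike Copy()        { return PenaltyLike(*this); }
  //   PenaltyLike Clone() const { return PenaltyLike(*this); }
  // Both were identical to copy construction, hence "unneeded".
};

int main() {
  PenaltyLike original{0.5, 1.0};
  PenaltyLike duplicate = original;  // replaces original.Copy() / original.Clone()
  duplicate.lambda = 2.0;            // independent of `original`
  std::cout << original.lambda << " vs " << duplicate.lambda << '\n';  // 1 vs 2
  return 0;
}
```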
@@ -39,14 +39,6 @@ class AdaptiveEnPenalty : public PenaltyFunction, public ConvexFunction<Adaptive
~AdaptiveEnPenalty() = default;
AdaptiveEnPenalty Copy() {
return AdaptiveEnPenalty(*this);
}
AdaptiveEnPenalty Clone() const {
return AdaptiveEnPenalty(*this);
}
void lambda(const double lambda) noexcept {
lambda_ = lambda;
}
@@ -131,14 +123,6 @@ class AdaptiveLassoPenalty : public PenaltyFunction, public ConvexFunction<Adapt
~AdaptiveLassoPenalty() = default;
AdaptiveLassoPenalty Copy() {
return AdaptiveLassoPenalty(*this);
}
AdaptiveLassoPenalty Clone() const {
return AdaptiveLassoPenalty(*this);
}
void alpha(const double) const noexcept {}
double alpha() const noexcept {
......
@@ -38,14 +38,6 @@ class EnPenalty : public PenaltyFunction, public ConvexFunction<EnPenalty> {
~EnPenalty() = default;
EnPenalty Copy() noexcept {
return EnPenalty(*this);
}
EnPenalty Clone() const noexcept {
return EnPenalty(*this);
}
operator AdaptiveEnPenalty() const {
return AdaptiveEnPenalty(std::make_shared<const arma::vec>(), alpha_, lambda_);
}
@@ -122,14 +114,6 @@ class LassoPenalty : public PenaltyFunction, public ConvexFunction<LassoPenalty>
~LassoPenalty() = default;
LassoPenalty Copy() noexcept {
return LassoPenalty(*this);
}
LassoPenalty Clone() const noexcept {
return LassoPenalty(*this);
}
double alpha() const noexcept {
return 1.;
}
@@ -209,14 +193,6 @@ class RidgePenalty : public PenaltyFunction, public ConvexFunction<RidgePenalty>
~RidgePenalty() = default;
RidgePenalty Copy() noexcept {
return RidgePenalty(*this);
}
RidgePenalty Clone() const noexcept {
return RidgePenalty(*this);
}
double alpha() const noexcept {
return 0.;
}
......
@@ -141,13 +141,6 @@ class WeightedLsLoss : public LossFunction<PredictorResponseData>,
weighted_pred_norm_ * arma::norm(x.beta - y.beta, 2);
}
//! Clone the weighted LS loss function. The returned object does not share anything with this weighted LS loss.
//!
//! @return a deep copy of this weighted LS loss function.
WeightedLsLoss Clone() const {
return WeightedLsLoss(*this, std::true_type{});
}
//! Evaluate the gradient of the weighted LS loss at the given coefficient value.
//!
//! The gradient of the LS loss is given by
@@ -205,14 +198,6 @@ class WeightedLsLoss : public LossFunction<PredictorResponseData>,
return include_intercept_;
}
protected:
WeightedLsLoss(const WeightedLsLoss& other, std::true_type) noexcept :
include_intercept_(other.include_intercept_),
data_(std::make_shared<const PredictorResponseData>(*other.data_)),
mean_weight_(other.mean_weight_),
sqrt_weights_(std::make_shared<const arma::vec>(*other.sqrt_weights_)),
weighted_pred_norm_(other.weighted_pred_norm_) {}
private:
bool include_intercept_;
std::shared_ptr<const PredictorResponseData> data_;
@@ -311,13 +296,6 @@ class LsLoss : public LossFunction<PredictorResponseData>,
pred_norm_ * arma::norm(x.beta - y.beta, 2);
}
//! Clone the LS loss function. The returned object does not share anything with this LS loss.
//!
//! @return a deep copy of this LS loss function.
LsLoss Clone() const {
return LsLoss(*this);
}
//! Evaluate the gradient of the weighted LS loss at the given coefficient value.
//!
//! The gradient of the LS loss is given by
......
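Editorial note, not from the commit: for the loss classes the removed `Clone()` was more than a copy-constructor forward. It deep-copied the `shared_ptr` members (`data_`, `sqrt_weights_`) via a private constructor, whereas the default copy constructor shares them. The sketch below illustrates that difference with a hypothetical stand-in; it is not the actual `WeightedLsLoss` API.

```cpp
// Hypothetical stand-in, assuming the behaviour visible in the removed code:
// Clone() re-allocated the shared data, the copy constructor shares it.
#include <iostream>
#include <memory>
#include <vector>

struct WeightedLossLike {
  std::shared_ptr<const std::vector<double>> sqrt_weights;

  // What the removed Clone() did (through a private "deep copy" constructor):
  WeightedLossLike DeepClone() const {
    return WeightedLossLike{
        std::make_shared<const std::vector<double>>(*sqrt_weights)};
  }
};

int main() {
  WeightedLossLike loss{std::make_shared<const std::vector<double>>(
      std::vector<double>{1.0, 2.0, 3.0})};

  WeightedLossLike shared_copy = loss;            // default copy: shares the vector
  WeightedLossLike deep_copy = loss.DeepClone();  // what Clone() used to return

  std::cout << (shared_copy.sqrt_weights == loss.sqrt_weights) << '\n';  // 1
  std::cout << (deep_copy.sqrt_weights == loss.sqrt_weights) << '\n';    // 0
  return 0;
}
```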
@@ -565,19 +565,6 @@ class GenericLinearizedAdmmOptimizer : public Optimizer<typename ProxOp::LossFun
~GenericLinearizedAdmmOptimizer() = default;
GenericLinearizedAdmmOptimizer Clone() const {
if (!loss_ || !penalty_) {
return GenericLinearizedAdmmOptimizer();
}
GenericLinearizedAdmmOptimizer clone(loss_->Clone(), penalty_->Clone(), prox_, config_);
return clone;
}
GenericLinearizedAdmmOptimizer Copy() {
return GenericLinearizedAdmmOptimizer(*this);
}
void Reset() {
state_.v.reset();
}
@@ -877,19 +864,6 @@ class AdmmVarStepOptimizer : public Optimizer<LossFunction, PenaltyFunction, Coe
~AdmmVarStepOptimizer() = default;
AdmmVarStepOptimizer Clone() const {
if (!loss_ || !penalty_) {
return AdmmVarStepOptimizer();
}
AdmmVarStepOptimizer clone(loss_->Clone(), penalty_->Clone());
return clone;
}
AdmmVarStepOptimizer Copy() {
return AdmmVarStepOptimizer(*this);
}
void Reset() {
state_.gap = -1;
data_ = nullptr;
......
@@ -81,21 +81,6 @@ class AugmentedRidgeOptimizer : public Optimizer<LossFunction, RidgePenalty, Reg
~AugmentedRidgeOptimizer() = default;
AugmentedRidgeOptimizer Clone() const {
if (!loss_ || !penalty_) {
return AugmentedRidgeOptimizer();
}
AugmentedRidgeOptimizer clone(loss_->Clone(), penalty_->Clone());
clone.mean_x_ = mean_x_;
clone.mean_y_ = mean_y_;
return clone;
}
AugmentedRidgeOptimizer Copy() {
return AugmentedRidgeOptimizer(*this);
}
void Reset() {}
LossFunction& loss() const {
......
@@ -92,22 +92,6 @@ class DalEnOptimizer : public Optimizer<LossFunction, PenaltyFunction, Regressio
DalEnOptimizer(DalEnOptimizer&& other) = default;
DalEnOptimizer& operator=(DalEnOptimizer&& other) = default;
//! Create a deep copy of the DAL optimizer, i.e., the state is lost and no data is shared between the original
//! and the returned object.
//!
//! @return an independent clone of this DAL optimizer.
DalEnOptimizer Clone() const {
return DalEnOptimizer(*this, std::true_type{});
}
//! Create a (shallow) copy of the DAL optimizer, including its state; some data may be shared between the
//! original and the returned object.
//!
//! @return a shallow copy of this DAL optimizer.
DalEnOptimizer Copy() {
return DalEnOptimizer(*this);
}
//! Reset the optimizer. This completely purges the current *state*.
void Reset() {
eta_.nxlambda = -1;
@@ -354,16 +338,6 @@ class DalEnOptimizer : public Optimizer<LossFunction, PenaltyFunction, Regressio
using SoftthresholdCutoffType = typename std::conditional<traits::is_adaptive<PenaltyFunction>::value,
arma::vec, double>::type;
//! Private constructor that *clones* the given other optimizer.
DalEnOptimizer(const DalEnOptimizer& other, std::true_type) noexcept
: config_(other.config_),
loss_(other.loss_ ? LossFunctionPtr(new LossFunction(other.loss_->Clone())) : nullptr),
penalty_(other.penalty_ ? PenaltyFunctionPtr(new PenaltyFunction(other.penalty_->Clone())) :
nullptr),
coefs_(other.coefs_),
data_(loss_.get()), hessian_(other.hessian_, data_),
eta_(), convergence_tolerance_(other.convergence_tolerance_) {}
//! Minimize the AL function `phi`.
//!
//! @param softthr_cutoff the cutoff value(s) for the soft-thresholding function.
......
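Editorial note, not from the commit: the removed optimizer `Clone()` methods, in the ADMM optimizers above, the DAL optimizer here, and the MM optimizer below, followed the same pattern one level up: return an empty optimizer when no loss/penalty is set, otherwise rebuild the clone from `loss_->Clone()` and `penalty_->Clone()` so nothing is shared, while `Copy()` was plain copy construction. A generic sketch of that pattern with hypothetical types (C++14 or later):

```cpp
// Generic sketch of the removed optimizer Clone() pattern (hypothetical types,
// not the library's actual classes).
#include <memory>

struct Loss {
  double scale = 1.0;
  Loss Clone() const { return *this; }  // stand-in for LossFunction::Clone()
};

struct OptimizerLike {
  std::unique_ptr<Loss> loss;
  double state = 0.0;  // iteration state that a deep clone discards

  // What the removed Clone() methods did: empty optimizer when nothing is set,
  // otherwise a fresh optimizer built from a clone of the held function(s).
  OptimizerLike Clone() const {
    if (!loss) {
      return OptimizerLike{};
    }
    return OptimizerLike{std::make_unique<Loss>(loss->Clone()), 0.0};
  }
};

int main() {
  OptimizerLike opt{std::make_unique<Loss>(), 42.0};
  OptimizerLike clone = opt.Clone();  // independent loss, fresh state
  return static_cast<int>(clone.state);  // 0
}
```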
@@ -278,33 +278,6 @@ class MMOptimizer : public Optimizer<LossFunction, PenaltyFunction, Coefficients
~MMOptimizer() = default;
//! Create a deep copy of the MM algorithm, i.e., the state is lost and no data is shared between the original
//! and the returned object.
//!
//! @return an independent clone of this MM algorithm.
MMOptimizer Clone() const {
MMOptimizer clone(config_);
if (loss_) {
clone.loss(*loss_);
}
if (penalty_) {
clone.penalty(*penalty_);
}
clone.optimizer_ = optimizer_.Clone();
clone.inner_convergence_tolerance_ = inner_convergence_tolerance_;
clone.coefs_ = coefs_;
clone.convergence_tolerance_ = convergence_tolerance_;
return clone;
}
//! Create a (shallow) copy of the MM algorithm, including its state; some data may be shared between the
//! original and the returned object.
//!
//! @return a shallow copy of this MM algorithm.
MMOptimizer Copy() {
return MMOptimizer(*this);
}
//! Reset the optimizer. This completely purges the current *state*.
void Reset() {
coefs_.Reset();
......