Commit fbb3a219 authored by davidkep

Improve the linearized ADMM optimizer for weighted LS

parent dd82d05d
@@ -9,6 +9,8 @@
#ifndef NSOPTIM_OBJECTIVE_LS_LOSS_HPP_
#define NSOPTIM_OBJECTIVE_LS_LOSS_HPP_
#include <algorithm>
#include <nsoptim/armadillo.hpp>
#include <nsoptim/objective/convex.hpp>
#include <nsoptim/container/data.hpp>
@@ -62,7 +64,7 @@ class WeightedLsLoss : public LossFunction<PredictorResponseData>,
const bool include_intercept = true) noexcept
: include_intercept_(include_intercept), data_(data), mean_weight_(arma::mean(*weights)),
sqrt_weights_(std::make_shared<const arma::vec>(arma::sqrt(*weights / mean_weight_))),
weighted_pred_norm_(ls_loss::TwoNormUpper(data->cx(), *sqrt_weights_)) {}
weighted_pred_norm_(-1) {}
WeightedLsLoss(std::shared_ptr<const PredictorResponseData> data, const arma::vec& weights,
const bool include_intercept = true) noexcept
@@ -116,6 +118,25 @@ class WeightedLsLoss : public LossFunction<PredictorResponseData>,
//! @return the difference between `x` and `y`.
template<typename T>
double Difference(const RegressionCoefficients<T>& x, const RegressionCoefficients<T>& y) const {
const double weighted_pred_norm = (weighted_pred_norm_ < 0) ? ls_loss::TwoNormUpper(data_->cx(), *sqrt_weights_) :
weighted_pred_norm_;
return std::sqrt(sqrt_weights_->n_elem * mean_weight_) * std::abs(x.intercept - y.intercept) +
weighted_pred_norm * arma::norm(x.beta - y.beta, 2);
}
//! Get the difference between two sets of regression coefficients.
//!
//! For the weighted LS loss, the difference is an upper bound for the 2-norm of the matrix-vector product
//! ||W . X . (beta1 - beta2) + w (mu1 - mu2)||_2 <= |mu1 - mu2| sqrt(sum(w)) + ||W X||_2 ||beta1 - beta2||_2
//!
//! @param x a set of regression coefficients.
//! @param y the other set of regression coefficients.
//! @return the difference between `x` and `y`.
template<typename T>
double Difference(const RegressionCoefficients<T>& x, const RegressionCoefficients<T>& y) {
if (weighted_pred_norm_ < 0) {
weighted_pred_norm_ = ls_loss::TwoNormUpper(data_->cx(), *sqrt_weights_);
}
return std::sqrt(sqrt_weights_->n_elem * mean_weight_) * std::abs(x.intercept - y.intercept) +
weighted_pred_norm_ * arma::norm(x.beta - y.beta, 2);
}
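The commit defers the expensive norm bound from the constructor to the first call of Difference, with -1 as the "not yet computed" sentinel; the const overload recomputes instead of caching. Below is a minimal sketch of that pattern, independent of nsoptim (CachedNormBound and the Frobenius bound are illustrative stand-ins for ls_loss::TwoNormUpper):

#include <utility>
#include <armadillo>

class CachedNormBound {
 public:
  explicit CachedNormBound(arma::mat x) : x_(std::move(x)), norm_bound_(-1) {}

  // Non-const overload: compute the bound once, cache it for later calls.
  double Bound() {
    if (norm_bound_ < 0) {
      norm_bound_ = arma::norm(x_, "fro");  // Frobenius norm bounds ||X||_2 from above.
    }
    return norm_bound_;
  }

  // Const overload: cannot cache, so recompute whenever no cached value exists.
  double Bound() const {
    return (norm_bound_ < 0) ? arma::norm(x_, "fro") : norm_bound_;
  }

 private:
  arma::mat x_;
  double norm_bound_;
};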
@@ -137,13 +158,13 @@ class WeightedLsLoss : public LossFunction<PredictorResponseData>,
template<typename T>
GradientType<T> Gradient(const RegressionCoefficients<T>& coefs) const {
if (include_intercept_) {
const arma::vec neg_weighted_residuals = mean_weight_ * (*sqrt_weights_) % (*sqrt_weights_) %
const arma::vec neg_weighted_residuals = mean_weight_ * arma::square(*sqrt_weights_) %
(data_->cx() * coefs.beta + coefs.intercept - data_->cy());
return GradientType<T>(arma::mean(neg_weighted_residuals),
arma::mean(data_->cx().each_col() % neg_weighted_residuals, 0));
}
const arma::vec neg_weighted_residuals = mean_weight_ * (*sqrt_weights_) % (*sqrt_weights_) %
const arma::vec neg_weighted_residuals = mean_weight_ * arma::square(*sqrt_weights_) %
(data_->cx() * coefs.beta - data_->cy());
return GradientType<T>(0, -arma::mean(data_->cx().each_col() % neg_weighted_residuals, 0));
}
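Since the rewritten Gradient forms the weights as mean_weight_ * arma::square(*sqrt_weights_), a standalone finite-difference check of the underlying formula can be useful; this sketch assumes the loss is the (1/(2n))-scaled weighted sum of squares implied by the means taken above:

#include <armadillo>
#include <iostream>

int main() {
  arma::arma_rng::set_seed(1);
  const arma::mat x = arma::randn(50, 3);
  const arma::vec y = arma::randn(50);
  const arma::vec w = arma::randu(50) + 0.5;
  const arma::vec beta = arma::randn(3);
  const double n = y.n_elem;
  // Analytic gradient of (1/(2n)) * sum_i w_i * (y_i - x_i' beta)^2 w.r.t. beta.
  const arma::vec grad = x.t() * (w % (x * beta - y)) / n;
  // Finite-difference check of the first coordinate.
  auto loss = [&](const arma::vec& b) {
    return arma::dot(w, arma::square(y - x * b)) / (2.0 * n);
  };
  const double eps = 1e-6;
  arma::vec beta_eps = beta;
  beta_eps(0) += eps;
  std::cout << grad(0) << " ~= " << (loss(beta_eps) - loss(beta)) / eps << "\n";
  return 0;
}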
@@ -158,8 +179,8 @@ class WeightedLsLoss : public LossFunction<PredictorResponseData>,
//! Access the un-normalized weights used by this weighted LS loss function.
//!
//! @return a vector of weights.
const WeightsType weights() const noexcept {
return mean_weight_ * (*sqrt_weights_) % (*sqrt_weights_);
WeightsType weights() const noexcept {
return mean_weight_ * arma::square(*sqrt_weights_);
}
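The accessor relies on the normalization fixed in the constructor: sqrt_weights_ = sqrt(w / mean(w)), so mean_weight_ * arma::square(*sqrt_weights_) recovers the original weights exactly. A one-file sanity check of that round trip:

#include <cassert>
#include <armadillo>

int main() {
  const arma::vec w = {0.5, 1.0, 2.5};
  const double mean_w = arma::mean(w);
  const arma::vec sqrt_w = arma::sqrt(w / mean_w);
  // mean(w) * square(sqrt(w / mean(w))) == w, elementwise.
  assert(arma::approx_equal(mean_w * arma::square(sqrt_w), w, "absdiff", 1e-12));
  return 0;
}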
//! Access the normalized sqrt-weights used by this weighted LS loss function.
@@ -213,8 +234,7 @@ class LsLoss : public LossFunction<PredictorResponseData>,
struct is_ls_loss_tag {};
explicit LsLoss(std::shared_ptr<const PredictorResponseData> data, const bool include_intercept = true)
: include_intercept_(include_intercept), data_(data),
pred_norm_((data_->n_pred() < data_->n_obs()) ? arma::norm(data->cx(), "inf") : arma::norm(data->cx(), 1)) {}
: include_intercept_(include_intercept), data_(data), pred_norm_(-1) {}
LsLoss(const LsLoss& other) = default;
LsLoss(LsLoss&& other) = default;
@@ -268,6 +288,25 @@ class LsLoss : public LossFunction<PredictorResponseData>,
//! @return the difference between `x` and `y`.
template<typename T>
double Difference(const RegressionCoefficients<T>& x, const RegressionCoefficients<T>& y) const {
const double pred_norm = (pred_norm_ < 0) ? std::min(arma::norm(data_->cx(), "inf"), arma::norm(data_->cx(), 1)) :
pred_norm_;
return std::sqrt(data_->n_obs()) * std::abs(x.intercept - y.intercept) +
pred_norm * arma::norm(x.beta - y.beta, 2);
}
//! Get the difference between two sets of regression coefficients.
//!
//! For the LS loss, the difference is an upper bound for the 2-norm of the matrix-vector product
//! ||X . (beta1 - beta2) + (mu1 - mu2)||_2 <= |mu1 - mu2| sqrt(n) + ||X||_2 ||beta1 - beta2||_2
//!
//! @param x a set of regression coefficients.
//! @param y the other set of regression coefficients.
//! @return the difference between `x` and `y`.
template<typename T>
double Difference(const RegressionCoefficients<T>& x, const RegressionCoefficients<T>& y) {
if (pred_norm_ < 0) {
pred_norm_ = std::min(arma::norm(data_->cx(), "inf"), arma::norm(data_->cx(), 1));
}
return std::sqrt(data_->n_obs()) * std::abs(x.intercept - y.intercept) +
pred_norm_ * arma::norm(x.beta - y.beta, 2);
}
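For LsLoss the cheap surrogate is min(||X||_inf, ||X||_1) instead of the exact spectral norm. A standalone snippet to compare the cheap norms against ||X||_2 for a given design matrix (recall the standard bound ||X||_2 <= sqrt(||X||_1 * ||X||_inf)):

#include <cmath>
#include <iostream>
#include <armadillo>

int main() {
  arma::arma_rng::set_seed(42);
  const arma::mat x = arma::randn(100, 10);
  const double norm_1 = arma::norm(x, 1);        // max absolute column sum
  const double norm_inf = arma::norm(x, "inf");  // max absolute row sum
  const double norm_2 = arma::norm(x, 2);        // spectral norm (expensive)
  std::cout << "||X||_1 = " << norm_1 << ", ||X||_inf = " << norm_inf
            << ", sqrt(||X||_1 * ||X||_inf) = " << std::sqrt(norm_1 * norm_inf)
            << ", ||X||_2 = " << norm_2 << "\n";
  return 0;
}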
......
@@ -155,6 +155,7 @@ class AdmmLinearOptimizer : public Optimizer<LossFunction, PenaltyFunction, Coef
using PenaltyPtr = std::unique_ptr<PenaltyFunction>;
using IsWeightedTag = typename traits::is_weighted<LossFunction>::type;
using IsAdaptiveTag = typename traits::is_adaptive<PenaltyFunction>::type;
using WeightsType = typename std::conditional<IsWeightedTag::value, arma::vec, char>::type;
static_assert(traits::is_en_penalty<PenaltyFunction>::value, "PenaltyFunction must be an EN-type penalty.");
static_assert(traits::is_ls_loss<LossFunction>::value, "LossFunction must be a least-squares-type loss.");
@@ -173,7 +174,7 @@ class AdmmLinearOptimizer : public Optimizer<LossFunction, PenaltyFunction, Coef
//! @param config optional configuration for the ADMM algorithm.
explicit AdmmLinearOptimizer(const AdmmConfiguration& config = admm_optimizer::kDefaultAdmmConfiguration) noexcept
: config_(config), loss_(nullptr), penalty_(nullptr), data_(nullptr) {}
: config_(config), loss_(nullptr), penalty_(nullptr) {}
//! Initialize the optimizer using the given (weighted) LS loss function and EN-type penalty.
//!
@@ -181,7 +182,7 @@ class AdmmLinearOptimizer : public Optimizer<LossFunction, PenaltyFunction, Coef
//! @param penalty the EN-type penalty function.
AdmmLinearOptimizer(const LossFunction& loss, const PenaltyFunction& penalty,
const AdmmConfiguration& config = admm_optimizer::kDefaultAdmmConfiguration) noexcept
: config_(config), loss_(new LossFunction(loss)), penalty_(new PenaltyFunction(penalty)), data_(nullptr) {}
: config_(config), loss_(new LossFunction(loss)), penalty_(new PenaltyFunction(penalty)) {}
//! Default copy constructor.
//!
@@ -195,8 +196,6 @@ class AdmmLinearOptimizer : public Optimizer<LossFunction, PenaltyFunction, Coef
penalty_(other.penalty_ ? new PenaltyFunction(*other.penalty_) : nullptr),
coefs_(other.coefs_),
state_(other.state_),
weighted_data_(other.weighted_data_ ? new PredictorResponseData(*other.weighted_data_) : nullptr),
data_(weighted_data_.get()),
step_size_(other.step_size_),
norm_x_sq_inv_(other.norm_x_sq_inv_),
convergence_tolerance_(other.convergence_tolerance_) {}
@@ -231,7 +230,7 @@ class AdmmLinearOptimizer : public Optimizer<LossFunction, PenaltyFunction, Coef
}
void Reset() {
data_ = nullptr;
state_.v.reset();
}
LossFunction& loss() const {
@@ -242,10 +241,19 @@ class AdmmLinearOptimizer : public Optimizer<LossFunction, PenaltyFunction, Coef
}
void loss(const LossFunction& loss) noexcept {
if (DataChanged(loss, IsWeightedTag{})) {
data_ = nullptr;
}
loss_.reset(new LossFunction(loss));
const double norm_x = loss_->IncludeIntercept() ?
arma::norm(arma::join_rows(arma::ones(loss_->data().n_obs(), 1), loss_->data().cx()), 2) :
arma::norm(loss_->data().cx(), 2);
if (config_.tau < 0) {
step_size_ = std::sqrt(norm_x);
} else {
step_size_ = 1 / config_.tau;
}
norm_x_sq_inv_ = 1 / (norm_x * norm_x);
UpdateWeights(IsWeightedTag{});
}
PenaltyFunction& penalty() const {
@@ -300,15 +308,10 @@ class AdmmLinearOptimizer : public Optimizer<LossFunction, PenaltyFunction, Coef
throw std::logic_error("no loss set");
}
if (!data_) {
UpdateData(IsWeightedTag{});
}
coefs_ = start;
state_ = State {
loss_->IncludeIntercept() ? ProximalLs(data_->cx() * start.beta, start.intercept, IsWeightedTag{}) :
((data_->cx() * start.beta + step_size_ * data_->cy()) / (1 + step_size_)),
arma::zeros(data_->n_obs())
ProximalLs(loss_->data().cx() * coefs_.beta, IsWeightedTag{}),
arma::zeros(loss_->data().n_obs())
};
return Optimize(max_it);
}
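For orientation, a hypothetical end-to-end call sequence. LsLoss, AdmmLinearOptimizer, RegressionCoefficients, and Optimize appear in this diff; the nsoptim umbrella header, the EnPenalty constructor arguments, and the coefs field on the returned optimum are assumptions:

#include <memory>
#include <nsoptim.hpp>  // assumed umbrella header

nsoptim::RegressionCoefficients<arma::vec> FitEnSketch(
    const std::shared_ptr<const nsoptim::PredictorResponseData>& data) {
  nsoptim::LsLoss loss(data, /*include_intercept=*/true);
  nsoptim::EnPenalty penalty(/*alpha=*/0.5, /*lambda=*/0.1);  // assumed signature
  nsoptim::AdmmLinearOptimizer<nsoptim::LsLoss, nsoptim::EnPenalty,
                               nsoptim::RegressionCoefficients<arma::vec>>
      optim(loss, penalty);
  const auto optimum = optim.Optimize(/*max_it=*/1000);
  return optimum.coefs;  // assumed field name on the returned optimum
}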
@@ -327,19 +330,14 @@ class AdmmLinearOptimizer : public Optimizer<LossFunction, PenaltyFunction, Coef
throw std::logic_error("no penalty set");
}
// Check if the data needs to be updated
const bool reset = !data_;
if (reset) {
UpdateData(IsWeightedTag{});
}
const PredictorResponseData& data = loss_->data();
const bool include_intercept = loss_->IncludeIntercept();
const double scaled_lambda = ScaledLambda(IsWeightedTag{});
const bool check_empty = admm_optimizer::AllZero(coefs_.beta) || (coefs_.beta.n_elem != data_->n_pred());
const double scaled_lambda = data.n_obs() * penalty_->lambda();
const bool check_empty = admm_optimizer::AllZero(coefs_.beta) || (coefs_.beta.n_elem != data.n_pred());
// Check if the coefficients are correct.
if (coefs_.beta.n_elem != data_->n_pred()) {
coefs_.beta.zeros(data_->n_pred());
if (coefs_.beta.n_elem != data.n_pred()) {
coefs_.beta.zeros(data.n_pred());
coefs_.intercept = 0;
}
@@ -347,32 +345,28 @@ class AdmmLinearOptimizer : public Optimizer<LossFunction, PenaltyFunction, Coef
metrics->AddDetail("type", "linearized");
// This is the convergence tolerance for the "un-standardized" residual.
const double conv_tol = convergence_tolerance_ * penalty_->alpha() * scaled_lambda / step_size_;
const auto en_cutoff = DetermineCutoff(scaled_lambda, IsAdaptiveTag{});
const double en_multiplier = 1 / (1 + norm_x_sq_inv_ * scaled_lambda * (1 - penalty_->alpha()));
const double conv_tol = convergence_tolerance_ * penalty_->alpha() * scaled_lambda;
const auto en_cutoff = DetermineCutoff(IsAdaptiveTag{});
const double en_multiplier = 1 / (1 + norm_x_sq_inv_ * step_size_ * scaled_lambda * (1 - penalty_->alpha()));
double gap = 0;
const double ss_scaling = 1 / (1 + step_size_);
const arma::vec scaled_y = data_->cy() * step_size_ * ss_scaling;
arma::vec fitted = data_->cx() * coefs_.beta;
arma::vec fitted = data.cx() * coefs_.beta;
if (include_intercept) {
coefs_.intercept = ComputeIntercept(fitted, IsWeightedTag{});
fitted += InterceptUpdate(IsWeightedTag{});
}
if (check_empty && !admm_optimizer::AnyViolateKKT(data_->cx(), data_->cy() - fitted, scaled_lambda / step_size_,
if (check_empty && !admm_optimizer::AnyViolateKKT(data.cx(), Residuals(fitted, IsWeightedTag{}), scaled_lambda,
*penalty_)) {
// None of the predictors will be activated for the current penalty. Return the current coefficient value.
return FinalizeResult(0, 0, OptimumStatus::kOk, std::move(metrics));
}
// Check if the state needs to be re-initialized
if (reset || state_.v.n_elem != fitted.n_elem) {
if (state_.v.n_elem != fitted.n_elem) {
// This is the ProximalLS function, but inlined...
state_.v = ss_scaling * fitted + scaled_y;
state_.l.zeros(data_->n_obs());
state_.v = ProximalLs(fitted, IsWeightedTag{});
state_.l.zeros(data.n_obs());
}
// Adjust the fitted values
fitted -= state_.v;
@@ -390,16 +384,15 @@ class AdmmLinearOptimizer : public Optimizer<LossFunction, PenaltyFunction, Coef
prev_state.l = state_.l;
// remember: fitted is already fitted - state_.v
coefs_.beta = en_multiplier * SoftThreshold(coefs_.beta, -norm_x_sq_inv_,
data_->cx().t() * (fitted + state_.l), en_cutoff);
data.cx().t() * (fitted + state_.l), en_cutoff);
fitted = data_->cx() * coefs_.beta;
fitted = data.cx() * coefs_.beta;
if (include_intercept) {
coefs_.intercept = ComputeIntercept(fitted, IsWeightedTag{});
fitted += InterceptUpdate(IsWeightedTag{});
}
// This is the ProximalLS function, but inlined...
state_.v = ss_scaling * (fitted + state_.l) + scaled_y;
state_.v = ProximalLs(fitted + state_.l, IsWeightedTag{});
fitted -= state_.v;
state_.l += fitted;
@@ -435,111 +428,77 @@ class AdmmLinearOptimizer : public Optimizer<LossFunction, PenaltyFunction, Coef
return MakeOptimum(*loss_, *penalty_, coefs_, std::move(metrics), status, message);
}
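The early return in Optimize relies on the KKT conditions of the elastic net at beta = 0. A hedged sketch of that check (the real signature of admm_optimizer::AnyViolateKKT is not shown in this diff):

#include <armadillo>

// beta = 0 can only be optimal for the l1 part if max_j |x_j' r| <= alpha * lambda;
// any larger correlation between a predictor and the residuals activates it.
bool AnyViolateKktSketch(const arma::mat& x, const arma::vec& residuals,
                         const double scaled_lambda, const double alpha) {
  return arma::abs(x.t() * residuals).max() > alpha * scaled_lambda;
}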
//! Simple check if the data in the given loss function is different from the data in the private loss function.
inline bool DataChanged(const LossFunction& loss, std::false_type) const noexcept {
return loss_ && (&loss.data() != &loss_->data());
//! Compute the **doubly** weighted residuals.
//! This does not compute the true "weighted" residuals W.(y - mu - X.beta), but rather W'W.(y - mu - X.beta)!
arma::vec Residuals(const arma::vec& fitted, std::true_type) const {
return weights_ % (loss_->data().cy() - fitted);
}
//! Simple check if the data in the given loss function is different from the data in the private loss function.
inline bool DataChanged(const LossFunction& loss, std::true_type) const noexcept {
return loss_ && ((&loss.data() != &loss_->data()) || (&loss.sqrt_weights() != &loss_->sqrt_weights()));
}
//! Get the appropriately scaled lambda for a weighted LS loss
inline double ScaledLambda(std::true_type) const noexcept {
return data_->n_obs() * penalty_->lambda() * step_size_ / loss_->mean_weight();
}
//! Get the appropriately scaled lambda for an unweighted LS loss
inline double ScaledLambda(std::false_type) const noexcept {
return data_->n_obs() * penalty_->lambda() * step_size_;
//! Compute the unweighted residuals.
arma::vec Residuals(const arma::vec& fitted, std::false_type) const noexcept {
return loss_->data().cy() - fitted;
}
//! Determine the cutoff for the soft-threshold function for adaptive penalties
arma::vec DetermineCutoff(const double scaled_lambda, std::true_type) const {
return penalty_->loadings() * (penalty_->alpha() * scaled_lambda * norm_x_sq_inv_);
arma::vec DetermineCutoff(std::true_type) const noexcept {
return penalty_->loadings() * DetermineCutoff(std::false_type{});
}
//! Determine the cutoff for the soft-threshold function for non-adaptive penalties
double DetermineCutoff(const double scaled_lambda, std::false_type) const {
return penalty_->alpha() * scaled_lambda * norm_x_sq_inv_;
double DetermineCutoff(std::false_type) const noexcept {
return loss_->data().n_obs() * penalty_->alpha() * penalty_->lambda() * step_size_ * norm_x_sq_inv_;
}
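DetermineCutoff supplies the threshold for the elementwise soft-threshold step in Optimize. As a generic sketch of that operation (the actual SoftThreshold overloads used above are not shown in this diff):

#include <armadillo>

// st(z, c) = sign(z) * max(|z| - c, 0), applied elementwise.
arma::vec SoftThresholdSketch(const arma::vec& z, const double cutoff) {
  return arma::sign(z) % arma::max(arma::abs(z) - cutoff,
                                   arma::zeros<arma::vec>(z.n_elem));
}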
//! Update the data for a weighted LS loss
void UpdateData(std::true_type) {
weighted_data_.reset(new PredictorResponseData(loss_->data().cx().each_col() % loss_->sqrt_weights(),
loss_->data().cy() % loss_->sqrt_weights()));
data_ = weighted_data_.get();
double norm_x = 0;
//! Apply the proximal operator for the weighted LS loss with intercept
arma::vec ProximalLs(const arma::vec& v, std::true_type) const {
if (loss_->IncludeIntercept()) {
norm_x = arma::norm(arma::join_rows(data_->cx(), loss_->sqrt_weights()), 2);
return (v + step_size_ * weights_ % (loss_->data().cy() - coefs_.intercept)) /
(1 + step_size_ * weights_);
} else {
norm_x = arma::norm(data_->cx(), 2);
return (v + step_size_ * weights_ % loss_->data().cy()) / (1 + step_size_ * weights_);
}
UpdateStepSize(norm_x);
}
//! Update the data for an un-weighted LS loss
void UpdateData(std::false_type) {
data_ = &(loss_->data());
double norm_x;
//! Apply the proximal operator for the LS loss with intercept.
arma::vec ProximalLs(const arma::vec& v, std::false_type) const {
if (loss_->IncludeIntercept()) {
norm_x = arma::norm(arma::join_rows(arma::ones(data_->n_obs(), 1), data_->cx()), 2);
} else {
norm_x = arma::norm(data_->cx(), 2);
}
UpdateStepSize(norm_x);
}
//! Update the step-size based on the 2-norm of the predictors
void UpdateStepSize(const double norm_x) {
if (config_.tau < 0) {
step_size_ = std::sqrt(norm_x);
return (v + step_size_ * (loss_->data().cy() - coefs_.intercept)) / (1 + step_size_);
} else {
step_size_ = 1 / config_.tau;
return (v + step_size_ * loss_->data().cy()) / (1 + step_size_);
}
norm_x_sq_inv_ = 1 / (norm_x * norm_x);
}
//! Apply the proximal operator to vector `v + intercept . w` for the weighted LS loss.
arma::vec ProximalLs(const arma::vec& v, const double intercept, std::true_type) const {
return (v + intercept * loss_->sqrt_weights() + step_size_ * data_->cy()) / (1 + step_size_);
}
//! Apply the proximal operator to vector `v + intercept` for the LS loss.
arma::vec ProximalLs(const arma::vec& v, const double intercept, std::false_type) const {
return (v + intercept + step_size_ * data_->cy()) / (1 + step_size_);
}
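Both ProximalLs overloads evaluate the proximal operator of the (weighted) LS data-fit term in closed form. A standalone sketch with the derivation in comments, assuming the weighted term f(v) = 0.5 * sum_i w_i * (y_i - v_i)^2:

#include <armadillo>

// Minimizing 0.5 * ||v - u||_2^2 + tau * 0.5 * sum_i w_i * (y_i - v_i)^2 is
// separable; setting the per-element derivative to zero,
//   (v_i - u_i) - tau * w_i * (y_i - v_i) = 0,
// yields v_i = (u_i + tau * w_i * y_i) / (1 + tau * w_i), i.e. the expression
// in ProximalLs above (with the intercept folded into y where needed).
arma::vec ProxWeightedLs(const arma::vec& u, const arma::vec& y,
                         const arma::vec& w, const double tau) {
  return (u + tau * w % y) / (1 + tau * w);
}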
//! Compute the intercept for weighted LS
double ComputeIntercept(const arma::vec& fitted, std::true_type) const {
return arma::mean((data_->cy() - fitted) % loss_->sqrt_weights());
// Manually square the sqrt-weights to take advantage of them being standardized.
return arma::mean((loss_->data().cy() - fitted) % weights_) / loss_->mean_weight();
}
//! Compute the intercept for unweighted LS
double ComputeIntercept(const arma::vec& fitted, std::false_type) const {
return arma::mean(data_->cy() - fitted);
return arma::mean(loss_->data().cy() - fitted);
}
double InterceptUpdate(std::false_type) const noexcept {
return coefs_.intercept;
//! Update the cached weights.
void UpdateWeights(std::true_type) noexcept {
weights_ = loss_->weights();
}
arma::vec InterceptUpdate(std::true_type) const noexcept {
return coefs_.intercept * loss_->sqrt_weights();
}
//! Update the cached weights. For the unweighted LS, this does nothing.
void UpdateWeights(std::false_type) const noexcept {}
const AdmmConfiguration config_;
LossFunctionPtr loss_;
PenaltyPtr penalty_;
Coefficients coefs_;
State state_;
std::unique_ptr<PredictorResponseData> weighted_data_;
PredictorResponseData const * data_;
WeightsType weights_;
double step_size_;
double norm_x_sq_inv_;
double convergence_tolerance_ = 1e-6;
};
//! Compute the EN regression estimate using the alternating direction method of multiplier (ADMM)
//! with variable step-size.
template <typename LossFunction, typename PenaltyFunction, typename Coefficients>
......
@@ -100,11 +100,13 @@ class DataProxy {
//! @param the new loss function.
//! @return information on what data changed.
DataChanges Update(const LossFunction& loss) noexcept {
if (data_ != &loss.data()) {
data_ = &loss.data();
return DataChanges {true, 0};
}
return DataChanges {false, 0};
// if (data_ != &loss.data() ||
// (data_ && (data_->n_obs() != loss.data().n_obs() || data_->n_pred() != loss.data().n_pred()))) {
// data_ = &loss.data();
// return DataChanges {true, 0};
// }
data_ = &loss.data();
return DataChanges {true, 0};
}
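A hypothetical caller-side sketch of how a DataChanges value might be consumed; the data_changed and weights_changed fields appear in this diff, while the tiered reaction is an assumption about the surrounding optimizers:

// weights_changed: 0 = unchanged, 1 = small change, 2 = substantial change
// (per the weighted specialization below).
template <typename Proxy, typename LossFunction>
void OnNewLoss(Proxy& proxy, const LossFunction& loss) {
  const auto changes = proxy.Update(loss);
  if (changes.data_changed || changes.weights_changed > 1) {
    // Rebuild expensive state (decompositions, cached norms).
  } else if (changes.weights_changed == 1) {
    // Weights moved only slightly: warm-started updates suffice.
  }
}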
//! Access the data set.
@@ -165,23 +167,25 @@ class DataProxy<LossFunction, std::true_type> {
//! @return information on what data changed.
DataChanges Update(const LossFunction& loss) {
// Check if it's actually new data and/or new weights
DataChanges changes {data_ != &loss.data(), 0};
if (sqrt_weights_ != &loss.sqrt_weights()) {
if (sqrt_weights_ && (sqrt_weights_->n_elem == loss.sqrt_weights().n_elem)) {
// Check by how much the weights changed. If the change is small enough, the caller does not need to know.
const double diff_norm = arma::norm(loss.sqrt_weights() - *sqrt_weights_);
if (diff_norm * diff_norm < sqrt_weights_->n_elem * _optim_dal_internal::kMaximumRelativeWeightChange) {
changes.weights_changed = 1;
} else {
changes.weights_changed = 2;
}
sqrt_weights_ = &loss.sqrt_weights();
} else {
// The dimensions of the weights vector changed.
changes.weights_changed = 2;
}
}
// DataChanges changes {data_ != &loss.data() || (data_ && (data_->n_obs() != loss.data().n_obs() ||
// data_->n_pred() != loss.data().n_pred())), 0};
// if (sqrt_weights_ != &loss.sqrt_weights()) {
// if (sqrt_weights_ && (sqrt_weights_->n_elem == loss.sqrt_weights().n_elem)) {
// // Check by how much the weights changed. If the change is small enough, the caller does not need to know.
// const double diff_norm = arma::norm(loss.sqrt_weights() - *sqrt_weights_);
// if (diff_norm * diff_norm < sqrt_weights_->n_elem * _optim_dal_internal::kMaximumRelativeWeightChange) {
// changes.weights_changed = 1;
// } else {
// changes.weights_changed = 2;
// }
// sqrt_weights_ = &loss.sqrt_weights();
// } else {
// // The dimensions of the weights vector changed.
// changes.weights_changed = 2;
// }
// }
DataChanges changes {true, 2};
if (changes.data_changed || changes.weights_changed) {
sqrt_weights_ = &loss.sqrt_weights();
@@ -398,9 +402,11 @@ class Hessian {
// PCG did not converge. Recompute the preconditioner.
metric->AddDetail("step_dir_invert_hessian", 1);
metric->AddDetail("step_dir_pcg_iter", (pcg_iters < 0) ? static_cast<int>(gradient.n_elem) : pcg_iters);
const bool success = arma::inv_sympd(preconditioner_, hessian);
if (!arma::inv_sympd(preconditioner_, hessian)) {
return kPhiStepDirInversionFailed;
}
*step_dir = preconditioner_ * gradient;
return success ? kPhiStepDirFullInversion : kPhiStepDirInversionFailed;
return kPhiStepDirFullInversion;
} else {
metric->AddDetail("step_dir_pcg_iter", static_cast<int>(pcg_iters));
}
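With this change the fallback bails out as soon as the full inversion fails, instead of first computing a step direction from a stale preconditioner. A condensed sketch of the pattern (arma::inv_sympd(out, in) returns false on failure rather than throwing):

#include <armadillo>

bool FullInversionStep(const arma::mat& hessian, const arma::vec& gradient,
                       arma::mat* preconditioner, arma::vec* step_dir) {
  if (!arma::inv_sympd(*preconditioner, hessian)) {
    return false;  // maps to kPhiStepDirInversionFailed
  }
  *step_dir = (*preconditioner) * gradient;
  return true;  // maps to kPhiStepDirFullInversion
}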
......