cosmetic rewrite of glop

This commit is contained in:
Laurent Perron
2021-09-10 15:11:24 +02:00
parent 8f5bfbc252
commit 29cd1038a1
5 changed files with 417 additions and 237 deletions

View File

@@ -22,7 +22,7 @@ syntax = "proto2";
package operations_research.glop;
// next id = 65
// next id = 66
message GlopParameters {
// Supported algorithms for scaling:
@@ -453,10 +453,17 @@ message GlopParameters {
// Note that by default a FREE variable is assumed to be zero unless a
// starting value was specified via SetStartingVariableValuesForNextSolve().
//
// TODO(user): Note that currently, at the end of the solve, some of these
// FREE variable with bounds and an interior point value might still be left
// in the final solution. We will need an extra step and parameter to clean
// that up if requested.
// Note that, at the end of the solve, some of these FREE variables with
// bounds and an interior point value might still be left in the final
// solution. Enable push_to_vertex to clean these up.
optional double crossover_bound_snapping_distance = 64 [default = inf];
// If the optimization phases finishes with super-basic variables (i.e.,
// variables that either 1) have bounds but are FREE in the basis, or 2) have
// no bounds and are FREE in the basis at a nonzero value), then run a "push"
// phase to push these variables to bounds, obtaining a vertex solution. Note
// this situation can happen only if a starting value was specified via
// SetStartingVariableValuesForNextSolve().
optional bool push_to_vertex = 65 [default = true];
}

View File

@@ -53,9 +53,8 @@ bool ReducedCosts::NeedsBasisRefactorization() const {
return must_refactorize_basis_;
}
bool ReducedCosts::TestEnteringReducedCostPrecision(
ColIndex entering_col, const ScatteredColumn& direction,
Fractional* reduced_cost) {
Fractional ReducedCosts::TestEnteringReducedCostPrecision(
ColIndex entering_col, const ScatteredColumn& direction) {
SCOPED_TIME_STAT(&stats_);
if (recompute_basic_objective_) {
ComputeBasicObjective();
@@ -67,20 +66,6 @@ bool ReducedCosts::TestEnteringReducedCostPrecision(
// Update the reduced cost of the entering variable with the precise version.
reduced_costs_[entering_col] = precise_reduced_cost;
*reduced_cost = precise_reduced_cost;
if (!IsValidPrimalEnteringCandidate(entering_col)) {
VLOG(1) << "Entering candidate is not valid under precise reduced costs.";
// If we don't have the reduced cost with maximum precision, we
// return false and the next ChooseEnteringColumn() will recompute them.
// If they are already precise, we will skip this one (since it is no
// longer a candidate).
if (!are_reduced_costs_precise_) {
MakeReducedCostsPrecise();
}
return false;
}
// At this point, we have an entering variable that will move the objective in
// the good direction. We check the precision of the reduced cost and edges
@@ -102,27 +87,15 @@ bool ReducedCosts::TestEnteringReducedCostPrecision(
MakeReducedCostsPrecise();
}
}
return true;
return precise_reduced_cost;
}
Fractional ReducedCosts::ComputeMaximumDualResidual() const {
Fractional ReducedCosts::ComputeMaximumDualResidual() {
SCOPED_TIME_STAT(&stats_);
DCHECK(!recompute_reduced_costs_);
if (recompute_reduced_costs_) return 0.0;
// The current reduced costs of the slack columns are the opposite of the dual
// values. Note that they are updated by UpdateBeforeBasisPivot().
const RowIndex num_rows = matrix_.num_rows();
const ColIndex first_slack_col = matrix_.num_cols() - RowToColIndex(num_rows);
DenseRow dual_values(RowToColIndex(num_rows), 0.0);
for (RowIndex row(0); row < num_rows; ++row) {
const ColIndex row_as_col = RowToColIndex(row);
const ColIndex slack_col = first_slack_col + row_as_col;
dual_values[row_as_col] = objective_[slack_col] +
cost_perturbations_[slack_col] -
reduced_costs_[slack_col];
}
Fractional dual_residual_error(0.0);
const RowIndex num_rows = matrix_.num_rows();
const DenseRow& dual_values = Transpose(GetDualValues());
for (RowIndex row(0); row < num_rows; ++row) {
const ColIndex basic_col = basis_[row];
const Fractional residual =
@@ -133,10 +106,12 @@ Fractional ReducedCosts::ComputeMaximumDualResidual() const {
return dual_residual_error;
}
Fractional ReducedCosts::ComputeMaximumDualInfeasibility() const {
Fractional ReducedCosts::ComputeMaximumDualInfeasibility() {
SCOPED_TIME_STAT(&stats_);
DCHECK(!recompute_reduced_costs_);
if (recompute_reduced_costs_) return 0.0;
// Trigger a recomputation if needed so that reduced_costs_ is valid.
GetReducedCosts();
Fractional maximum_dual_infeasibility = 0.0;
const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
@@ -151,9 +126,12 @@ Fractional ReducedCosts::ComputeMaximumDualInfeasibility() const {
return maximum_dual_infeasibility;
}
Fractional ReducedCosts::ComputeMaximumDualInfeasibilityOnNonBoxedVariables()
const {
Fractional ReducedCosts::ComputeMaximumDualInfeasibilityOnNonBoxedVariables() {
SCOPED_TIME_STAT(&stats_);
// Trigger a recomputation if needed so that reduced_costs_ is valid.
GetReducedCosts();
Fractional maximum_dual_infeasibility = 0.0;
const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
@@ -170,10 +148,12 @@ Fractional ReducedCosts::ComputeMaximumDualInfeasibilityOnNonBoxedVariables()
return maximum_dual_infeasibility;
}
Fractional ReducedCosts::ComputeSumOfDualInfeasibilities() const {
Fractional ReducedCosts::ComputeSumOfDualInfeasibilities() {
SCOPED_TIME_STAT(&stats_);
DCHECK(!recompute_reduced_costs_);
if (recompute_reduced_costs_) return 0.0;
// Trigger a recomputation if needed so that reduced_costs_ is valid.
GetReducedCosts();
Fractional dual_infeasibility_sum = 0.0;
const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
@@ -484,43 +464,19 @@ void ReducedCosts::UpdateReducedCosts(ColIndex entering_col,
}
are_reduced_costs_recomputed_ = false;
are_reduced_costs_precise_ = false;
update_row->ComputeUpdateRow(leaving_row);
SCOPED_TIME_STAT(&stats_);
const ColIndex first_slack_col =
matrix_.num_cols() - RowToColIndex(matrix_.num_rows());
// Update the leaving variable reduced cost.
// '-pivot' is the value of the entering_edge at 'leaving_row'.
// The edge of the 'leaving_col' in the new basis is equal to
// 'entering_edge / -pivot'.
const Fractional new_leaving_reduced_cost = entering_reduced_cost / -pivot;
for (const ColIndex col : update_row->GetNonZeroPositions()) {
// Because the columns are in order, it is safe to break here.
if (col >= first_slack_col) break;
const Fractional coeff = update_row->GetCoefficient(col);
reduced_costs_[col] += new_leaving_reduced_cost * coeff;
}
are_reduced_costs_precise_ = false;
// Always update the slack variable position so we have the dual values and
// we can use them in ComputeCurrentDualResidualError().
const ScatteredRow& unit_row_left_inverse =
update_row->GetUnitRowLeftInverse();
if (unit_row_left_inverse.non_zeros.empty()) {
const ColIndex num_cols = unit_row_left_inverse.values.size();
for (ColIndex col(0); col < num_cols; ++col) {
const ColIndex slack_col = first_slack_col + col;
const Fractional coeff = unit_row_left_inverse[col];
reduced_costs_[slack_col] += new_leaving_reduced_cost * coeff;
}
} else {
for (const ColIndex col : unit_row_left_inverse.non_zeros) {
const ColIndex slack_col = first_slack_col + col;
const Fractional coeff = unit_row_left_inverse[col];
reduced_costs_[slack_col] += new_leaving_reduced_cost * coeff;
}
}
reduced_costs_[leaving_col] = new_leaving_reduced_cost;
// In the dual, since we compute the update before selecting the entering

View File

@@ -60,26 +60,27 @@ class ReducedCosts {
bool NeedsBasisRefactorization() const;
// Checks the precision of the entering variable choice now that the direction
// is computed, and return true if we can continue with this entering column,
// or false if this column is actually not good and ChooseEnteringColumn()
// need to be called again.
bool TestEnteringReducedCostPrecision(ColIndex entering_col,
const ScatteredColumn& direction,
Fractional* reduced_cost);
// is computed. Returns its precise version. This will also trigger a
// reduced cost recomputation if it was deemed too imprecise.
Fractional TestEnteringReducedCostPrecision(ColIndex entering_col,
const ScatteredColumn& direction);
// Computes the current dual residual and infeasibility. Note that these
// functions are not really fast (many scalar products will be computed) and
// shouldn't be called at each iteration. They will return 0.0 if the reduced
// costs need to be recomputed first and fail in debug mode.
Fractional ComputeMaximumDualResidual() const;
Fractional ComputeMaximumDualInfeasibility() const;
Fractional ComputeSumOfDualInfeasibilities() const;
// shouldn't be called at each iteration.
//
// These functions will compute the reduced costs if needed.
// ComputeMaximumDualResidual() also needs ComputeBasicObjectiveLeftInverse()
// and does not depend on the reduced costs.
Fractional ComputeMaximumDualResidual();
Fractional ComputeMaximumDualInfeasibility();
Fractional ComputeSumOfDualInfeasibilities();
// Same as ComputeMaximumDualInfeasibility() but ignore boxed variables.
// Because we can always switch bounds of boxed variables, if this is under
// the dual tolerance, then we can easily have a dual feasible solution and do
// not need to run a dual phase I algorithm.
Fractional ComputeMaximumDualInfeasibilityOnNonBoxedVariables() const;
Fractional ComputeMaximumDualInfeasibilityOnNonBoxedVariables();
// Updates any internal data BEFORE the given simplex pivot is applied to B.
// Note that no updates are needed in case of a bound flip.

View File

@@ -32,6 +32,7 @@
#include "ortools/glop/parameters.pb.h"
#include "ortools/lp_data/lp_data.h"
#include "ortools/lp_data/lp_print_utils.h"
#include "ortools/lp_data/lp_types.h"
#include "ortools/lp_data/lp_utils.h"
#include "ortools/lp_data/matrix_utils.h"
#include "ortools/lp_data/permutation.h"
@@ -79,9 +80,6 @@ constexpr const uint64_t kDeterministicSeed = 42;
RevisedSimplex::RevisedSimplex()
: problem_status_(ProblemStatus::INIT),
num_rows_(0),
num_cols_(0),
first_slack_col_(0),
objective_(),
basis_(),
variable_name_(),
@@ -104,19 +102,11 @@ RevisedSimplex::RevisedSimplex()
entering_variable_(variables_info_, random_, &reduced_costs_),
primal_prices_(random_, variables_info_, &primal_edge_norms_,
&reduced_costs_),
num_iterations_(0),
num_feasibility_iterations_(0),
num_optimization_iterations_(0),
total_time_(0.0),
feasibility_time_(0.0),
optimization_time_(0.0),
last_deterministic_time_update_(0.0),
iteration_stats_(),
ratio_test_stats_(),
function_stats_("SimplexFunctionStats"),
parameters_(),
test_lu_(),
feasibility_phase_(true) {
test_lu_() {
SetParameters(parameters_);
}
@@ -157,12 +147,14 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
update_row_.Invalidate();
test_lu_.Clear();
problem_status_ = ProblemStatus::INIT;
feasibility_phase_ = true;
phase_ = Phase::FEASIBILITY;
num_iterations_ = 0;
num_feasibility_iterations_ = 0;
num_optimization_iterations_ = 0;
num_push_iterations_ = 0;
feasibility_time_ = 0.0;
optimization_time_ = 0.0;
push_time_ = 0.0;
total_time_ = 0.0;
// In case we abort because of an error, we cannot assume that the current
@@ -189,13 +181,8 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
LOG(INFO) << "The matrix has " << compact_matrix_.num_rows() << " rows, "
<< compact_matrix_.num_cols() << " columns, "
<< compact_matrix_.num_entries() << " entries.";
int num_free_variables = 0;
for (const VariableStatus status : variables_info_.GetStatusRow()) {
if (status == VariableStatus::FREE) ++num_free_variables;
}
LOG(INFO) << "Initial number of non-basic free variables: "
<< num_free_variables;
LOG(INFO) << "Initial number of super-basic variables: "
<< ComputeNumberOfSuperBasicVariables();
}
// TODO(user): Avoid doing the first phase checks when we know from the
@@ -209,7 +196,8 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
variables_info_.MakeBoxedVariableRelevant(false);
GLOP_RETURN_IF_ERROR(DualMinimize(feasibility_phase_, time_limit));
GLOP_RETURN_IF_ERROR(
DualMinimize(phase_ == Phase::FEASIBILITY, time_limit));
DisplayIterationInfo();
if (problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
@@ -239,7 +227,6 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
reduced_costs_.MakeReducedCostsPrecise();
bool refactorize = reduced_costs_.NeedsBasisRefactorization();
GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
reduced_costs_.GetReducedCosts();
const Fractional initial_infeasibility =
reduced_costs_.ComputeMaximumDualInfeasibilityOnNonBoxedVariables();
@@ -256,7 +243,7 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
variables_info_.TransformToDualPhaseIProblem(
reduced_costs_.GetDualFeasibilityTolerance(),
reduced_costs_.GetReducedCosts());
DenseRow zero; // We want the FREE variables at zero here.
DenseRow zero;  // We want the FREE variables at zero here.
variable_values_.ResetAllNonBasicVariableValues(zero);
variable_values_.RecomputeBasicVariableValues();
@@ -293,7 +280,7 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
}
}
} else {
GLOP_RETURN_IF_ERROR(Minimize(time_limit));
GLOP_RETURN_IF_ERROR(PrimalMinimize(time_limit));
DisplayIterationInfo();
// After the primal phase I, we need to restore the objective.
@@ -303,13 +290,9 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
}
}
// Reduced costs must be explicitly recomputed because DisplayErrors() is
// const.
// TODO(user): This API is not really nice.
reduced_costs_.GetReducedCosts();
DisplayErrors();
feasibility_phase_ = false;
phase_ = Phase::OPTIMIZATION;
feasibility_time_ = time_limit->GetElapsedTime() - start_time;
primal_edge_norms_.SetPricingRule(parameters_.optimization_rule());
num_feasibility_iterations_ = num_iterations_;
@@ -343,14 +326,15 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
++num_optims) {
if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE) {
// Run the primal simplex.
GLOP_RETURN_IF_ERROR(Minimize(time_limit));
GLOP_RETURN_IF_ERROR(PrimalMinimize(time_limit));
} else {
// Run the dual simplex.
GLOP_RETURN_IF_ERROR(DualMinimize(feasibility_phase_, time_limit));
GLOP_RETURN_IF_ERROR(
DualMinimize(phase_ == Phase::FEASIBILITY, time_limit));
}
// Minimize() or DualMinimize() always double check the result with maximum
// precision by refactoring the basis before exiting (except if an
// PrimalMinimize() or DualMinimize() always double check the result with
// maximum precision by refactoring the basis before exiting (except if an
// iteration or time limit was reached).
DCHECK(problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
@@ -372,10 +356,6 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
variable_values_.RecomputeBasicVariableValues();
reduced_costs_.ClearAndRemoveCostShifts();
// Reduced costs must be explicitly recomputed because DisplayErrors() is
// const.
// TODO(user): This API is not really nice.
reduced_costs_.GetReducedCosts();
DisplayIterationInfo();
DisplayErrors();
@@ -496,6 +476,38 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
}
}
total_time_ = time_limit->GetElapsedTime() - start_time;
optimization_time_ = total_time_ - feasibility_time_;
num_optimization_iterations_ = num_iterations_ - num_feasibility_iterations_;
// If the user didn't provide starting variable values, then there is no need
// to check for super-basic variables.
if (!variable_starting_values_.empty()) {
const int num_super_basic = ComputeNumberOfSuperBasicVariables();
if (num_super_basic > 0) {
if (log_info) {
LOG(INFO) << "Num super-basic variables left after optimize phase: "
<< num_super_basic;
}
if (parameters_.push_to_vertex()) {
if (problem_status_ == ProblemStatus::OPTIMAL) {
if (log_info) LOG(INFO) << "------ Third phase: push.";
phase_ = Phase::PUSH;
GLOP_RETURN_IF_ERROR(PrimalPush(time_limit));
// TODO(user): We should re-check for feasibility at this point and
// apply clean-up as needed.
} else if (log_info) {
LOG(INFO) << "Skipping push phase because optimize didn't succeed.";
}
}
}
}
total_time_ = time_limit->GetElapsedTime() - start_time;
push_time_ = total_time_ - feasibility_time_ - optimization_time_;
num_push_iterations_ = num_iterations_ - num_feasibility_iterations_ -
num_optimization_iterations_;
// Store the result for the solution getters.
solution_objective_value_ = ComputeInitialProblemObjectiveValue();
solution_dual_values_ = reduced_costs_.GetDualValues();
@@ -518,26 +530,6 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
}
}
total_time_ = time_limit->GetElapsedTime() - start_time;
optimization_time_ = total_time_ - feasibility_time_;
num_optimization_iterations_ = num_iterations_ - num_feasibility_iterations_;
// If some free (aka super-basic) variables are still left in the final
// solution, we display their number.
if (log_info) {
int num_super_basic = 0;
const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
for (ColIndex col(0); col < num_cols_; ++col) {
if (variable_statuses[col] == VariableStatus::FREE) {
++num_super_basic;
}
}
if (num_super_basic > 0) {
LOG(INFO) << "Num super-basic variables left in final solution: "
<< num_super_basic;
}
}
variable_starting_values_.clear();
DisplayAllStats();
return Status::OK();
@@ -1541,6 +1533,18 @@ ColIndex RevisedSimplex::ComputeNumberOfEmptyColumns() {
return num_empty_cols;
}
int RevisedSimplex::ComputeNumberOfSuperBasicVariables() const {
const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
int num_super_basic = 0;
for (ColIndex col(0); col < num_cols_; ++col) {
if (variable_statuses[col] == VariableStatus::FREE &&
variable_values_.Get(col) != 0.0) {
++num_super_basic;
}
}
return num_super_basic;
}
void RevisedSimplex::CorrectErrorsOnVariableValues() {
SCOPED_TIME_STAT(&function_stats_);
DCHECK(basis_factorization_.IsRefactorized());
@@ -2574,7 +2578,7 @@ Status RevisedSimplex::Polish(TimeLimit* time_limit) {
// enter the basis, and a variable from x_B is selected to leave the basis.
// To avoid explicit inversion of B, the algorithm solves two sub-systems:
// y.B = c_B and B.d = a (a being the entering column).
Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
Status RevisedSimplex::PrimalMinimize(TimeLimit* time_limit) {
GLOP_RETURN_ERROR_IF_NULL(time_limit);
Cleanup update_deterministic_time_on_return(
[this, time_limit]() { AdvanceDeterministicTime(time_limit); });
@@ -2586,7 +2590,7 @@ Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
// lets always reset them for the first iteration below.
primal_prices_.ForceRecomputation();
if (feasibility_phase_) {
if (phase_ == Phase::FEASIBILITY) {
// Initialize the primal phase-I objective.
// Note that this temporarily erases the problem objective.
objective_.AssignToZero(num_cols_);
@@ -2610,7 +2614,7 @@ Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
CorrectErrorsOnVariableValues();
DisplayIterationInfo();
if (feasibility_phase_) {
if (phase_ == Phase::FEASIBILITY) {
// Since the variable values may have been recomputed, we need to
// recompute the primal infeasible variables and update their costs.
if (variable_values_.UpdatePrimalPhaseICosts(
@@ -2622,7 +2626,7 @@ Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
// Computing the objective at each iteration takes time, so we just
// check the limit when the basis is refactorized.
if (!feasibility_phase_ &&
if (phase_ == Phase::OPTIMIZATION &&
ComputeObjectiveValue() < primal_objective_limit_) {
VLOG(1) << "Stopping the primal simplex because"
<< " the objective limit " << primal_objective_limit_
@@ -2631,7 +2635,7 @@ Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
objective_limit_reached_ = true;
return Status::OK();
}
} else if (feasibility_phase_) {
} else if (phase_ == Phase::FEASIBILITY) {
// Note that direction_.non_zeros contains the positions of the basic
// variables whose values were updated during the last iteration.
if (variable_values_.UpdatePrimalPhaseICosts(direction_.non_zeros,
@@ -2640,12 +2644,11 @@ Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
}
}
Fractional reduced_cost = 0.0;
const ColIndex entering_col = primal_prices_.GetBestEnteringColumn();
if (entering_col == kInvalidCol) {
if (reduced_costs_.AreReducedCostsPrecise() &&
basis_factorization_.IsRefactorized()) {
if (feasibility_phase_) {
if (phase_ == Phase::FEASIBILITY) {
const Fractional primal_infeasibility =
variable_values_.ComputeMaximumPrimalInfeasibility();
if (primal_infeasibility <
@@ -2660,28 +2663,36 @@ Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
problem_status_ = ProblemStatus::OPTIMAL;
}
break;
} else {
VLOG(1) << "Optimal reached, double checking...";
reduced_costs_.MakeReducedCostsPrecise();
refactorize = true;
continue;
}
} else {
reduced_cost = reduced_costs_.GetReducedCosts()[entering_col];
DCHECK(reduced_costs_.IsValidPrimalEnteringCandidate(entering_col));
// Solve the system B.d = a with a the entering column.
ComputeDirection(entering_col);
primal_edge_norms_.TestEnteringEdgeNormPrecision(entering_col,
direction_);
if (!reduced_costs_.TestEnteringReducedCostPrecision(
entering_col, direction_, &reduced_cost)) {
primal_prices_.RecomputePriceAt(entering_col);
VLOG(1) << "Skipping col #" << entering_col << " whose reduced cost is "
<< reduced_cost;
continue;
}
primal_prices_.RecomputePriceAt(entering_col);
VLOG(1) << "Optimal reached, double checking...";
reduced_costs_.MakeReducedCostsPrecise();
refactorize = true;
continue;
}
DCHECK(reduced_costs_.IsValidPrimalEnteringCandidate(entering_col));
// Solve the system B.d = a with a the entering column.
ComputeDirection(entering_col);
// This might trigger a recomputation on the next iteration, but we
// finish this one even if the price is imprecise.
primal_edge_norms_.TestEnteringEdgeNormPrecision(entering_col, direction_);
const Fractional reduced_cost =
reduced_costs_.TestEnteringReducedCostPrecision(entering_col,
direction_);
// The test might have changed the reduced cost of the entering_col.
// If it is no longer a valid entering candidate, we loop.
primal_prices_.RecomputePriceAt(entering_col);
if (!reduced_costs_.IsValidPrimalEnteringCandidate(entering_col)) {
reduced_costs_.MakeReducedCostsPrecise();
VLOG(1) << "Skipping col #" << entering_col
<< " whose reduced cost is no longer valid under precise reduced "
"cost: "
<< reduced_cost;
continue;
}
// This test takes place after the check for optimality/feasibility because
@@ -2697,7 +2708,7 @@ Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
Fractional step_length;
RowIndex leaving_row;
Fractional target_bound;
if (feasibility_phase_) {
if (phase_ == Phase::FEASIBILITY) {
PrimalPhaseIChooseLeavingVariableRow(entering_col, reduced_cost,
&refactorize, &leaving_row,
&step_length, &target_bound);
@@ -2715,7 +2726,7 @@ Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
reduced_costs_.MakeReducedCostsPrecise();
continue;
}
if (feasibility_phase_) {
if (phase_ == Phase::FEASIBILITY) {
// This shouldn't happen by construction.
VLOG(1) << "Unbounded feasibility problem !?";
problem_status_ = ProblemStatus::ABNORMAL;
@@ -2736,7 +2747,7 @@ Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
}
Fractional step = (reduced_cost > 0.0) ? -step_length : step_length;
if (feasibility_phase_ && leaving_row != kInvalidRow) {
if (phase_ == Phase::FEASIBILITY && leaving_row != kInvalidRow) {
// For phase-I we currently always set the leaving variable to its exact
// bound even if by doing so we may take a small step in the wrong
// direction and may increase the overall infeasibility.
@@ -2814,7 +2825,7 @@ Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
}
if (feasibility_phase_ && leaving_row != kInvalidRow) {
if (phase_ == Phase::FEASIBILITY && leaving_row != kInvalidRow) {
// Set the leaving variable to its exact bound.
variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
@@ -2890,23 +2901,16 @@ Status RevisedSimplex::DualMinimize(bool feasibility_phase,
// because that may break the overall direction taken by the last steps
// and may lead to less improvement on degenerate problems.
//
// For now, we just recompute them if refactorize was set during the
// loop and not because of normal refactorization.
//
// During phase-I, we do want the reduced costs to be as precise as
// possible. TODO(user): Investigate why and fix the TODO in
// PermuteBasis().
//
// Reduced costs are needed by MakeBoxedVariableDualFeasible(), so if we
// do recompute them, it is better to do that first.
if (!feasibility_phase && !reduced_costs_.AreReducedCostsRecomputed() &&
!old_refactorize_value) {
const Fractional dual_residual_error =
reduced_costs_.ComputeMaximumDualResidual();
if (dual_residual_error >
reduced_costs_.GetDualFeasibilityTolerance()) {
VLOG(1) << "Recomputing reduced costs. Dual residual = "
<< dual_residual_error;
reduced_costs_.MakeReducedCostsPrecise();
}
} else {
if (feasibility_phase || old_refactorize_value) {
reduced_costs_.MakeReducedCostsPrecise();
}
@@ -2928,11 +2932,12 @@ Status RevisedSimplex::DualMinimize(bool feasibility_phase,
// Computing the objective at each iteration takes time, so we just
// check the limit when the basis is refactorized.
//
// Hack: We need the feasibility_phase_ here and not the local variable
// because this must not be checked for the dual phase I algo that use
// the same code as the dual phase II (i.e. the local feasibility_phase
// will be false).
if (!feasibility_phase_ && dual_objective_limit_ != kInfinity &&
// Hack: We need phase_ here and not the local feasibility_phase
// variable because this must not be checked for the dual phase I algo
// that uses the same code as the dual phase II (i.e. the local
// feasibility_phase will be false).
if (phase_ == Phase::OPTIMIZATION &&
dual_objective_limit_ != kInfinity &&
ComputeObjectiveValue() > dual_objective_limit_) {
VLOG(1) << "Stopping the dual simplex because"
<< " the objective limit " << dual_objective_limit_
@@ -2943,7 +2948,6 @@ Status RevisedSimplex::DualMinimize(bool feasibility_phase,
}
}
reduced_costs_.GetReducedCosts();
DisplayIterationInfo();
} else {
// Updates from the previous iteration that can be skipped if we
@@ -3148,6 +3152,185 @@ Status RevisedSimplex::DualMinimize(bool feasibility_phase,
return Status::OK();
}
// Third "push" phase. After optimization, some super-basic variables (FREE
// status with a nonzero value) may remain; this pivots each of them into the
// basis (or snaps it to a bound / to zero) so the final solution is a vertex.
// We assume the current solution is optimal, so the reduced cost of each
// super-basic variable is approximately zero and pushing it should not change
// the objective value.
Status RevisedSimplex::PrimalPush(TimeLimit* time_limit) {
  GLOP_RETURN_ERROR_IF_NULL(time_limit);
  Cleanup update_deterministic_time_on_return(
      [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
  DisplayIterationInfo();
  bool refactorize = false;

  // We clear all the quantities that we don't update so they will be recomputed
  // later if needed.
  primal_edge_norms_.Clear();
  dual_edge_norms_.Clear();
  reduced_costs_.ClearAndRemoveCostShifts();

  // Collect the columns to push: non-basic, FREE, and at a nonzero value.
  std::vector<ColIndex> super_basic_cols;
  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
    if (variables_info_.GetStatusRow()[col] == VariableStatus::FREE &&
        variable_values_.Get(col) != 0) {
      super_basic_cols.push_back(col);
    }
  }

  while (!super_basic_cols.empty()) {
    AdvanceDeterministicTime(time_limit);
    if (time_limit->LimitReached()) break;
    IF_STATS_ENABLED(
        ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
    GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
    if (basis_factorization_.IsRefactorized()) {
      CorrectErrorsOnVariableValues();
      DisplayIterationInfo();
    }

    // TODO(user): Select at random like in Polish().
    ColIndex entering_col = super_basic_cols.back();

    DCHECK(variables_info_.GetCanDecreaseBitRow()[entering_col]);
    DCHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);

    // Decide which direction to send the entering column.
    // UNCONSTRAINED variables go towards zero. Other variables go towards their
    // closest bound. We assume that we're at an optimal solution, so all FREE
    // variables have approximately zero reduced cost, which means that the
    // objective value won't change from moving this column into the basis.
    // TODO(user): As an improvement for variables with two bounds, try both
    // and pick one that doesn't require a basis change (if possible), otherwise
    // pick the closer bound.
    Fractional fake_rc;
    const Fractional entering_value = variable_values_.Get(entering_col);
    if (variables_info_.GetTypeRow()[entering_col] ==
        VariableType::UNCONSTRAINED) {
      if (entering_value > 0) {
        fake_rc = 1.0;
      } else {
        fake_rc = -1.0;
      }
    } else {
      const Fractional diff_ub =
          variables_info_.GetVariableUpperBounds()[entering_col] -
          entering_value;
      const Fractional diff_lb =
          entering_value -
          variables_info_.GetVariableLowerBounds()[entering_col];
      if (diff_lb <= diff_ub) {
        fake_rc = 1.0;
      } else {
        fake_rc = -1.0;
      }
    }

    // Solve the system B.d = a with a the entering column.
    ComputeDirection(entering_col);

    Fractional step_length;
    RowIndex leaving_row;
    Fractional target_bound;
    GLOP_RETURN_IF_ERROR(ChooseLeavingVariableRow(entering_col, fake_rc,
                                                  &refactorize, &leaving_row,
                                                  &step_length, &target_bound));
    if (refactorize) continue;

    // At this point, we know the iteration will finish or stop with an error.
    super_basic_cols.pop_back();

    if (step_length == kInfinity || step_length == -kInfinity) {
      if (variables_info_.GetTypeRow()[entering_col] ==
          VariableType::UNCONSTRAINED) {
        // An UNCONSTRAINED variable can always be moved to zero.
        step_length = std::fabs(entering_value);
      } else {
        VLOG(1) << "Infinite step for bounded variable ?!";
        problem_status_ = ProblemStatus::ABNORMAL;
        break;
      }
    }

    const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;

    // Store the leaving_col before basis_ change.
    const ColIndex leaving_col =
        (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];

    // An iteration is called 'degenerate' if the leaving variable is already
    // primal-infeasible and we make it even more infeasible or if we do a zero
    // step.
    // TODO(user): Test setting the step size to zero for degenerate steps.
    // We don't need to force a positive step because each super-basic variable
    // is pivoted in exactly once.
    bool is_degenerate = false;
    if (leaving_row != kInvalidRow) {
      Fractional dir = -direction_[leaving_row] * step;
      is_degenerate =
          (dir == 0.0) ||
          (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
          (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);

      // If the iteration is not degenerate, the leaving variable should go to
      // its exact target bound (it is how the step is computed).
      if (!is_degenerate) {
        DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
                                                              target_bound));
      }
    }

    variable_values_.UpdateOnPivoting(direction_, entering_col, step);
    if (leaving_row != kInvalidRow) {
      update_row_.ComputeUpdateRow(leaving_row);

      if (!is_degenerate) {
        // On a non-degenerate iteration, the leaving variable should be at its
        // exact bound. This corrects an eventual small numerical error since
        // 'value + direction * step' where step is
        // '(target_bound - value) / direction'
        // may be slightly different from target_bound.
        variable_values_.Set(leaving_col, target_bound);
      }
      GLOP_RETURN_IF_ERROR(
          UpdateAndPivot(entering_col, leaving_row, target_bound));
      IF_STATS_ENABLED({
        if (is_degenerate) {
          timer.AlsoUpdate(&iteration_stats_.degenerate);
        } else {
          timer.AlsoUpdate(&iteration_stats_.normal);
        }
      });
    } else {
      // Snap the super-basic variable to its bound. Note that
      // variable_values_.UpdateOnPivoting() should already be close to that but
      // here we make sure it is exact and remove any small numerical errors.
      if (variables_info_.GetTypeRow()[entering_col] ==
          VariableType::UNCONSTRAINED) {
        variable_values_.Set(entering_col, 0.0);
      } else if (step > 0.0) {
        SetNonBasicVariableStatusAndDeriveValue(entering_col,
                                                VariableStatus::AT_UPPER_BOUND);
      } else if (step < 0.0) {
        SetNonBasicVariableStatusAndDeriveValue(entering_col,
                                                VariableStatus::AT_LOWER_BOUND);
      }
      IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
    }

    ++num_iterations_;
  }

  // Fixed: the original condition was '!super_basic_cols.empty() > 0', which
  // parses as '(!empty()) > 0' — a bool compared against 0. It happened to
  // behave correctly, but the '> 0' was a typo and is removed.
  if (!super_basic_cols.empty() &&
      (parameters_.log_search_progress() || VLOG_IS_ON(1))) {
    LOG(INFO) << "Push terminated early with " << super_basic_cols.size()
              << " super-basic variables remaining.";
  }

  // TODO(user): What status should be returned if the time limit is hit?
  // If the optimization phase finished, then OPTIMAL is technically correct
  // but also misleading.
  return Status::OK();
}
// Returns the column index of the slack variable associated with the given
// row. Slack columns are laid out contiguously starting at first_slack_col_,
// so the mapping is a simple offset; the DCHECK guards against out-of-range
// rows in debug builds.
ColIndex RevisedSimplex::SlackColIndex(RowIndex row) const {
DCHECK_ROW_BOUNDS(row);
return first_slack_col_ + RowToColIndex(row);
@@ -3208,42 +3391,57 @@ void RevisedSimplex::PropagateParameters() {
update_row_.SetParameters(parameters_);
}
void RevisedSimplex::DisplayIterationInfo() const {
void RevisedSimplex::DisplayIterationInfo() {
if (parameters_.log_search_progress() || VLOG_IS_ON(1)) {
Fractional objective;
std::string name;
if (!feasibility_phase_) {
// Note that in the dual phase II, ComputeObjectiveValue() is also
// computing the dual objective even if it uses the variable values. This
// is because if we modify the bounds to make the problem primal-feasible,
// we are at the optimal and hence the two objectives are the same.
objective = ComputeInitialProblemObjectiveValue();
name = "objective";
} else if (parameters_.use_dual_simplex()) {
if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
objective = reduced_costs_.ComputeSumOfDualInfeasibilities();
} else {
// The internal objective of the transformed problem is the negation
// of the sum of the dual infeasibility of the original problem.
objective = -PreciseScalarProduct(
objective_, Transpose(variable_values_.GetDenseRow()));
}
name = "sum_dual_infeasibilities";
} else {
objective = variable_values_.ComputeSumOfPrimalInfeasibilities();
name = "sum_primal_infeasibilities";
int iter = 0;
std::string phase_name;
switch (phase_) {
case Phase::FEASIBILITY:
phase_name = "Feasibility";
iter = num_iterations_;
if (parameters_.use_dual_simplex()) {
if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
objective = reduced_costs_.ComputeSumOfDualInfeasibilities();
} else {
// The internal objective of the transformed problem is the negation
// of the sum of the dual infeasibility of the original problem.
objective = -PreciseScalarProduct(
objective_, Transpose(variable_values_.GetDenseRow()));
}
name = "sum_dual_infeasibilities";
} else {
objective = variable_values_.ComputeSumOfPrimalInfeasibilities();
name = "sum_primal_infeasibilities";
}
break;
case Phase::OPTIMIZATION:
phase_name = "Optimization";
iter = num_iterations_ - num_feasibility_iterations_;
// Note that in the dual phase II, ComputeObjectiveValue() is also
// computing the dual objective even if it uses the variable values.
// This is because if we modify the bounds to make the problem
// primal-feasible, we are at the optimal and hence the two objectives
// are the same.
objective = ComputeInitialProblemObjectiveValue();
name = "objective";
break;
case Phase::PUSH:
phase_name = "Push";
iter = num_iterations_ - num_feasibility_iterations_ -
num_optimization_iterations_;
name = "remaining_variables_to_push";
objective = ComputeNumberOfSuperBasicVariables();
}
const int iter = feasibility_phase_
? num_iterations_
: num_iterations_ - num_feasibility_iterations_;
LOG(INFO) << (feasibility_phase_ ? "Feasibility" : "Optimization")
<< " phase, iteration # " << iter << ", " << name << " = "
<< absl::StrFormat("%.15E", objective);
LOG(INFO) << phase_name << " phase, iteration # " << iter << ", " << name
<< " = " << absl::StrFormat("%.15E", objective);
}
}
void RevisedSimplex::DisplayErrors() const {
void RevisedSimplex::DisplayErrors() {
if (parameters_.log_search_progress() || VLOG_IS_ON(1)) {
LOG(INFO) << "Primal infeasibility (bounds) = "
<< variable_values_.ComputeMaximumPrimalInfeasibility();

View File

@@ -273,6 +273,8 @@ class RevisedSimplex {
IntegerDistribution num_perfect_ties;
};
enum class Phase { FEASIBILITY, OPTIMIZATION, PUSH };
// Propagates parameters_ to all the other classes that need it.
//
// TODO(user): Maybe a better design is for them to have a reference to a
@@ -299,10 +301,10 @@ class RevisedSimplex {
std::string SimpleVariableInfo(ColIndex col) const;
// Displays a short string with the current iteration and objective value.
void DisplayIterationInfo() const;
void DisplayIterationInfo();
// Displays the error bounds of the current solution.
void DisplayErrors() const;
void DisplayErrors();
// Displays the status of the variables.
void DisplayInfoOnVariables() const;
@@ -416,6 +418,11 @@ class RevisedSimplex {
// the coefficients are zero.
ColIndex ComputeNumberOfEmptyColumns();
// Returns the number of super-basic variables. These are non-basic variables
// that are not at their bounds (if they have bounds), or non-basic free
// variables that are not at zero.
int ComputeNumberOfSuperBasicVariables() const;
// This method transforms a basis for the first phase, with the optimal
// value at zero, into a feasible basis for the initial problem, thus
// preparing the execution of phase-II of the algorithm.
@@ -565,15 +572,20 @@ class RevisedSimplex {
// every component can take advantage of it.
Status RefactorizeBasisIfNeeded(bool* refactorize);
// Minimize the objective function, be it for satisfiability or for
// optimization. Used by Solve().
ABSL_MUST_USE_RESULT Status Minimize(TimeLimit* time_limit);
// Main iteration loop of the primal simplex.
ABSL_MUST_USE_RESULT Status PrimalMinimize(TimeLimit* time_limit);
// Same as Minimize() for the dual simplex algorithm.
// TODO(user): remove duplicate code between the two functions.
// Main iteration loop of the dual simplex.
ABSL_MUST_USE_RESULT Status DualMinimize(bool feasibility_phase,
TimeLimit* time_limit);
// Pushes all super-basic variables to bounds (if applicable) or to zero (if
// unconstrained). This is part of a "crossover" procedure to find a vertex
// solution given a (near) optimal solution. Assumes that Minimize() or
// DualMinimize() has already run, i.e., that we are at an optimal solution
// within numerical tolerances.
ABSL_MUST_USE_RESULT Status PrimalPush(TimeLimit* time_limit);
// Experimental. This is useful in a MIP context. It performs a few degenerate
// pivots to try to minimize the fractionality of the optimal basis.
//
@@ -600,15 +612,15 @@ class RevisedSimplex {
ProblemStatus problem_status_;
// Current number of rows in the problem.
RowIndex num_rows_;
RowIndex num_rows_ = RowIndex(0);
// Current number of columns in the problem.
ColIndex num_cols_;
ColIndex num_cols_ = ColIndex(0);
// Index of the first slack variable in the input problem. We assume that all
// variables with index greater or equal to first_slack_col_ are slack
// variables.
ColIndex first_slack_col_;
ColIndex first_slack_col_ = ColIndex(0);
// We're using vectors after profiling and looking at the generated assembly
// it's as fast as std::unique_ptr as long as the size is properly reserved
@@ -714,31 +726,37 @@ class RevisedSimplex {
std::vector<ColIndex> bound_flip_candidates_;
// Total number of iterations performed.
uint64_t num_iterations_;
uint64_t num_iterations_ = 0;
// Number of iterations performed during the first (feasibility) phase.
uint64_t num_feasibility_iterations_;
uint64_t num_feasibility_iterations_ = 0;
// Number of iterations performed during the second (optimization) phase.
uint64_t num_optimization_iterations_;
uint64_t num_optimization_iterations_ = 0;
// Number of iterations performed during the push/crossover phase.
uint64_t num_push_iterations_ = 0;
// Deterministic time for DualPhaseIUpdatePriceOnReducedCostChange().
int64_t num_update_price_operations_ = 0;
// Total time spent in Solve().
double total_time_;
double total_time_ = 0.0;
// Time spent in the first (feasibility) phase.
double feasibility_time_;
double feasibility_time_ = 0.0;
// Time spent in the second (optimization) phase.
double optimization_time_;
double optimization_time_ = 0.0;
// Time spent in the push/crossover phase.
double push_time_ = 0.0;
// The internal deterministic time during the most recent call to
// RevisedSimplex::AdvanceDeterministicTime.
double last_deterministic_time_update_;
double last_deterministic_time_update_ = 0.0;
// Statistics about the iterations done by Minimize().
// Statistics about the iterations done by PrimalMinimize().
IterationStats iteration_stats_;
mutable RatioTestStats ratio_test_stats_;
@@ -763,8 +781,8 @@ class RevisedSimplex {
// Number of degenerate iterations made just before the current iteration.
int num_consecutive_degenerate_iterations_;
// Indicate if we are in the feasibility_phase (1st phase) or not.
bool feasibility_phase_;
// Indicate the current phase of the solve.
Phase phase_ = Phase::FEASIBILITY;
// Indicates whether simplex ended due to the objective limit being reached.
// Note that it's not enough to compare the final objective value with the