[GLOP] add solution polishing to increase its integrality; [CP-SAT] new stats exported, fix java samples, add quick_restart_no_lp to search diversification

This commit is contained in:
Laurent Perron
2020-11-16 08:44:14 +01:00
parent fb0e490af3
commit 94d61a273b
16 changed files with 291 additions and 11 deletions

View File

@@ -282,6 +282,13 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
basis_factorization_.IsRefactorized());
// If SetIntegralityScale() was called, we perform a polishing operation.
if (!integrality_scale_.empty() &&
problem_status_ == ProblemStatus::OPTIMAL) {
reduced_costs_.MaintainDualInfeasiblePositions(true);
GLOP_RETURN_IF_ERROR(Polish(time_limit));
}
// Remove the bound and cost shifts (or perturbations).
//
// Note(user): Currently, we never do both at the same time, so we could
@@ -2335,6 +2342,137 @@ Status RevisedSimplex::RefactorizeBasisIfNeeded(bool* refactorize) {
return Status::OK();
}
// Registers the integrality scale of a column for use by Polish(). A column
// whose scaled value should be integral gets a non-zero scale; all other
// columns keep the default scale of 0.0, which Polish() treats as "no
// integrality requirement".
void RevisedSimplex::SetIntegralityScale(ColIndex col, Fractional scale) {
  // Lazily grow the storage so that 'col' is addressable; newly created
  // entries default to 0.0 (no requirement).
  const ColIndex required_size = col + 1;
  if (integrality_scale_.size() < required_size) {
    integrality_scale_.resize(required_size, 0.0);
  }
  integrality_scale_[col] = scale;
}
// Tries to reduce the fractionality of the current basis by performing a few
// pivots that keep the objective (almost) unchanged: only non-basic columns
// with a near-zero reduced cost are considered as entering candidates, so any
// accepted pivot moves along an (approximately) optimal face of the LP.
//
// A pivot is accepted only when it strictly decreases the total "distance to
// integrality" of the columns registered via SetIntegralityScale() (see
// get_diff below). The number of attempts (10) and accepted pivots (5) is
// capped, so this is a cheap, best-effort post-processing step.
//
// Returns a non-OK status only if an internal factorization/pivot step fails.
Status RevisedSimplex::Polish(TimeLimit* time_limit) {
  GLOP_RETURN_ERROR_IF_NULL(time_limit);
  Cleanup update_deterministic_time_on_return(
      [this, time_limit]() { AdvanceDeterministicTime(time_limit); });

  // Get all non-basic variables with a reduced cost close to zero.
  // Note that because we only choose entering candidates with a cost of zero,
  // this set will not change (modulo epsilons).
  const DenseRow& rc = reduced_costs_.GetReducedCosts();
  std::vector<ColIndex> candidates;
  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
    if (!variables_info_.GetIsRelevantBitRow()[col]) continue;
    if (std::abs(rc[col]) < 1e-9) candidates.push_back(col);
  }

  bool refactorize = false;
  int num_pivots = 0;
  Fractional total_gain = 0.0;
  // At most 10 attempts and at most 5 accepted pivots: this keeps the cost of
  // the polishing step bounded.
  for (int i = 0; i < 10; ++i) {
    AdvanceDeterministicTime(time_limit);
    if (time_limit->LimitReached()) break;
    if (num_pivots >= 5) break;
    if (candidates.empty()) break;

    // Pick a random candidate and remove it from the list (swap with the back
    // so removal is O(1)).
    const int index =
        std::uniform_int_distribution<int>(0, candidates.size() - 1)(random_);
    const ColIndex entering_col = candidates[index];
    std::swap(candidates[index], candidates.back());
    candidates.pop_back();

    // We need the entering variable to move in the correct direction. The
    // "fake" reduced cost only encodes that direction for the ratio test; the
    // real reduced cost is ~0 by construction.
    Fractional fake_rc = 1.0;
    if (!variables_info_.GetCanDecreaseBitRow()[entering_col]) {
      CHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);
      fake_rc = -1.0;
    }

    // Compute the direction and by how much we can move along it.
    GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
    ComputeDirection(entering_col);
    Fractional step_length;
    RowIndex leaving_row;
    Fractional target_bound;
    bool local_refactorize = false;
    GLOP_RETURN_IF_ERROR(
        ChooseLeavingVariableRow(entering_col, fake_rc, &local_refactorize,
                                 &leaving_row, &step_length, &target_bound));

    // Skip candidates whose pivot would be numerically dubious or useless:
    // a refactorization request, an unbounded/tiny step, or a small pivot
    // element (|direction| < 0.1).
    if (local_refactorize) continue;
    if (step_length == kInfinity || step_length == -kInfinity) continue;
    if (std::abs(step_length) <= 1e-6) continue;
    if (leaving_row != kInvalidRow && std::abs(direction_[leaving_row]) < 0.1) {
      continue;
    }
    const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;

    // Evaluate if this pivot reduces the fractionality of the basis.
    //
    // TODO(user): Count with more weight variable with a small domain, i.e.
    // binary variable, compared to a variable in [0, 1k] ?
    //
    // get_diff returns (new fractionality - old fractionality) of a column
    // after scaling, so negative values are improvements. Columns without a
    // registered scale contribute 0.0.
    const auto get_diff = [this](ColIndex col, Fractional old_value,
                                 Fractional new_value) {
      if (col >= integrality_scale_.size() || integrality_scale_[col] == 0.0) {
        return 0.0;
      }
      const Fractional s = integrality_scale_[col];
      return (std::abs(new_value * s - std::round(new_value * s)) -
              std::abs(old_value * s - std::round(old_value * s)));
    };
    // Total change: the entering column plus every basic column touched by
    // the direction.
    Fractional diff = get_diff(entering_col, variable_values_.Get(entering_col),
                               variable_values_.Get(entering_col) + step);
    for (const auto e : direction_) {
      const ColIndex col = basis_[e.row()];
      const Fractional old_value = variable_values_.Get(col);
      const Fractional new_value = old_value - e.coefficient() * step;
      diff += get_diff(col, old_value, new_value);
    }

    // Ignore low decrease in integrality.
    if (diff > -1e-2) continue;
    total_gain -= diff;

    // We perform the change.
    num_pivots++;
    variable_values_.UpdateOnPivoting(direction_, entering_col, step);

    // This is a bound flip of the entering column: no basis change needed,
    // only the variable status is updated.
    if (leaving_row == kInvalidRow) {
      if (step > 0.0) {
        SetNonBasicVariableStatusAndDeriveValue(entering_col,
                                                VariableStatus::AT_UPPER_BOUND);
      } else if (step < 0.0) {
        SetNonBasicVariableStatusAndDeriveValue(entering_col,
                                                VariableStatus::AT_LOWER_BOUND);
      }
      reduced_costs_.SetAndDebugCheckThatColumnIsDualFeasible(entering_col);
      continue;
    }

    // Perform the pivot: update the norms/reduced costs incrementally, snap
    // the leaving variable to its bound (unless degenerate), then swap it
    // with the entering variable in the basis.
    const ColIndex leaving_col = basis_[leaving_row];
    update_row_.ComputeUpdateRow(leaving_row);
    primal_edge_norms_.UpdateBeforeBasisPivot(
        entering_col, leaving_col, leaving_row, direction_, &update_row_);
    reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
                                          &update_row_);
    const Fractional dir = -direction_[leaving_row] * step;
    const bool is_degenerate =
        (dir == 0.0) ||
        (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
        (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
    if (!is_degenerate) {
      variable_values_.Set(leaving_col, target_bound);
    }
    GLOP_RETURN_IF_ERROR(
        UpdateAndPivot(entering_col, leaving_row, target_bound));
  }
  VLOG(1) << "Polish num_pivots: " << num_pivots << " gain:" << total_gain;
  return Status::OK();
}
// Minimizes c.x subject to A.x = 0 where A is an mxn-matrix, c an n-vector, and
// x an n-vector.
//

View File

@@ -244,6 +244,12 @@ class RevisedSimplex {
void ComputeBasicVariablesForState(const LinearProgram& linear_program,
const BasisState& state);
// This is used in a MIP context to polish the final basis. We assume that the
// columns for which SetIntegralityScale() has been called correspond to
// integral variables once multiplied by the given factor.
void ClearIntegralityScales() { integrality_scale_.clear(); }
void SetIntegralityScale(ColIndex col, Fractional scale);
private:
// Propagates parameters_ to all the other classes that need it.
//
@@ -550,6 +556,16 @@ class RevisedSimplex {
// TODO(user): remove duplicate code between the two functions.
ABSL_MUST_USE_RESULT Status DualMinimize(TimeLimit* time_limit);
// Experimental. This is useful in a MIP context. It performs a few degenerate
// pivots to try to minimize the fractionality of the optimal basis.
//
// We assume that the columns for which SetIntegralityScale() has been called
// correspond to integral variable once scaled by the given factor.
//
// I could only find slides for the reference of this "LP Solution Polishing
// to improve MIP Performance", Matthias Miltenberger, Zuse Institute Berlin.
ABSL_MUST_USE_RESULT Status Polish(TimeLimit* time_limit);
// Utility functions to return the current ColIndex of the slack column with
// given number. Note that currently, such columns are always present in the
// internal representation of a linear program.
@@ -781,6 +797,9 @@ class RevisedSimplex {
// A random number generator.
random_engine_t random_;
// This is used by Polish().
DenseRow integrality_scale_;
DISALLOW_COPY_AND_ASSIGN(RevisedSimplex);
};

View File

@@ -94,6 +94,7 @@ cc_library(
":cp_model_loader",
":cp_model_utils",
":integer",
":linear_programming_constraint",
":model",
":sat_base",
"//ortools/base",

View File

@@ -562,7 +562,7 @@ enum CpSolverStatus {
//
// TODO(user): support returning multiple solutions. Look at the Stubby
// streaming API as we probably want to get them as they are found.
// Next id: 24
// Next id: 26
message CpSolverResponse {
// The status of the solve.
CpSolverStatus status = 1;
@@ -638,6 +638,9 @@ message CpSolverResponse {
int64 num_branches = 12;
int64 num_binary_propagations = 13;
int64 num_integer_propagations = 14;
int64 num_restarts = 24;
int64 num_lp_iterations = 25;
double wall_time = 15;
double user_time = 16;
double deterministic_time = 17;

View File

@@ -321,6 +321,23 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
strategies["core"] = new_params;
}
// It can be interesting to try core and lp.
{
SatParameters new_params = base_params;
new_params.set_search_branching(SatParameters::AUTOMATIC_SEARCH);
new_params.set_optimize_with_core(true);
new_params.set_linearization_level(1);
strategies["core_default_lp"] = new_params;
}
{
SatParameters new_params = base_params;
new_params.set_search_branching(SatParameters::AUTOMATIC_SEARCH);
new_params.set_optimize_with_core(true);
new_params.set_linearization_level(2);
strategies["core_max_lp"] = new_params;
}
// Search variation.
{
SatParameters new_params = base_params;
@@ -334,6 +351,11 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
SatParameters::PORTFOLIO_WITH_QUICK_RESTART_SEARCH);
strategies["quick_restart"] = new_params;
new_params.set_search_branching(
SatParameters::PORTFOLIO_WITH_QUICK_RESTART_SEARCH);
new_params.set_linearization_level(0);
strategies["quick_restart_no_lp"] = new_params;
// We force the max lp here too.
new_params.set_linearization_level(2);
new_params.set_search_branching(SatParameters::LP_SEARCH);
@@ -382,11 +404,15 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
names.push_back("no_lp");
names.push_back("max_lp");
if (cp_model.objective().vars_size() > 1) names.push_back("core");
// TODO(user): Experiment with core and LP.
// Only add this strategy if we have enough worker left for LNS.
if (num_workers > 8 || base_params.interleave_search()) {
names.push_back("quick_restart");
}
if (num_workers > 10) {
names.push_back("quick_restart_no_lp");
}
} else {
names.push_back("default_lp");
if (cp_model.search_strategy_size() > 0) names.push_back("fixed");
@@ -394,6 +420,9 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
names.push_back("no_lp");
names.push_back("max_lp");
names.push_back("quick_restart");
if (num_workers > 10) {
names.push_back("quick_restart_no_lp");
}
}
// Creates the diverse set of parameters with names and seed. We remove the

View File

@@ -307,6 +307,9 @@ std::string CpSolverResponseStats(const CpSolverResponse& response,
"\npropagations: ", response.num_binary_propagations());
absl::StrAppend(
&result, "\ninteger_propagations: ", response.num_integer_propagations());
absl::StrAppend(&result, "\nrestarts: ", response.num_restarts());
absl::StrAppend(&result, "\nlp_iterations: ", response.num_lp_iterations());
absl::StrAppend(&result, "\nwalltime: ", response.wall_time());
absl::StrAppend(&result, "\nusertime: ", response.user_time());
absl::StrAppend(&result,

View File

@@ -95,6 +95,9 @@ bool IntegerSumLE::Propagate() {
// Save the current sum of fixed variables.
if (is_registered_) {
rev_integer_value_repository_->SaveState(&rev_lb_fixed_vars_);
} else {
rev_num_fixed_vars_ = 0;
rev_lb_fixed_vars_ = 0;
}
// Compute the new lower bound and update the reversible structures.

View File

@@ -183,6 +183,11 @@ LinearProgrammingConstraint::LinearProgrammingConstraint(Model* model)
LinearProgrammingConstraint::~LinearProgrammingConstraint() {
VLOG(1) << "Total number of simplex iterations: "
<< total_num_simplex_iterations_;
for (int i = 0; i < num_solves_by_status_.size(); ++i) {
if (num_solves_by_status_[i] == 0) continue;
VLOG(1) << "#" << glop::ProblemStatus(i) << " : "
<< num_solves_by_status_[i];
}
}
void LinearProgrammingConstraint::AddLinearConstraint(
@@ -296,9 +301,28 @@ bool LinearProgrammingConstraint::CreateLpFromConstraintManager() {
for (int i = 0; i < integer_variables_.size(); ++i) {
CHECK_EQ(glop::ColIndex(i), lp_data_.CreateNewVariable());
}
// We remove fixed variables from the objective. This should help the LP
// scaling, but also our integer reason computation.
int new_size = 0;
objective_infinity_norm_ = 0;
for (const auto entry : integer_objective_) {
const IntegerVariable var = integer_variables_[entry.first.value()];
if (integer_trail_->IsFixedAtLevelZero(var)) {
integer_objective_offset_ +=
entry.second * integer_trail_->LevelZeroLowerBound(var);
continue;
}
objective_infinity_norm_ =
std::max(objective_infinity_norm_, IntTypeAbs(entry.second));
integer_objective_[new_size++] = entry;
lp_data_.SetObjectiveCoefficient(entry.first, ToDouble(entry.second));
}
objective_infinity_norm_ =
std::max(objective_infinity_norm_, IntTypeAbs(integer_objective_offset_));
integer_objective_.resize(new_size);
lp_data_.SetObjectiveOffset(ToDouble(integer_objective_offset_));
for (const LinearConstraintInternal& ct : integer_lp_) {
const ConstraintIndex row = lp_data_.CreateNewConstraint();
lp_data_.SetConstraintBounds(row, ToDouble(ct.lb), ToDouble(ct.ub));
@@ -322,14 +346,29 @@ bool LinearProgrammingConstraint::CreateLpFromConstraintManager() {
lp_data_.SetVariableBounds(glop::ColIndex(i), lb, ub);
}
// TODO(user): Better result if we remove fixed variables from the objective?
// Another option, because we have an idea of the actual optimal value as we
// do more and more solve, is that we can scale according to this value.
// TODO(user): As we have an idea of the LP optimal after the first solves,
// maybe we can adapt the scaling accordingly.
glop::GlopParameters params;
params.set_cost_scaling(glop::GlopParameters::MEAN_COST_SCALING);
scaler_.Scale(params, &lp_data_);
UpdateBoundsOfLpVariables();
// Set the information for the step to polish the LP basis. All our variables
// are integer, but for now, we just try to minimize the fractionality of the
// binary variables.
if (sat_parameters_.polish_lp_solution()) {
simplex_.ClearIntegralityScales();
for (int i = 0; i < num_vars; ++i) {
const IntegerVariable cp_var = integer_variables_[i];
const IntegerValue lb = integer_trail_->LevelZeroLowerBound(cp_var);
const IntegerValue ub = integer_trail_->LevelZeroUpperBound(cp_var);
if (lb != 0 || ub != 1) continue;
simplex_.SetIntegralityScale(
glop::ColIndex(i),
1.0 / scaler_.VariableScalingFactor(glop::ColIndex(i)));
}
}
lp_data_.NotifyThatColumnsAreClean();
lp_data_.AddSlackVariablesWhereNecessary(false);
VLOG(1) << "LP relaxation: " << lp_data_.GetDimensionString() << ". "
@@ -624,6 +663,16 @@ bool LinearProgrammingConstraint::SolveLp() {
<< average_degeneracy_.CurrentAverage();
}
const int status_as_int = static_cast<int>(simplex_.GetProblemStatus());
if (status_as_int >= num_solves_by_status_.size()) {
num_solves_by_status_.resize(status_as_int + 1);
}
num_solves_by_status_[status_as_int]++;
VLOG(2) << "lvl:" << trail_->CurrentDecisionLevel() << " "
<< simplex_.GetProblemStatus()
<< " iter:" << simplex_.GetNumberOfIterations()
<< " obj:" << simplex_.GetObjectiveValue();
if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
lp_solution_is_set_ = true;
lp_solution_level_ = trail_->CurrentDecisionLevel();
@@ -1996,6 +2045,7 @@ bool LinearProgrammingConstraint::ExactLpReasonning() {
}
CHECK(tmp_scattered_vector_.AddLinearExpressionMultiple(obj_scale,
integer_objective_));
CHECK(AddProductTo(-obj_scale, integer_objective_offset_, &rc_ub));
AdjustNewLinearConstraint(&integer_multipliers, &tmp_scattered_vector_,
&rc_ub);

View File

@@ -214,6 +214,10 @@ class LinearProgrammingConstraint : public PropagatorInterface,
return average_degeneracy_.CurrentAverage();
}
int64 total_num_simplex_iterations() const {
return total_num_simplex_iterations_;
}
private:
// Helper methods for branching. Returns true if branching on the given
// variable helps with more propagation or finds a conflict.
@@ -384,6 +388,7 @@ class LinearProgrammingConstraint : public PropagatorInterface,
LinearExpression terms;
};
LinearExpression integer_objective_;
IntegerValue integer_objective_offset_ = IntegerValue(0);
IntegerValue objective_infinity_norm_ = IntegerValue(0);
gtl::ITIVector<glop::RowIndex, LinearConstraintInternal> integer_lp_;
gtl::ITIVector<glop::RowIndex, IntegerValue> infinity_norms_;
@@ -505,6 +510,9 @@ class LinearProgrammingConstraint : public PropagatorInterface,
// Sum of all simplex iterations performed by this class. This is useful to
// test the incrementality and compare to other solvers.
int64 total_num_simplex_iterations_ = 0;
// Some stats on the LP statuses encountered.
std::vector<int64> num_solves_by_status_;
};
// A class that stores which LP propagator is associated to each variable.

View File

@@ -41,7 +41,7 @@ public class RabbitsAndPheasantsSat {
CpSolver solver = new CpSolver();
CpSolverStatus status = solver.solve(model);
if (status == CpSolverStatus.FEASIBLE) {
if (status == CpSolverStatus.OPTIMAL) {
System.out.println(solver.value(r) + " rabbits, and " + solver.value(p) + " pheasants");
}
}

View File

@@ -49,7 +49,7 @@ public class SimpleSatProgram {
CpSolverStatus status = solver.solve(model);
// [END solve]
if (status == CpSolverStatus.FEASIBLE) {
if (status == CpSolverStatus.OPTIMAL) {
System.out.println("x = " + solver.value(x));
System.out.println("y = " + solver.value(y));
System.out.println("z = " + solver.value(z));

View File

@@ -39,7 +39,7 @@ public class SolveWithTimeLimitSampleSat {
solver.getParameters().setMaxTimeInSeconds(10.0);
CpSolverStatus status = solver.solve(model);
if (status == CpSolverStatus.FEASIBLE) {
if (status == CpSolverStatus.OPTIMAL) {
System.out.println("x = " + solver.value(x));
System.out.println("y = " + solver.value(y));
System.out.println("z = " + solver.value(z));

View File

@@ -21,7 +21,7 @@ option java_multiple_files = true;
// Contains the definitions for all the sat algorithm parameters and their
// default values.
//
// NEXT TAG: 175
// NEXT TAG: 176
message SatParameters {
// In some context, like in a portfolio of search, it makes sense to name a
// given parameters set for logging purpose.
@@ -933,6 +933,12 @@ message SatParameters {
// stronger cuts.
optional bool use_implied_bounds = 144 [default = true];
// Whether we try to do a few degenerate iterations at the end of an LP solve
// to minimize the fractionality of the integer variables in the basis. This
// helps on some problems, but not so much on others. It also costs a bit of
// time to perform such a polishing step.
optional bool polish_lp_solution = 175 [default = false];
// ==========================================================================
// MIP -> CP-SAT (i.e. IP with integer coeff) conversion parameters that are
// used by our automatic "scaling" algorithm.

View File

@@ -87,6 +87,8 @@ int64 SatSolver::num_propagations() const {
return trail_->NumberOfEnqueues() - counters_.num_branches;
}
int64 SatSolver::num_restarts() const { return counters_.num_restarts; }
double SatSolver::deterministic_time() const {
// Each of these counters measures really basic operations. The weights are
// just an estimate of the operation complexity. Note that these counters are
// never
@@ -898,6 +900,9 @@ void SatSolver::Backtrack(int target_level) {
DCHECK_GE(target_level, 0);
DCHECK_LE(target_level, CurrentDecisionLevel());
// Any backtrack to the root from a positive one is counted as a restart.
if (target_level == 0) counters_.num_restarts++;
// Per the SatPropagator interface, this is needed before calling Untrail.
trail_->SetDecisionLevel(target_level);

View File

@@ -151,14 +151,14 @@ class SatSolver {
void SetAssignmentPreference(Literal literal, double weight) {
decision_policy_->SetAssignmentPreference(literal, weight);
}
std::vector<std::pair<Literal, double> > AllPreferences() const {
std::vector<std::pair<Literal, double>> AllPreferences() const {
return decision_policy_->AllPreferences();
}
void ResetDecisionHeuristic() {
return decision_policy_->ResetDecisionHeuristic();
}
void ResetDecisionHeuristicAndSetAllPreferences(
const std::vector<std::pair<Literal, double> >& prefs) {
const std::vector<std::pair<Literal, double>>& prefs) {
decision_policy_->ResetDecisionHeuristic();
for (const std::pair<Literal, double> p : prefs) {
decision_policy_->SetAssignmentPreference(p.first, p.second);
@@ -367,6 +367,11 @@ class SatSolver {
int64 num_failures() const;
int64 num_propagations() const;
// Note that we count the number of backtracks to level zero from a positive
// level. Those can correspond to actual restarts, or to conflicts that learn
// unit clauses, or to any other reason that triggers such a backtrack.
int64 num_restarts() const;
// A deterministic number that should be correlated with the time spent in
// the Solve() function. The order of magnitude should be close to the time
// in seconds.
@@ -687,7 +692,7 @@ class SatSolver {
SatPropagator* last_propagator_ = nullptr;
// For the old, non-model interface.
std::vector<std::unique_ptr<SatPropagator> > owned_propagators_;
std::vector<std::unique_ptr<SatPropagator>> owned_propagators_;
// Keep track of all binary clauses so they can be exported.
bool track_binary_clauses_;
@@ -730,6 +735,7 @@ class SatSolver {
struct Counters {
int64 num_branches = 0;
int64 num_failures = 0;
int64 num_restarts = 0;
// Minimization stats.
int64 num_minimizations = 0;

View File

@@ -25,6 +25,7 @@
#include "ortools/sat/cp_model.pb.h"
#include "ortools/sat/cp_model_utils.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/linear_programming_constraint.h"
#include "ortools/sat/model.h"
#include "ortools/sat/sat_base.h"
#include "ortools/util/time_limit.h"
@@ -520,12 +521,20 @@ void SharedResponseManager::SetStatsFromModelInternal(Model* model) {
best_response_.set_num_branches(sat_solver->num_branches());
best_response_.set_num_conflicts(sat_solver->num_failures());
best_response_.set_num_binary_propagations(sat_solver->num_propagations());
best_response_.set_num_restarts(sat_solver->num_restarts());
best_response_.set_num_integer_propagations(
integer_trail == nullptr ? 0 : integer_trail->num_enqueues());
auto* time_limit = model->Get<TimeLimit>();
best_response_.set_wall_time(time_limit->GetElapsedTime());
best_response_.set_deterministic_time(
time_limit->GetElapsedDeterministicTime());
int64 num_lp_iters = 0;
for (const LinearProgrammingConstraint* lp :
*model->GetOrCreate<LinearProgrammingConstraintCollection>()) {
num_lp_iters += lp->total_num_simplex_iterations();
}
best_response_.set_num_lp_iterations(num_lp_iters);
}
bool SharedResponseManager::ProblemIsSolved() const {