// Copyright 2010-2018 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/sat/linear_programming_constraint.h"

#include <cmath>
#include <limits>
#include <string>

#include "absl/container/flat_hash_map.h"
#include "ortools/base/commandlineflags.h"
#include "ortools/base/int128.h"
#include "ortools/base/int_type_indexed_vector.h"
#include "ortools/base/integral_types.h"
#include "ortools/base/logging.h"
#include "ortools/base/map_util.h"
#include "ortools/glop/parameters.pb.h"
#include "ortools/glop/preprocessor.h"
#include "ortools/glop/status.h"
#include "ortools/graph/strongly_connected_components.h"
#include "ortools/lp_data/lp_types.h"
#include "ortools/util/saturated_arithmetic.h"

namespace operations_research {
namespace sat {

using glop::ColIndex;
using glop::Fractional;
using glop::RowIndex;

const double LinearProgrammingConstraint::kCpEpsilon = 1e-4;
const double LinearProgrammingConstraint::kLpEpsilon = 1e-6;
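
// Note (added, descriptive): kCpEpsilon is the tolerance used below when
// comparing LP solution values against CP integer bounds and when checking
// integrality (see IncrementalPropagate() and Propagate()), while kLpEpsilon
// appears to be a tolerance expressed at the LP scale. This is an assumption
// inferred from the names and usages in this file.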

// TODO(user): make SatParameters singleton too, otherwise changing them after
// a constraint was added will have no effect on this class.
LinearProgrammingConstraint::LinearProgrammingConstraint(Model* model)
    : constraint_manager_(model),
      sat_parameters_(*(model->GetOrCreate<SatParameters>())),
      model_(model),
      time_limit_(model->GetOrCreate<TimeLimit>()),
      integer_trail_(model->GetOrCreate<IntegerTrail>()),
      trail_(model->GetOrCreate<Trail>()),
      model_heuristics_(model->GetOrCreate<SearchHeuristicsVector>()),
      integer_encoder_(model->GetOrCreate<IntegerEncoder>()),
      dispatcher_(model->GetOrCreate<LinearProgrammingDispatcher>()),
      expanded_lp_solution_(
          *model->GetOrCreate<LinearProgrammingConstraintLpSolution>()) {
  // Tweak the default parameters to make the solve incremental.
  glop::GlopParameters parameters;
  parameters.set_use_dual_simplex(true);
  simplex_.SetParameters(parameters);
}

void LinearProgrammingConstraint::AddLinearConstraint(
    const LinearConstraint& ct) {
  DCHECK(!lp_constraint_is_registered_);
  constraint_manager_.Add(ct);

  // We still create the mirror variable right away though.
  //
  // TODO(user): clean this up? Note that it is important that the variable
  // in lp_data_ never changes though, so we can restart from the current
  // lp solution and be incremental (even if the constraints changed).
  for (const IntegerVariable var : ct.vars) {
    GetOrCreateMirrorVariable(PositiveVariable(var));
  }
}
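
// Note (added, descriptive): GetOrCreateMirrorVariable() below returns the LP
// column associated with a positive CP variable, creating it on first use.
// Creating a column also registers this constraint in dispatcher_ and grows
// expanded_lp_solution_ so that both the variable and its negation can be
// indexed directly.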

glop::ColIndex LinearProgrammingConstraint::GetOrCreateMirrorVariable(
    IntegerVariable positive_variable) {
  DCHECK(VariableIsPositive(positive_variable));
  const auto it = mirror_lp_variable_.find(positive_variable);
  if (it == mirror_lp_variable_.end()) {
    const glop::ColIndex col(integer_variables_.size());
    mirror_lp_variable_[positive_variable] = col;
    integer_variables_.push_back(positive_variable);
    lp_solution_.push_back(std::numeric_limits<double>::infinity());
    lp_reduced_cost_.push_back(0.0);
    (*dispatcher_)[positive_variable] = this;

    const int index = std::max(positive_variable.value(),
                               NegationOf(positive_variable).value());
    if (index >= expanded_lp_solution_.size()) {
      expanded_lp_solution_.resize(index + 1, 0.0);
    }
    return col;
  }
  return it->second;
}

void LinearProgrammingConstraint::SetObjectiveCoefficient(IntegerVariable ivar,
                                                          IntegerValue coeff) {
  CHECK(!lp_constraint_is_registered_);
  objective_is_defined_ = true;
  IntegerVariable pos_var = VariableIsPositive(ivar) ? ivar : NegationOf(ivar);
  if (ivar != pos_var) coeff = -coeff;

  constraint_manager_.SetObjectiveCoefficient(pos_var, coeff);
  const glop::ColIndex col = GetOrCreateMirrorVariable(pos_var);
  integer_objective_.push_back({col, coeff});
  objective_infinity_norm_ =
      std::max(objective_infinity_norm_, IntTypeAbs(coeff));
}
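
// Note (added, descriptive): objective_infinity_norm_ tracks the maximum
// absolute objective coefficient. It is used later (see ScaleLpMultiplier())
// to bound the magnitude of the integer multipliers and avoid overflow.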

// TODO(user): As the search progresses, some variables might get fixed.
// Exploit this to reduce the number of variables in the LP and in the
// ConstraintManager? We might also detect during the search that two
// variables are equivalent.
//
// TODO(user): On TSP/VRP with a lot of cuts, this can take 20% of the overall
// running time. We should be able to almost remove most of this from the
// profile by being more incremental (modulo LP scaling).
//
// TODO(user): A longer term idea for LP with a lot of variables is to not
// add all variables to each LP solve and do some "sifting". That can be useful
// for TSP for instance where the number of edges is large, but only a small
// fraction will be used in the optimal solution.
void LinearProgrammingConstraint::CreateLpFromConstraintManager() {
  // Fill integer_lp_.
  integer_lp_.clear();
  infinity_norms_.clear();
  const auto& all_constraints = constraint_manager_.AllConstraints();
  for (const auto index : constraint_manager_.LpConstraints()) {
    const LinearConstraint& ct = all_constraints[index].constraint;
    integer_lp_.push_back(LinearConstraintInternal());
    LinearConstraintInternal& new_ct = integer_lp_.back();
    new_ct.lb = ct.lb;
    new_ct.ub = ct.ub;
    const int size = ct.vars.size();
    IntegerValue infinity_norm(0);
    if (ct.lb > kMinIntegerValue) {
      infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.lb));
    }
    if (ct.ub < kMaxIntegerValue) {
      infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.ub));
    }
    for (int i = 0; i < size; ++i) {
      // We only use positive variables inside this class.
      IntegerVariable var = ct.vars[i];
      IntegerValue coeff = ct.coeffs[i];
      if (!VariableIsPositive(var)) {
        var = NegationOf(var);
        coeff = -coeff;
      }
      infinity_norm = std::max(infinity_norm, IntTypeAbs(coeff));
      new_ct.terms.push_back({GetOrCreateMirrorVariable(var), coeff});
    }
    infinity_norms_.push_back(infinity_norm);

    // Important to keep lp_data_ "clean".
    std::sort(new_ct.terms.begin(), new_ct.terms.end());
  }

  // Copy the integer_lp_ into lp_data_.
  lp_data_.Clear();
  for (int i = 0; i < integer_variables_.size(); ++i) {
    CHECK_EQ(glop::ColIndex(i), lp_data_.CreateNewVariable());
  }
  for (const auto entry : integer_objective_) {
    lp_data_.SetObjectiveCoefficient(entry.first, ToDouble(entry.second));
  }
  for (const LinearConstraintInternal& ct : integer_lp_) {
    const ConstraintIndex row = lp_data_.CreateNewConstraint();
    lp_data_.SetConstraintBounds(row, ToDouble(ct.lb), ToDouble(ct.ub));
    for (const auto& term : ct.terms) {
      lp_data_.SetCoefficient(row, term.first, ToDouble(term.second));
    }
  }
  lp_data_.NotifyThatColumnsAreClean();

  // Scale lp_data_.
  scaler_.Clear();
  Scale(&lp_data_, &scaler_, glop::GlopParameters::DEFAULT);
  lp_data_.ScaleObjective();

  // ScaleBounds() looks at both the constraints and variable bounds, so we
  // initialize the LP variable bounds before scaling them.
  //
  // TODO(user): As part of the scaling, we may also want to shift the initial
  // variable bounds so that each variable contains the value zero in its
  // domain. Maybe just once and for all at the beginning.
  bound_scaling_factor_ = 1.0;
  UpdateBoundsOfLpVariables();
  bound_scaling_factor_ = lp_data_.ScaleBounds();

  lp_data_.NotifyThatColumnsAreClean();
  lp_data_.AddSlackVariablesWhereNecessary(false);
  VLOG(1) << "LP relaxation: " << lp_data_.GetDimensionString() << ". "
          << constraint_manager_.AllConstraints().size()
          << " Managed constraints.";
}

void LinearProgrammingConstraint::RegisterWith(Model* model) {
  DCHECK(!lp_constraint_is_registered_);
  lp_constraint_is_registered_ = true;
  model->GetOrCreate<LinearProgrammingConstraintCollection>()->push_back(this);

  // Note that this is not really needed but should lead to better cache
  // locality.
  std::sort(integer_objective_.begin(), integer_objective_.end());

  // Set the LP to its initial content.
  if (!sat_parameters_.add_lp_constraints_lazily()) {
    constraint_manager_.AddAllConstraintsToLp();
  }
  CreateLpFromConstraintManager();

  GenericLiteralWatcher* watcher = model->GetOrCreate<GenericLiteralWatcher>();
  const int watcher_id = watcher->Register(this);
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    watcher->WatchIntegerVariable(integer_variables_[i], watcher_id, i);
  }
  if (objective_is_defined_) {
    watcher->WatchUpperBound(objective_cp_, watcher_id);
  }
  watcher->SetPropagatorPriority(watcher_id, 2);
  watcher->AlwaysCallAtLevelZero(watcher_id);

  if (integer_variables_.size() >= 20) {  // Do not use on small subparts.
    auto* container = model->GetOrCreate<SearchHeuristicsVector>();
    container->push_back(HeuristicLPPseudoCostBinary(model));
    container->push_back(HeuristicLPMostInfeasibleBinary(model));
  }

  // Registering it with the trail makes sure this class is always in sync
  // when it is used in the decision heuristics.
  integer_trail_->RegisterReversibleClass(this);
  watcher->RegisterReversibleInt(watcher_id, &rev_optimal_constraints_size_);
}

void LinearProgrammingConstraint::SetLevel(int level) {
  optimal_constraints_.resize(rev_optimal_constraints_size_);
  if (lp_solution_is_set_ && level < lp_solution_level_) {
    lp_solution_is_set_ = false;
  }

  // Special case for level zero, we "reload" any previously known optimal
  // solution from that level.
  //
  // TODO(user): Keep all optimal solutions in the current branch?
  // TODO(user): Still try to add cuts/constraints though!
  if (level == 0 && !level_zero_lp_solution_.empty()) {
    lp_solution_is_set_ = true;
    lp_solution_ = level_zero_lp_solution_;
    lp_solution_level_ = 0;
    for (int i = 0; i < lp_solution_.size(); i++) {
      expanded_lp_solution_[integer_variables_[i]] = lp_solution_[i];
      expanded_lp_solution_[NegationOf(integer_variables_[i])] =
          -lp_solution_[i];
    }
  }
}

void LinearProgrammingConstraint::AddCutGenerator(CutGenerator generator) {
  for (const IntegerVariable var : generator.vars) {
    GetOrCreateMirrorVariable(VariableIsPositive(var) ? var : NegationOf(var));
  }
  cut_generators_.push_back(std::move(generator));
}

bool LinearProgrammingConstraint::IncrementalPropagate(
    const std::vector<int>& watch_indices) {
  if (!lp_solution_is_set_) return Propagate();

  // At level zero, if there is still a chance to add cuts or lazy constraints,
  // we re-run the LP.
  if (trail_->CurrentDecisionLevel() == 0 && !lp_at_level_zero_is_final_) {
    return Propagate();
  }

  // Check whether the change breaks the current LP solution. If it does, call
  // Propagate() on the current LP.
  for (const int index : watch_indices) {
    const double lb =
        ToDouble(integer_trail_->LowerBound(integer_variables_[index]));
    const double ub =
        ToDouble(integer_trail_->UpperBound(integer_variables_[index]));
    const double value = lp_solution_[index];
    if (value < lb - kCpEpsilon || value > ub + kCpEpsilon) return Propagate();
  }

  // TODO(user): The saved lp solution is still valid given the current
  // variable bounds, so the LP optimal didn't change. However we might still
  // want to add new cuts or new lazy constraints?
  //
  // TODO(user): Propagate the last optimal_constraint? Note that we need
  // to be careful since the reversible int in IntegerSumLE are not registered.
  // However, because we delete "optimal constraints" on backtrack, we might
  // not care.
  return true;
}

glop::Fractional LinearProgrammingConstraint::CpToLpScalingFactor(
    glop::ColIndex col) const {
  return scaler_.col_scale(col) / bound_scaling_factor_;
}

glop::Fractional LinearProgrammingConstraint::LpToCpScalingFactor(
    glop::ColIndex col) const {
  return bound_scaling_factor_ / scaler_.col_scale(col);
}
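
// Note (added, descriptive): the two factors above are exact inverses of each
// other, so a CP value multiplied by CpToLpScalingFactor() and then by
// LpToCpScalingFactor() for the same column round-trips (up to floating-point
// error).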

glop::Fractional LinearProgrammingConstraint::GetVariableValueAtCpScale(
    glop::ColIndex var) {
  return simplex_.GetVariableValue(var) * LpToCpScalingFactor(var);
}

double LinearProgrammingConstraint::GetSolutionValue(
    IntegerVariable variable) const {
  return lp_solution_[gtl::FindOrDie(mirror_lp_variable_, variable).value()];
}

double LinearProgrammingConstraint::GetSolutionReducedCost(
    IntegerVariable variable) const {
  return lp_reduced_cost_[gtl::FindOrDie(mirror_lp_variable_, variable)
                              .value()];
}

void LinearProgrammingConstraint::UpdateBoundsOfLpVariables() {
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    const IntegerVariable cp_var = integer_variables_[i];
    const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
    const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
    const double factor = CpToLpScalingFactor(glop::ColIndex(i));
    lp_data_.SetVariableBounds(glop::ColIndex(i), lb * factor, ub * factor);
  }
}

bool LinearProgrammingConstraint::SolveLp() {
  if (trail_->CurrentDecisionLevel() == 0) {
    lp_at_level_zero_is_final_ = false;
  }

  const auto status = simplex_.Solve(lp_data_, time_limit_);
  if (!status.ok()) {
    VLOG(1) << "The LP solver encountered an error: " << status.error_message();
    simplex_.ClearStateForNextSolve();
    return false;
  }
  average_degeneracy_.AddData(CalculateDegeneracy());
  if (average_degeneracy_.CurrentAverage() >= 1000.0) {
    VLOG(1) << "High average degeneracy: "
            << average_degeneracy_.CurrentAverage();
  }

  if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
    lp_solution_is_set_ = true;
    lp_solution_level_ = trail_->CurrentDecisionLevel();
    const int num_vars = integer_variables_.size();
    for (int i = 0; i < num_vars; i++) {
      const glop::Fractional value =
          GetVariableValueAtCpScale(glop::ColIndex(i));
      lp_solution_[i] = value;
      expanded_lp_solution_[integer_variables_[i]] = value;
      expanded_lp_solution_[NegationOf(integer_variables_[i])] = -value;
    }

    if (lp_solution_level_ == 0) {
      level_zero_lp_solution_ = lp_solution_;
    }
  }
  return true;
}

LinearConstraint LinearProgrammingConstraint::ConvertToLinearConstraint(
    const gtl::ITIVector<ColIndex, IntegerValue>& dense_vector,
    IntegerValue upper_bound) {
  LinearConstraint result;
  for (ColIndex col(0); col < dense_vector.size(); ++col) {
    const IntegerValue coeff = dense_vector[col];
    if (coeff == 0) continue;
    const IntegerVariable var = integer_variables_[col.value()];
    result.vars.push_back(var);
    result.coeffs.push_back(coeff);
  }
  result.lb = kMinIntegerValue;
  result.ub = upper_bound;
  return result;
}
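
// Note (added, descriptive): the constraint returned above only has an upper
// bound (its lb is kMinIntegerValue), i.e. it encodes
// sum(coeffs[i] * vars[i]) <= upper_bound over the positive CP variables.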

namespace {

// Returns false in case of overflow.
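//
// Example (added, illustrative): with multiplier = 3 and
// terms = {(col 0, 2), (col 1, -1)}, this adds 6 to (*dense_vector)[col 0]
// and -3 to (*dense_vector)[col 1]; it returns false if any of the saturated
// additions performed by AddProductTo() would overflow.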
bool AddLinearExpressionMultiple(
    IntegerValue multiplier,
    const std::vector<std::pair<ColIndex, IntegerValue>>& terms,
    gtl::ITIVector<ColIndex, IntegerValue>* dense_vector) {
  for (const std::pair<ColIndex, IntegerValue> term : terms) {
    if (!AddProductTo(multiplier, term.second, &(*dense_vector)[term.first])) {
      return false;
    }
  }
  return true;
}

}  // namespace

void LinearProgrammingConstraint::AddCutFromConstraints(
    const std::string& name,
    const std::vector<std::pair<RowIndex, IntegerValue>>& integer_multipliers) {
  // This is initialized to a valid linear constraint (by taking a linear
  // combination of the LP rows) and will be transformed into a cut if
  // possible.
  //
  // TODO(user): Ideally this linear combination should have only one
  // fractional variable (basis_col). But because of imprecision, we get a
  // bunch of fractional entries with small coefficients (relative to the one
  // of basis_col). We try to handle that in IntegerRoundingCut(), but it
  // might be better to add small multiples of the involved rows to get rid of
  // them.
  LinearConstraint cut;
  {
    gtl::ITIVector<ColIndex, IntegerValue> dense_cut;
    IntegerValue cut_ub;
    if (!ComputeNewLinearConstraint(
            /*use_constraint_status=*/true, integer_multipliers, &dense_cut,
            &cut_ub)) {
      VLOG(1) << "Issue, overflow!";
      return;
    }

    // Important: because we use integer_multipliers below, we cannot just
    // divide by GCD or call PreventOverflow() here.
    cut = ConvertToLinearConstraint(dense_cut, cut_ub);
  }

  // This should be tight!
  if (std::abs(ComputeActivity(cut, expanded_lp_solution_) - ToDouble(cut.ub)) /
          std::max(1.0, std::abs(ToDouble(cut.ub))) >
      1e-2) {
    VLOG(1) << "Cut not tight " << ComputeActivity(cut, expanded_lp_solution_)
            << " " << ToDouble(cut.ub);
    return;
  }

  // Fills data for IntegerRoundingCut().
  //
  // Note(user): we use the current bounds here, so the reasoning will only
  // produce a locally valid cut if we call this at a non-root node. We could
  // use the level zero bounds if we wanted to generate a globally valid cut
  // at another level, but we will likely not generate a constraint violating
  // the current lp solution in that case.
  std::vector<double> lp_values;
  std::vector<IntegerValue> var_lbs;
  std::vector<IntegerValue> var_ubs;
  for (const IntegerVariable var : cut.vars) {
    lp_values.push_back(expanded_lp_solution_[var]);
    var_lbs.push_back(integer_trail_->LowerBound(var));
    var_ubs.push_back(integer_trail_->UpperBound(var));
  }

  // Add slack.
  // definition: integer_lp_[row] + slack_row == bound;
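  // In other words (added, descriptive): for each used row that is not fixed,
  // we introduce one slack variable whose bounds are derived from the row
  // bounds below; a row at its upper bound gets a slack in [0, ub - lb] and a
  // row at its lower bound gets a slack in [lb - ub, 0].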
  const IntegerVariable first_slack(expanded_lp_solution_.size());
  for (const auto pair : integer_multipliers) {
    const RowIndex row = pair.first;
    const IntegerValue coeff = pair.second;
    const auto status = simplex_.GetConstraintStatus(row);
    if (status == glop::ConstraintStatus::FIXED_VALUE) continue;

    lp_values.push_back(0.0);
    cut.vars.push_back(first_slack + IntegerVariable(row.value()));
    cut.coeffs.push_back(coeff);

    const IntegerValue diff(CapSub(integer_lp_[row.value()].ub.value(),
                                   integer_lp_[row.value()].lb.value()));
    if (status == glop::ConstraintStatus::AT_UPPER_BOUND) {
      var_lbs.push_back(IntegerValue(0));
      var_ubs.push_back(diff);
    } else {
      CHECK_EQ(status, glop::ConstraintStatus::AT_LOWER_BOUND);
      var_lbs.push_back(-diff);
      var_ubs.push_back(IntegerValue(0));
    }
  }

  // Get the cut using some integer rounding heuristic.
  RoundingOptions options;
  options.use_mir = sat_parameters_.use_mir_rounding();
  options.max_scaling = sat_parameters_.max_integer_rounding_scaling();
  IntegerRoundingCut(options, lp_values, var_lbs, var_ubs, &cut);

  // Compute the activity. Warning: the cut no longer has the same size so we
  // cannot use lp_values. Note that the substitution below shouldn't change
  // the activity by definition.
  double activity = 0.0;
  for (int i = 0; i < cut.vars.size(); ++i) {
    if (cut.vars[i] < first_slack) {
      activity += ToDouble(cut.coeffs[i]) * expanded_lp_solution_[cut.vars[i]];
    }
  }
  const double kMinViolation = 1e-4;
  const double violation = activity - ToDouble(cut.ub);
  if (violation < kMinViolation) {
    VLOG(3) << "Bad cut " << activity << " <= " << ToDouble(cut.ub);
    return;
  }

  // Substitute any slack left.
  {
    int num_slack = 0;
    gtl::ITIVector<ColIndex, IntegerValue> dense_cut(integer_variables_.size(),
                                                     IntegerValue(0));
    IntegerValue cut_ub = cut.ub;
    bool overflow = false;
    for (int i = 0; i < cut.vars.size(); ++i) {
      if (cut.vars[i] < first_slack) {
        CHECK(VariableIsPositive(cut.vars[i]));
        const glop::ColIndex col =
            gtl::FindOrDie(mirror_lp_variable_, cut.vars[i]);
        dense_cut[col] = cut.coeffs[i];
      } else {
        ++num_slack;

        // Update the constraint.
        const glop::RowIndex row(cut.vars[i].value() - first_slack.value());
        const IntegerValue multiplier = -cut.coeffs[i];
        if (!AddLinearExpressionMultiple(
                multiplier, integer_lp_[row.value()].terms, &dense_cut)) {
          overflow = true;
          break;
        }

        // Update rhs.
        const auto status = simplex_.GetConstraintStatus(row);
        if (status == glop::ConstraintStatus::AT_LOWER_BOUND) {
          if (!AddProductTo(multiplier, integer_lp_[row.value()].lb, &cut_ub)) {
            overflow = true;
            break;
          }
        } else {
          CHECK_EQ(status, glop::ConstraintStatus::AT_UPPER_BOUND);
          if (!AddProductTo(multiplier, integer_lp_[row.value()].ub, &cut_ub)) {
            overflow = true;
            break;
          }
        }
      }
    }

    if (overflow) {
      VLOG(1) << "Overflow in slack removal.";
      return;
    }

    VLOG(3) << " num_slack: " << num_slack;
    cut = ConvertToLinearConstraint(dense_cut, cut_ub);
  }

  const double new_violation =
      ComputeActivity(cut, expanded_lp_solution_) - ToDouble(cut.ub);
  if (std::abs(violation - new_violation) >= 1e-4) {
    VLOG(1) << "Violation discrepancy after slack removal. "
            << " before = " << violation << " after = " << new_violation;
  }

  DivideByGCD(&cut);
  constraint_manager_.AddCut(cut, name, expanded_lp_solution_);
}

void LinearProgrammingConstraint::AddCGCuts() {
  CHECK_EQ(trail_->CurrentDecisionLevel(), 0);
  const RowIndex num_rows = lp_data_.num_constraints();
  for (RowIndex row(0); row < num_rows; ++row) {
    ColIndex basis_col = simplex_.GetBasis(row);
    const Fractional lp_value = GetVariableValueAtCpScale(basis_col);

    // TODO(user): We could just look at the diff with std::floor() in the hope
    // that when we are just under an integer, the exact computation below will
    // also be just under it.
    if (std::abs(lp_value - std::round(lp_value)) < 0.01) continue;

    // This is optional, but taking the negation allows us to change the
    // fractionality to 1 - fractionality. And having a fractionality close
    // to 1.0 results in smaller coefficients in IntegerRoundingCut().
    //
    // TODO(user): Perform more experiments. Provide an option?
    const bool take_negation = lp_value - std::floor(lp_value) < 0.5;

    // If this variable is a slack, we ignore it. This is because the
    // corresponding row is not tight under the given lp values.
    if (basis_col >= integer_variables_.size()) continue;

    const glop::ScatteredRow& lambda = simplex_.GetUnitRowLeftInverse(row);
    glop::DenseColumn lp_multipliers(num_rows, 0.0);
    double magnitude = 0.0;
    int num_non_zeros = 0;
    for (RowIndex row(0); row < num_rows; ++row) {
      lp_multipliers[row] = lambda.values[glop::RowToColIndex(row)];
      if (lp_multipliers[row] == 0.0) continue;

      if (take_negation) lp_multipliers[row] = -lp_multipliers[row];

      // There should be no BASIC status, but there could be imprecision
      // in the GetUnitRowLeftInverse() code? Not sure, so better be safe.
      const auto status = simplex_.GetConstraintStatus(row);
      if (status == glop::ConstraintStatus::BASIC) {
        VLOG(1) << "BASIC row not expected! " << lp_multipliers[row];
        lp_multipliers[row] = 0.0;
      }

      magnitude = std::max(magnitude, std::abs(lp_multipliers[row]));
      if (lp_multipliers[row] != 0.0) ++num_non_zeros;
    }
    if (num_non_zeros == 0) continue;

    Fractional scaling;

    // TODO(user): We use a lower value here otherwise we might run into
    // overflow while computing the cut. This should be fixable.
    const std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers =
        ScaleLpMultiplier(/*take_objective_into_account=*/false,
                          /*use_constraint_status=*/true, lp_multipliers,
                          &scaling, /*max_pow=*/52);
    AddCutFromConstraints("CG", integer_multipliers);
  }
}

void LinearProgrammingConstraint::AddMirCuts() {
  CHECK_EQ(trail_->CurrentDecisionLevel(), 0);
  const RowIndex num_rows = lp_data_.num_constraints();
  for (RowIndex row(0); row < num_rows; ++row) {
    const auto status = simplex_.GetConstraintStatus(row);
    if (status == glop::ConstraintStatus::BASIC) continue;
    if (status == glop::ConstraintStatus::FREE) continue;

    // TODO(user): Do not consider just one constraint, but take a linear
    // combination of a small number of constraints. There is a lot of
    // literature on the possible heuristics here.
    std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
    integer_multipliers.push_back({row, IntegerValue(1)});
    AddCutFromConstraints("MIR1", integer_multipliers);
  }
}

void LinearProgrammingConstraint::UpdateSimplexIterationLimit(
    const int64 min_iter, const int64 max_iter) {
  if (sat_parameters_.linearization_level() < 2) return;
  const int64 num_degenerate_columns = CalculateDegeneracy();
  const int64 num_cols = simplex_.GetProblemNumCols().value();
  if (num_cols <= 0) {
    return;
  }
  CHECK_GT(num_cols, 0);
  const bool is_degenerate = num_degenerate_columns >= 0.3 * num_cols;
  const int64 decrease_factor = (10 * num_degenerate_columns) / num_cols;
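  // Note (added, descriptive): decrease_factor is roughly ten times the
  // fraction of degenerate columns, so a highly degenerate basis divides the
  // next iteration limit by up to ~10 (or ~20 in the OPTIMAL branch below).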
  if (simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_FEASIBLE) {
    // We reached here probably because we predicted wrong. We use this as a
    // signal to increase the iterations or punish less for degeneracy compared
    // to the other part.
    if (is_degenerate) {
      next_simplex_iter_ /= std::max(int64{1}, decrease_factor);
    } else {
      next_simplex_iter_ *= 2;
    }
  } else if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
    if (is_degenerate) {
      next_simplex_iter_ /= std::max(int64{1}, 2 * decrease_factor);
    } else {
      // This is the most common case. We use the size of the problem to
      // determine the limit and ignore the previous limit.
      next_simplex_iter_ = num_cols / 40;
    }
  }
  next_simplex_iter_ =
      std::max(min_iter, std::min(max_iter, next_simplex_iter_));
}

bool LinearProgrammingConstraint::Propagate() {
  UpdateBoundsOfLpVariables();

  // TODO(user): It seems the time we lose by not stopping early might be worth
  // it because we end up with a better explanation at optimality.
  glop::GlopParameters parameters = simplex_.GetParameters();
  if (/* DISABLES CODE */ (false) && objective_is_defined_) {
    // We put a limit on the dual objective since there is no point increasing
    // it past our current objective upper-bound (we will already fail as soon
    // as we pass it). Note that this limit is properly transformed using the
    // objective scaling factor and offset stored in lp_data_.
    //
    // Note that we use a bigger epsilon here to be sure that if we abort
    // because of this, we will report a conflict.
    parameters.set_objective_upper_limit(
        static_cast<double>(integer_trail_->UpperBound(objective_cp_).value() +
                            100.0 * kCpEpsilon));
  }

  // Put an iteration limit on the work we do in the simplex for this call.
  // Note that because we are "incremental", even if we don't solve it this
  // time we will make progress towards a solve in the lower node of the tree
  // search.
  if (trail_->CurrentDecisionLevel() == 0) {
    // TODO(user): Dynamically change the iteration limit for root node as
    // well.
    parameters.set_max_number_of_iterations(2000);
  } else {
    parameters.set_max_number_of_iterations(next_simplex_iter_);
  }
  if (sat_parameters_.use_exact_lp_reason()) {
    parameters.set_change_status_to_imprecise(false);
    parameters.set_primal_feasibility_tolerance(1e-7);
    parameters.set_dual_feasibility_tolerance(1e-7);
  }

  simplex_.SetParameters(parameters);
  simplex_.NotifyThatMatrixIsUnchangedForNextSolve();
  if (!SolveLp()) return true;

  // Add new constraints to the LP and resolve?
  //
  // TODO(user): We might want to do that more than once. Currently we rely on
  // this being called again on the next IncrementalPropagate() call, but that
  // might not always happen at level zero.
  if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
    // First add any new lazy constraints or cuts that were previously
    // generated and are now cutting the current solution.
    if (constraint_manager_.ChangeLp(expanded_lp_solution_)) {
      CreateLpFromConstraintManager();
      if (!SolveLp()) return true;
    } else if (constraint_manager_.num_cuts() <
               sat_parameters_.max_num_cuts()) {
      const int old_num_cuts = constraint_manager_.num_cuts();

      // The "generic" cuts are currently part of this class as they are using
      // data from the current LP.
      //
      // TODO(user): Refactor so that they are just normal cut generators?
      if (trail_->CurrentDecisionLevel() == 0) {
        if (sat_parameters_.add_mir_cuts()) AddMirCuts();
        if (sat_parameters_.add_cg_cuts()) AddCGCuts();
      }

      // Try to add cuts.
      if (!cut_generators_.empty() &&
          (trail_->CurrentDecisionLevel() == 0 ||
           !sat_parameters_.only_add_cuts_at_level_zero())) {
        for (const CutGenerator& generator : cut_generators_) {
          generator.generate_cuts(expanded_lp_solution_, &constraint_manager_);
        }
      }

      if (constraint_manager_.num_cuts() > old_num_cuts &&
          constraint_manager_.ChangeLp(expanded_lp_solution_)) {
        CreateLpFromConstraintManager();
        const double old_obj = simplex_.GetObjectiveValue();
        if (!SolveLp()) return true;
        if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
          VLOG(1) << "Cuts relaxation improvement " << old_obj << " -> "
                  << simplex_.GetObjectiveValue()
                  << " diff: " << simplex_.GetObjectiveValue() - old_obj
                  << " level: " << trail_->CurrentDecisionLevel();
        }
      } else {
        if (trail_->CurrentDecisionLevel() == 0) {
          lp_at_level_zero_is_final_ = true;
        }
      }
    }
  }

  // A dual-unbounded problem is infeasible. We use the dual ray reason.
  if (simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_UNBOUNDED) {
    if (sat_parameters_.use_exact_lp_reason()) {
      if (!FillExactDualRayReason()) return true;
    } else {
      FillDualRayReason();
    }
    return integer_trail_->ReportConflict(integer_reason_);
  }

  // TODO(user): Update limits for DUAL_UNBOUNDED status as well.
  UpdateSimplexIterationLimit(/*min_iter=*/10, /*max_iter=*/1000);

  // Optimality deductions if problem has an objective.
  if (objective_is_defined_ &&
      (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL ||
       simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_FEASIBLE)) {
    // Try to filter optimal objective value. Note that GetObjectiveValue()
    // already takes care of the scaling so that it returns an objective in
    // the CP world.
    const double relaxed_optimal_objective = simplex_.GetObjectiveValue();
    const IntegerValue approximate_new_lb(
        static_cast<int64>(std::ceil(relaxed_optimal_objective - kCpEpsilon)));

    // TODO(user): Maybe do a bit less computation when we cannot propagate
    // anything.
    if (sat_parameters_.use_exact_lp_reason()) {
      if (!ExactLpReasonning()) return false;

      // Display when the inexact bound would have propagated more.
      const IntegerValue propagated_lb =
          integer_trail_->LowerBound(objective_cp_);
      if (approximate_new_lb > propagated_lb) {
        VLOG(2) << "LP objective [ " << ToDouble(propagated_lb) << ", "
                << ToDouble(integer_trail_->UpperBound(objective_cp_))
                << " ] approx_lb += "
                << ToDouble(approximate_new_lb - propagated_lb);
      }
    } else {
      FillReducedCostsReason();
      const double objective_cp_ub =
          ToDouble(integer_trail_->UpperBound(objective_cp_));
      ReducedCostStrengtheningDeductions(objective_cp_ub -
                                         relaxed_optimal_objective);
      if (!deductions_.empty()) {
        deductions_reason_ = integer_reason_;
        deductions_reason_.push_back(
            integer_trail_->UpperBoundAsLiteral(objective_cp_));
      }

      // Push new objective lb.
      if (approximate_new_lb > integer_trail_->LowerBound(objective_cp_)) {
        const IntegerLiteral deduction =
            IntegerLiteral::GreaterOrEqual(objective_cp_, approximate_new_lb);
        if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
          return false;
        }
      }

      // Push reduced cost strengthening bounds.
      if (!deductions_.empty()) {
        const int trail_index_with_same_reason = integer_trail_->Index();
        for (const IntegerLiteral deduction : deductions_) {
          if (!integer_trail_->Enqueue(deduction, {}, deductions_reason_,
                                       trail_index_with_same_reason)) {
            return false;
          }
        }
      }
    }
  }

  // Copy more info about the current solution.
  if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
    CHECK(lp_solution_is_set_);

    lp_objective_ = simplex_.GetObjectiveValue();
    lp_solution_is_integer_ = true;
    const int num_vars = integer_variables_.size();
    const double objective_scale = lp_data_.objective_scaling_factor();
    for (int i = 0; i < num_vars; i++) {
      // The reduced cost needs to be divided by LpToCpScalingFactor().
      lp_reduced_cost_[i] = simplex_.GetReducedCost(glop::ColIndex(i)) *
                            CpToLpScalingFactor(glop::ColIndex(i)) *
                            objective_scale;
      if (std::abs(lp_solution_[i] - std::round(lp_solution_[i])) >
          kCpEpsilon) {
        lp_solution_is_integer_ = false;
      }
    }

    if (compute_reduced_cost_averages_) {
      // Decay averages.
      num_calls_since_reduced_cost_averages_reset_++;
      if (num_calls_since_reduced_cost_averages_reset_ == 10000) {
        for (int i = 0; i < num_vars; i++) {
          sum_cost_up_[i] /= 2;
          num_cost_up_[i] /= 2;
          sum_cost_down_[i] /= 2;
          num_cost_down_[i] /= 2;
        }
        num_calls_since_reduced_cost_averages_reset_ = 0;
      }

      // Accumulate pseudo-costs of all unassigned variables.
      for (int i = 0; i < num_vars; i++) {
        const IntegerVariable var = this->integer_variables_[i];

        // Skip ignored and fixed variables.
        if (integer_trail_->IsCurrentlyIgnored(var)) continue;
        const IntegerValue lb = integer_trail_->LowerBound(var);
        const IntegerValue ub = integer_trail_->UpperBound(var);
        if (lb == ub) continue;

        // Skip reduced costs that are zero or close.
        const double rc = this->GetSolutionReducedCost(var);
        if (std::abs(rc) < kCpEpsilon) continue;

        if (rc < 0.0) {
          sum_cost_down_[i] -= rc;
          num_cost_down_[i]++;
        } else {
          sum_cost_up_[i] += rc;
          num_cost_up_[i]++;
        }
      }
    }
  }
  return true;
}

// Returns kMinIntegerValue in case of overflow.
//
// TODO(user): Because of PreventOverflow(), this should actually never happen.
IntegerValue LinearProgrammingConstraint::GetImpliedLowerBound(
    const LinearConstraint& terms) const {
  IntegerValue lower_bound(0);
  const int size = terms.vars.size();
  for (int i = 0; i < size; ++i) {
    const IntegerVariable var = terms.vars[i];
    const IntegerValue coeff = terms.coeffs[i];
    CHECK_NE(coeff, 0);
    const IntegerValue bound = coeff > 0 ? integer_trail_->LowerBound(var)
                                         : integer_trail_->UpperBound(var);
    if (!AddProductTo(bound, coeff, &lower_bound)) return kMinIntegerValue;
  }
  return lower_bound;
}

bool LinearProgrammingConstraint::PossibleOverflow(
    const LinearConstraint& constraint) {
  IntegerValue lower_bound(0);
  const int size = constraint.vars.size();
  for (int i = 0; i < size; ++i) {
    const IntegerVariable var = constraint.vars[i];
    const IntegerValue coeff = constraint.coeffs[i];
    CHECK_NE(coeff, 0);
    const IntegerValue bound = coeff > 0 ? integer_trail_->LowerBound(var)
                                         : integer_trail_->UpperBound(var);
    if (!AddProductTo(bound, coeff, &lower_bound)) {
      return true;
    }
  }
  const int64 slack = CapAdd(lower_bound.value(), -constraint.ub.value());
  if (slack == kint64min || slack == kint64max) {
    return true;
  }
  return false;
}

namespace {

absl::int128 FloorRatio128(absl::int128 x, IntegerValue positive_div) {
  absl::int128 div128(positive_div.value());
  absl::int128 result = x / div128;
  if (result * div128 > x) return result - 1;
  return result;
}
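
// Example (added, illustrative): integer division in C++ truncates towards
// zero, so -7 / 2 == -3; since -3 * 2 > -7, the function above returns -4,
// i.e. it rounds the ratio towards negative infinity.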

}  // namespace

void LinearProgrammingConstraint::PreventOverflow(LinearConstraint* constraint,
                                                  int max_pow) {
  // Compute the min/max possible partial sum.
  double sum_min = std::min(0.0, ToDouble(-constraint->ub));
  double sum_max = std::max(0.0, ToDouble(-constraint->ub));
  const int size = constraint->vars.size();
  for (int i = 0; i < size; ++i) {
    const IntegerVariable var = constraint->vars[i];
    const double coeff = ToDouble(constraint->coeffs[i]);
    const double prod1 = coeff * ToDouble(integer_trail_->LowerBound(var));
    const double prod2 = coeff * ToDouble(integer_trail_->UpperBound(var));
    sum_min += std::min(0.0, std::min(prod1, prod2));
    sum_max += std::max(0.0, std::max(prod1, prod2));
  }
  const double max_value = std::max(sum_max, -sum_min);
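
  // Note (added, descriptive): the divisor below is
  // ceil(max_value / 2^max_pow), so after dividing every coefficient the
  // largest possible partial sum is roughly bounded by 2^max_pow.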

  const IntegerValue divisor(std::ceil(std::ldexp(max_value, -max_pow)));
  if (divisor <= 1) return;

  // To be correct, we need to shift all variables so that they are positive.
  //
  // TODO(user): This code is tricky and similar to the one to generate cuts.
  // Test and maybe reduce the duplication? Note however that here we use
  // int128 to deal with potential overflow.
  int new_size = 0;
  absl::int128 adjust = 0;
  for (int i = 0; i < size; ++i) {
    const IntegerValue old_coeff = constraint->coeffs[i];
    const IntegerValue new_coeff = FloorRatio(old_coeff, divisor);

    // Compute the rhs adjustment.
    const absl::int128 remainder =
        absl::int128(old_coeff.value()) -
        absl::int128(new_coeff.value()) * absl::int128(divisor.value());
    adjust +=
        remainder *
        absl::int128(integer_trail_->LowerBound(constraint->vars[i]).value());

    if (new_coeff == 0) continue;
    constraint->vars[new_size] = constraint->vars[i];
    constraint->coeffs[new_size] = new_coeff;
    ++new_size;
  }
  constraint->vars.resize(new_size);
  constraint->coeffs.resize(new_size);

  constraint->ub = IntegerValue(static_cast<int64>(
      FloorRatio128(absl::int128(constraint->ub.value()) - adjust, divisor)));
}

// TODO(user): combine this with RelaxLinearReason() to avoid the extra
// magnitude vector and the weird precondition of RelaxLinearReason().
void LinearProgrammingConstraint::SetImpliedLowerBoundReason(
    const LinearConstraint& terms, IntegerValue slack) {
  integer_reason_.clear();
  std::vector<IntegerValue> magnitudes;
  const int size = terms.vars.size();
  for (int i = 0; i < size; ++i) {
    const IntegerVariable var = terms.vars[i];
    const IntegerValue coeff = terms.coeffs[i];
    CHECK_NE(coeff, 0);
    if (coeff > 0) {
      magnitudes.push_back(coeff);
      integer_reason_.push_back(integer_trail_->LowerBoundAsLiteral(var));
    } else {
      magnitudes.push_back(-coeff);
      integer_reason_.push_back(integer_trail_->UpperBoundAsLiteral(var));
    }
  }
  CHECK_GE(slack, 0);
  if (slack > 0) {
    integer_trail_->RelaxLinearReason(slack, magnitudes, &integer_reason_);
  }
  integer_trail_->RemoveLevelZeroBounds(&integer_reason_);
}
|
|
|
|
|
|
|
|
|
|
// TODO(user): Provide a sparse interface.
|
2019-01-17 16:12:52 +01:00
|
|
|
std::vector<std::pair<RowIndex, IntegerValue>>
|
|
|
|
|
LinearProgrammingConstraint::ScaleLpMultiplier(
|
2019-01-09 11:50:34 +01:00
|
|
|
bool take_objective_into_account, bool use_constraint_status,
|
2019-03-01 11:11:36 +01:00
|
|
|
const glop::DenseColumn& dense_lp_multipliers, Fractional* scaling,
|
|
|
|
|
int max_pow) const {
|
|
|
|
|
const Fractional global_scaling =
|
|
|
|
|
bound_scaling_factor_ / lp_data_.objective_scaling_factor();
|
|
|
|
|
|
|
|
|
|
double max_sum = 0.0;
|
|
|
|
|
std::vector<std::pair<RowIndex, Fractional>> cp_multipliers;
|
2018-11-05 16:24:47 +01:00
|
|
|
for (RowIndex row(0); row < dense_lp_multipliers.size(); ++row) {
|
|
|
|
|
const Fractional lp_multi = dense_lp_multipliers[row];
|
|
|
|
|
if (lp_multi == 0.0) continue;
|
|
|
|
|
|
|
|
|
|
// Remove trivial bad cases.
|
2019-01-09 11:50:34 +01:00
|
|
|
if (!use_constraint_status) {
|
|
|
|
|
if (lp_multi > 0.0 && integer_lp_[row.value()].ub >= kMaxIntegerValue) {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
if (lp_multi < 0.0 && integer_lp_[row.value()].lb <= kMinIntegerValue) {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2018-11-05 16:24:47 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const Fractional cp_multi =
|
|
|
|
|
lp_multi / scaler_.row_scale(row) / global_scaling;
|
|
|
|
|
cp_multipliers.push_back({row, cp_multi});
|
2019-03-01 11:11:36 +01:00
|
|
|
max_sum += ToDouble(infinity_norms_[row]) * std::abs(cp_multi);
|
2018-11-05 16:24:47 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// This behave exactly like if we had another "objective" constraint with
|
|
|
|
|
// an lp_multi of 1.0 and a cp_multi of 1.0.
|
|
|
|
|
if (take_objective_into_account) {
|
2019-03-01 11:11:36 +01:00
|
|
|
max_sum += ToDouble(objective_infinity_norm_);
|
2018-11-05 16:24:47 +01:00
|
|
|
}
|
|
|
|
|
|
2019-03-01 11:11:36 +01:00
|
|
|
// We want max_sum * scaling to be <= 2 ^ max_pow and fit on an int64.
|
|
|
|
|
*scaling = std::ldexp(1, max_pow) / max_sum;
|
2018-12-11 17:03:03 +01:00
|
|
|
|
2018-11-05 16:24:47 +01:00
|
|
|
// Scale the multipliers by *scaling.
|
|
|
|
|
//
|
|
|
|
|
// TODO(user): Maybe use int128 to avoid overflow?
|
2019-01-17 16:12:52 +01:00
|
|
|
std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
|
2018-11-05 16:24:47 +01:00
|
|
|
for (const auto entry : cp_multipliers) {
|
|
|
|
|
const IntegerValue coeff(std::round(entry.second * (*scaling)));
|
2019-01-17 16:12:52 +01:00
|
|
|
if (coeff != 0) integer_multipliers.push_back({entry.first, coeff});
|
2018-11-05 16:24:47 +01:00
|
|
|
}
|
2019-01-17 16:12:52 +01:00
|
|
|
return integer_multipliers;
|
|
|
|
|
}
|
2018-11-05 16:24:47 +01:00
|
|
|
|
2019-01-17 16:12:52 +01:00
|
|
|
bool LinearProgrammingConstraint::ComputeNewLinearConstraint(
|
|
|
|
|
bool use_constraint_status,
|
|
|
|
|
const std::vector<std::pair<RowIndex, IntegerValue>>& integer_multipliers,
|
|
|
|
|
gtl::ITIVector<ColIndex, IntegerValue>* dense_terms,
|
|
|
|
|
IntegerValue* upper_bound) const {
|
2018-11-05 16:24:47 +01:00
|
|
|
// Initialize the new constraint.
|
|
|
|
|
*upper_bound = 0;
|
|
|
|
|
dense_terms->assign(integer_variables_.size(), IntegerValue(0));
|
|
|
|
|
|
|
|
|
|
// Compute the new constraint by taking the linear combination given by
|
|
|
|
|
// integer_multipliers of the integer constraints in integer_lp_.
|
|
|
|
|
const ColIndex num_cols(integer_variables_.size());
|
2019-01-17 16:12:52 +01:00
|
|
|
for (const std::pair<RowIndex, IntegerValue> term : integer_multipliers) {
|
2018-11-05 16:24:47 +01:00
|
|
|
const RowIndex row = term.first;
|
|
|
|
|
const IntegerValue multiplier = term.second;
|
|
|
|
|
CHECK_LT(row, integer_lp_.size());
|
|
|
|
|
|
|
|
|
|
// Update the constraint.
|
|
|
|
|
if (!AddLinearExpressionMultiple(multiplier, integer_lp_[row.value()].terms,
|
|
|
|
|
dense_terms)) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Update the upper bound.
|
2019-01-09 11:50:34 +01:00
|
|
|
IntegerValue bound;
|
|
|
|
|
if (use_constraint_status) {
|
|
|
|
|
const auto status = simplex_.GetConstraintStatus(row);
|
|
|
|
|
if (status == glop::ConstraintStatus::FIXED_VALUE ||
|
|
|
|
|
status == glop::ConstraintStatus::AT_LOWER_BOUND) {
|
|
|
|
|
bound = integer_lp_[row.value()].lb;
|
|
|
|
|
} else {
|
|
|
|
|
CHECK_EQ(status, glop::ConstraintStatus::AT_UPPER_BOUND);
|
|
|
|
|
bound = integer_lp_[row.value()].ub;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
bound = multiplier > 0 ? integer_lp_[row.value()].ub
|
|
|
|
|
: integer_lp_[row.value()].lb;
|
|
|
|
|
}
|
|
|
|
|
if (!AddProductTo(multiplier, bound, upper_bound)) return false;
|
2018-11-05 16:24:47 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2019-03-01 11:11:36 +01:00
|
|
|
// TODO(user): no need to update the multipliers.
|
|
|
|
|
void LinearProgrammingConstraint::AdjustNewLinearConstraint(
|
|
|
|
|
std::vector<std::pair<glop::RowIndex, IntegerValue>>* integer_multipliers,
|
|
|
|
|
gtl::ITIVector<ColIndex, IntegerValue>* dense_terms,
|
|
|
|
|
IntegerValue* upper_bound) const {
|
|
|
|
|
const IntegerValue kMaxWantedCoeff(1e18);
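  // Note: 1e18 is comfortably below the int64 limit (about 9.2e18), which
  // leaves some headroom for the additions performed below.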
|
|
|
|
|
for (std::pair<RowIndex, IntegerValue>& term : *integer_multipliers) {
|
|
|
|
|
const RowIndex row = term.first;
|
|
|
|
|
const IntegerValue multiplier = term.second;
|
|
|
|
|
if (multiplier == 0) continue;
|
|
|
|
|
|
|
|
|
|
// We will only allow change of the form "multiplier += to_add" with to_add
|
|
|
|
|
// in [-negative_limit, positive_limit].
|
|
|
|
|
IntegerValue negative_limit = kMaxWantedCoeff;
|
|
|
|
|
IntegerValue positive_limit = kMaxWantedCoeff;
|
|
|
|
|
|
|
|
|
|
// Make sure we never change the sign of the multiplier, except if the
|
|
|
|
|
// row is an equality in which case we don't care.
|
|
|
|
|
if (integer_lp_[row.value()].ub != integer_lp_[row.value()].lb) {
|
|
|
|
|
if (multiplier > 0) {
|
|
|
|
|
negative_limit = std::min(negative_limit, multiplier);
|
|
|
|
|
} else {
|
|
|
|
|
positive_limit = std::min(positive_limit, -multiplier);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Make sure upper_bound + to_add * row_bound never overflow.
|
|
|
|
|
const IntegerValue row_bound = multiplier > 0 ? integer_lp_[row.value()].ub
|
|
|
|
|
: integer_lp_[row.value()].lb;
|
|
|
|
|
if (row_bound != 0) {
|
|
|
|
|
const IntegerValue limit1 = FloorRatio(
|
|
|
|
|
std::max(IntegerValue(0), kMaxWantedCoeff - IntTypeAbs(*upper_bound)),
|
|
|
|
|
IntTypeAbs(row_bound));
|
|
|
|
|
const IntegerValue limit2 =
|
|
|
|
|
FloorRatio(kMaxWantedCoeff, IntTypeAbs(row_bound));
|
|
|
|
|
if (*upper_bound > 0 == row_bound > 0) { // Same sign.
|
|
|
|
|
positive_limit = std::min(positive_limit, limit1);
|
|
|
|
|
negative_limit = std::min(negative_limit, limit2);
|
|
|
|
|
} else {
|
|
|
|
|
negative_limit = std::min(negative_limit, limit1);
|
|
|
|
|
positive_limit = std::min(positive_limit, limit2);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
    // If we add the row to the dense_terms, diff will indicate by how much
    // |upper_bound - ImpliedLB(dense_terms)| will change. That corresponds to
    // increasing the multiplier by 1.
|
|
|
|
|
IntegerValue positive_diff = row_bound;
|
|
|
|
|
IntegerValue negative_diff = row_bound;
|
|
|
|
|
|
|
|
|
|
    // TODO(user): we could relax some of the conditions a bit and allow a sign
    // change. It is just trickier to compute the diff when we allow such
    // changes.
|
|
|
|
|
for (const auto entry : integer_lp_[row.value()].terms) {
|
|
|
|
|
const ColIndex col = entry.first;
|
|
|
|
|
const IntegerValue coeff = entry.second;
|
|
|
|
|
CHECK_NE(coeff, 0);
|
|
|
|
|
|
|
|
|
|
      // Moving a variable away from zero seems to improve the bound even
      // if it reduces the number of non-zeros. This is why positive_diff
      // and negative_diff are not the same.
|
|
|
|
|
const IntegerValue current = (*dense_terms)[col];
|
|
|
|
|
if (current == 0) {
|
|
|
|
|
const IntegerValue overflow_limit(
|
|
|
|
|
FloorRatio(kMaxWantedCoeff, IntTypeAbs(coeff)));
|
|
|
|
|
positive_limit = std::min(positive_limit, overflow_limit);
|
|
|
|
|
negative_limit = std::min(negative_limit, overflow_limit);
|
|
|
|
|
const IntegerVariable var = integer_variables_[col.value()];
|
|
|
|
|
if (coeff > 0) {
|
|
|
|
|
positive_diff -= coeff * integer_trail_->LowerBound(var);
|
|
|
|
|
negative_diff -= coeff * integer_trail_->UpperBound(var);
|
|
|
|
|
} else {
|
|
|
|
|
positive_diff -= coeff * integer_trail_->UpperBound(var);
|
|
|
|
|
negative_diff -= coeff * integer_trail_->LowerBound(var);
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// We don't want to change the sign of current or to have an overflow.
|
|
|
|
|
IntegerValue before_sign_change(
|
|
|
|
|
FloorRatio(IntTypeAbs(current), IntTypeAbs(coeff)));
|
|
|
|
|
|
|
|
|
|
// If the variable is fixed, we don't actually care about changing the
|
|
|
|
|
// sign.
|
|
|
|
|
const IntegerVariable var = integer_variables_[col.value()];
|
|
|
|
|
if (integer_trail_->LowerBound(var) == integer_trail_->UpperBound(var)) {
|
|
|
|
|
before_sign_change = kMaxWantedCoeff;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const IntegerValue overflow_limit(
|
|
|
|
|
FloorRatio(kMaxWantedCoeff - IntTypeAbs(current), IntTypeAbs(coeff)));
|
|
|
|
|
if (current > 0 == coeff > 0) { // Same sign.
|
|
|
|
|
negative_limit = std::min(negative_limit, before_sign_change);
|
|
|
|
|
positive_limit = std::min(positive_limit, overflow_limit);
|
|
|
|
|
} else {
|
|
|
|
|
negative_limit = std::min(negative_limit, overflow_limit);
|
|
|
|
|
positive_limit = std::min(positive_limit, before_sign_change);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
      // This is how the diff changes.
|
|
|
|
|
const IntegerValue implied = current > 0
|
|
|
|
|
? integer_trail_->LowerBound(var)
|
|
|
|
|
: integer_trail_->UpperBound(var);
|
|
|
|
|
|
|
|
|
|
positive_diff -= coeff * implied;
|
|
|
|
|
negative_diff -= coeff * implied;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
    // Only add a multiple of this row if it tightens the final constraint.
|
|
|
|
|
IntegerValue to_add(0);
|
|
|
|
|
if (positive_diff < 0 && positive_limit > 0) {
|
|
|
|
|
to_add = positive_limit;
|
|
|
|
|
}
|
|
|
|
|
if (negative_diff > 0 && negative_limit > 0) {
|
|
|
|
|
// Pick this if it is better than the positive sign.
|
|
|
|
|
if (to_add == 0 || IntTypeAbs(negative_limit * negative_diff) >
|
|
|
|
|
IntTypeAbs(positive_limit * positive_diff)) {
|
|
|
|
|
to_add = -negative_limit;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (to_add != 0) {
|
|
|
|
|
term.second += to_add;
|
|
|
|
|
*upper_bound += to_add * row_bound;
|
|
|
|
|
for (const auto entry : integer_lp_[row.value()].terms) {
|
|
|
|
|
const ColIndex col = entry.first;
|
|
|
|
|
const IntegerValue coeff = entry.second;
|
|
|
|
|
(*dense_terms)[col] += to_add * coeff;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-05 16:24:47 +01:00
|
|
|
// The "exact" computation go as follow:
|
|
|
|
|
//
|
|
|
|
|
// Given any INTEGER linear combination of the LP constraints, we can create a
|
|
|
|
|
// new integer constraint that is valid (its computation must not overflow
|
2018-12-21 13:59:58 +01:00
|
|
|
// though). Lets call this "linear_combination <= ub". We can then always add to
|
|
|
|
|
// it the inequality "objective_terms <= objective_var", so we get:
|
|
|
|
|
// ImpliedLB(objective_terms + linear_combination) - ub <= objective_var.
|
2018-11-05 16:24:47 +01:00
|
|
|
// where ImpliedLB() is computed from the variable current bounds.
|
|
|
|
|
//
|
|
|
|
|
// Now, if we use for the linear combination and approximation of the optimal
|
2018-12-21 13:59:58 +01:00
|
|
|
// negated dual LP values (by scaling them and rounding them to integer), we
|
|
|
|
|
// will get an EXACT objective lower bound that is more or less the same as the
|
|
|
|
|
// inexact bound given by the LP relaxation. This allows to derive exact reasons
|
|
|
|
|
// for any propagation done by this constraint.
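//
// Small illustration (made-up numbers): with linear_combination = 2x + 3y,
// ub = 10, objective_terms = x + y and bounds x >= 1, y >= 2, summing the two
// inequalities gives 3x + 4y <= objective_var + 10. Since
// ImpliedLB(3x + 4y) = 3 * 1 + 4 * 2 = 11, we deduce objective_var >= 1.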
|
|
|
|
|
bool LinearProgrammingConstraint::ExactLpReasonning() {
|
2018-11-05 16:24:47 +01:00
|
|
|
// Clear old reason and deductions.
|
|
|
|
|
integer_reason_.clear();
|
|
|
|
|
deductions_.clear();
|
|
|
|
|
deductions_reason_.clear();
|
|
|
|
|
|
|
|
|
|
// The row multipliers will be the negation of the LP duals.
|
|
|
|
|
//
|
|
|
|
|
// TODO(user): Provide and use a sparse API in Glop to get the duals.
|
|
|
|
|
const RowIndex num_rows = simplex_.GetProblemNumRows();
|
|
|
|
|
glop::DenseColumn lp_multipliers(num_rows);
|
|
|
|
|
for (RowIndex row(0); row < num_rows; ++row) {
|
|
|
|
|
lp_multipliers[row] = -simplex_.GetDualValue(row);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Fractional scaling;
|
2019-03-01 11:11:36 +01:00
|
|
|
std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers =
|
2019-01-17 16:12:52 +01:00
|
|
|
ScaleLpMultiplier(/*take_objective_into_account=*/true,
|
|
|
|
|
/*use_constraint_status=*/false, lp_multipliers,
|
|
|
|
|
&scaling);
|
|
|
|
|
|
2018-11-05 16:24:47 +01:00
|
|
|
gtl::ITIVector<ColIndex, IntegerValue> reduced_costs;
|
2018-12-21 13:59:58 +01:00
|
|
|
IntegerValue rc_ub;
|
2019-04-05 17:00:40 +02:00
|
|
|
if (!ComputeNewLinearConstraint(
|
|
|
|
|
/*use_constraint_status=*/false, integer_multipliers, &reduced_costs,
|
|
|
|
|
&rc_ub)) {
|
|
|
|
|
VLOG(1) << "Issue while computing the exact LP reason. Aborting.";
|
|
|
|
|
return true;
|
|
|
|
|
}
|
2018-11-05 16:24:47 +01:00
|
|
|
|
|
|
|
|
// The "objective constraint" behave like if the unscaled cp multiplier was
|
|
|
|
|
// 1.0, so we will multiply it by this number and add it to reduced_costs.
|
|
|
|
|
const IntegerValue obj_scale(std::round(scaling));
|
2018-12-11 17:03:03 +01:00
|
|
|
if (obj_scale == 0) {
|
2019-03-01 11:11:36 +01:00
|
|
|
VLOG(1) << "Overflow during exact LP reasoning. scaling=" << scaling;
|
2018-12-21 13:59:58 +01:00
|
|
|
return true;
|
2018-11-05 16:24:47 +01:00
|
|
|
}
|
2019-03-01 11:11:36 +01:00
|
|
|
CHECK(AddLinearExpressionMultiple(obj_scale, integer_objective_,
|
|
|
|
|
&reduced_costs));
|
2018-11-05 16:24:47 +01:00
|
|
|
|
2019-03-01 11:11:36 +01:00
|
|
|
AdjustNewLinearConstraint(&integer_multipliers, &reduced_costs, &rc_ub);
|
2018-11-05 16:24:47 +01:00
|
|
|
|
2018-12-21 13:59:58 +01:00
|
|
|
  // Create the IntegerSumLE that will allow us to propagate the objective and,
  // more generally, to do reduced cost fixing.
|
2019-01-09 11:50:34 +01:00
|
|
|
LinearConstraint new_constraint =
|
|
|
|
|
ConvertToLinearConstraint(reduced_costs, rc_ub);
|
|
|
|
|
new_constraint.vars.push_back(objective_cp_);
|
|
|
|
|
new_constraint.coeffs.push_back(-obj_scale);
|
|
|
|
|
DivideByGCD(&new_constraint);
|
2019-04-18 13:29:21 +02:00
|
|
|
PreventOverflow(&new_constraint);
|
|
|
|
|
CHECK(!PossibleOverflow(new_constraint));
|
2018-11-05 16:24:47 +01:00
|
|
|
|
2018-12-21 13:59:58 +01:00
|
|
|
IntegerSumLE* cp_constraint =
|
2019-01-09 11:50:34 +01:00
|
|
|
new IntegerSumLE({}, new_constraint.vars, new_constraint.coeffs,
|
|
|
|
|
new_constraint.ub, model_);
|
2018-12-21 13:59:58 +01:00
|
|
|
optimal_constraints_.emplace_back(cp_constraint);
|
|
|
|
|
rev_optimal_constraints_size_ = optimal_constraints_.size();
|
|
|
|
|
return cp_constraint->Propagate();
|
2018-11-05 16:24:47 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool LinearProgrammingConstraint::FillExactDualRayReason() {
|
|
|
|
|
Fractional scaling;
|
2019-03-01 11:11:36 +01:00
|
|
|
std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers =
|
2019-01-17 16:12:52 +01:00
|
|
|
ScaleLpMultiplier(/*take_objective_into_account=*/false,
|
|
|
|
|
/*use_constraint_status=*/false, simplex_.GetDualRay(),
|
|
|
|
|
&scaling);
|
|
|
|
|
|
2018-11-05 16:24:47 +01:00
|
|
|
gtl::ITIVector<ColIndex, IntegerValue> dense_new_constraint;
|
|
|
|
|
IntegerValue new_constraint_ub;
|
2019-04-05 17:00:40 +02:00
|
|
|
if (!ComputeNewLinearConstraint(
|
|
|
|
|
/*use_constraint_status=*/false, integer_multipliers,
|
|
|
|
|
&dense_new_constraint, &new_constraint_ub)) {
|
|
|
|
|
VLOG(1) << "Isse while computing the exact dual ray reason. Aborting.";
|
|
|
|
|
return false;
|
|
|
|
|
}
|
2019-03-01 11:11:36 +01:00
|
|
|
|
|
|
|
|
AdjustNewLinearConstraint(&integer_multipliers, &dense_new_constraint,
|
|
|
|
|
&new_constraint_ub);
|
|
|
|
|
|
|
|
|
|
LinearConstraint new_constraint =
|
2019-01-09 11:50:34 +01:00
|
|
|
ConvertToLinearConstraint(dense_new_constraint, new_constraint_ub);
|
2019-03-01 11:11:36 +01:00
|
|
|
DivideByGCD(&new_constraint);
|
2019-04-18 13:29:21 +02:00
|
|
|
PreventOverflow(&new_constraint);
|
|
|
|
|
CHECK(!PossibleOverflow(new_constraint));
|
|
|
|
|
|
2018-11-05 16:24:47 +01:00
|
|
|
const IntegerValue implied_lb = GetImpliedLowerBound(new_constraint);
|
2019-01-09 11:50:34 +01:00
|
|
|
if (implied_lb <= new_constraint.ub) {
|
2018-12-17 16:50:15 +01:00
|
|
|
VLOG(1) << "LP exact dual ray not infeasible,"
|
|
|
|
|
<< " implied_lb: " << implied_lb.value() / scaling
|
2019-01-09 11:50:34 +01:00
|
|
|
<< " ub: " << new_constraint.ub.value() / scaling;
|
2018-11-05 16:24:47 +01:00
|
|
|
return false;
|
|
|
|
|
}
|
2019-01-09 11:50:34 +01:00
|
|
|
const IntegerValue slack = (implied_lb - new_constraint.ub) - 1;
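  // We only need ImpliedLB > ub for the conflict, so the reason may be relaxed
  // by up to (implied_lb - ub) - 1 while remaining infeasible.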
|
2018-11-05 16:24:47 +01:00
|
|
|
SetImpliedLowerBoundReason(new_constraint, slack);
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2017-06-28 14:33:56 +02:00
|
|
|
void LinearProgrammingConstraint::FillReducedCostsReason() {
|
2017-03-28 16:11:06 +02:00
|
|
|
integer_reason_.clear();
|
|
|
|
|
const int num_vars = integer_variables_.size();
|
|
|
|
|
for (int i = 0; i < num_vars; i++) {
|
2018-01-10 13:21:06 +01:00
|
|
|
const double rc = simplex_.GetReducedCost(glop::ColIndex(i));
|
2018-02-12 11:36:18 +01:00
|
|
|
if (rc > kLpEpsilon) {
|
2017-03-28 16:11:06 +02:00
|
|
|
integer_reason_.push_back(
|
|
|
|
|
integer_trail_->LowerBoundAsLiteral(integer_variables_[i]));
|
2018-02-12 11:36:18 +01:00
|
|
|
} else if (rc < -kLpEpsilon) {
|
2017-03-28 16:11:06 +02:00
|
|
|
integer_reason_.push_back(
|
|
|
|
|
integer_trail_->UpperBoundAsLiteral(integer_variables_[i]));
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-11-05 16:24:47 +01:00
|
|
|
|
|
|
|
|
integer_trail_->RemoveLevelZeroBounds(&integer_reason_);
|
2017-03-28 16:11:06 +02:00
|
|
|
}
|
|
|
|
|
|
2019-05-05 18:02:49 +02:00
|
|
|
int64 LinearProgrammingConstraint::CalculateDegeneracy() const {
|
|
|
|
|
const glop::ColIndex num_vars = simplex_.GetProblemNumCols();
|
2019-05-03 16:30:50 +02:00
|
|
|
int num_non_basic_with_zero_rc = 0;
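  // A non-basic variable with a zero reduced cost indicates an alternative
  // optimal basis; counting them is a cheap proxy for how degenerate the
  // current LP optimum is.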
|
2019-05-05 18:02:49 +02:00
|
|
|
for (glop::ColIndex i(0); i < num_vars; ++i) {
|
|
|
|
|
const double rc = simplex_.GetReducedCost(i);
|
2019-05-03 16:30:50 +02:00
|
|
|
if (rc != 0.0) continue;
|
2019-05-05 18:02:49 +02:00
|
|
|
if (simplex_.GetVariableStatus(i) == glop::VariableStatus::BASIC) {
|
2019-05-03 16:30:50 +02:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
num_non_basic_with_zero_rc++;
|
|
|
|
|
}
|
2019-05-05 18:02:49 +02:00
|
|
|
return num_non_basic_with_zero_rc;
|
2019-05-03 16:30:50 +02:00
|
|
|
}
|
|
|
|
|
|
2017-03-28 16:11:06 +02:00
|
|
|
void LinearProgrammingConstraint::FillDualRayReason() {
|
|
|
|
|
integer_reason_.clear();
|
|
|
|
|
const int num_vars = integer_variables_.size();
|
|
|
|
|
for (int i = 0; i < num_vars; i++) {
|
2017-06-28 14:33:56 +02:00
|
|
|
// TODO(user): Like for FillReducedCostsReason(), the bounds could be
|
2017-03-28 16:11:06 +02:00
|
|
|
    // extended here. Actually, the "dual ray cost updates" are the reduced
    // costs of an optimal solution if we were optimizing one direction of one
    // basic variable. The simplex_ interface would need to be slightly
    // extended to
|
|
|
|
|
// retrieve the basis column in question and the variable values though.
|
2018-01-10 13:21:06 +01:00
|
|
|
const double rc = simplex_.GetDualRayRowCombination()[glop::ColIndex(i)];
|
2018-02-12 11:36:18 +01:00
|
|
|
if (rc > kLpEpsilon) {
|
2017-03-28 16:11:06 +02:00
|
|
|
integer_reason_.push_back(
|
|
|
|
|
integer_trail_->LowerBoundAsLiteral(integer_variables_[i]));
|
2018-02-12 11:36:18 +01:00
|
|
|
} else if (rc < -kLpEpsilon) {
|
2017-03-28 16:11:06 +02:00
|
|
|
integer_reason_.push_back(
|
|
|
|
|
integer_trail_->UpperBoundAsLiteral(integer_variables_[i]));
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-11-05 16:24:47 +01:00
|
|
|
integer_trail_->RemoveLevelZeroBounds(&integer_reason_);
|
2017-03-28 16:11:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void LinearProgrammingConstraint::ReducedCostStrengtheningDeductions(
|
2017-06-28 14:33:56 +02:00
|
|
|
double cp_objective_delta) {
|
2017-03-28 16:11:06 +02:00
|
|
|
deductions_.clear();
|
|
|
|
|
|
2017-06-28 14:33:56 +02:00
|
|
|
  // TRICKY: while simplex_.GetObjectiveValue() uses the objective scaling
  // factor stored in lp_data_, all the other functions like GetReducedCost()
  // or GetVariableValue() do not.
|
|
|
|
|
const double lp_objective_delta =
|
2017-07-12 11:38:46 -07:00
|
|
|
cp_objective_delta / lp_data_.objective_scaling_factor();
|
2017-03-28 16:11:06 +02:00
|
|
|
const int num_vars = integer_variables_.size();
|
|
|
|
|
for (int i = 0; i < num_vars; i++) {
|
|
|
|
|
const IntegerVariable cp_var = integer_variables_[i];
|
2018-01-10 13:21:06 +01:00
|
|
|
const glop::ColIndex lp_var = glop::ColIndex(i);
|
2017-06-28 14:33:56 +02:00
|
|
|
const double rc = simplex_.GetReducedCost(lp_var);
|
2017-03-28 16:11:06 +02:00
|
|
|
const double value = simplex_.GetVariableValue(lp_var);
|
2017-11-07 15:45:52 +01:00
|
|
|
|
|
|
|
|
if (rc == 0.0) continue;
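    // Standard reduced-cost fixing: any LP-feasible solution that moves this
    // variable by delta away from its current bound has an objective at least
    // |rc| * delta worse, so the variable cannot move past
    // value + lp_objective_delta / rc without exceeding the allowed objective
    // change.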
|
2017-03-28 16:11:06 +02:00
|
|
|
const double lp_other_bound = value + lp_objective_delta / rc;
|
2018-02-12 11:36:18 +01:00
|
|
|
const double cp_other_bound = lp_other_bound * LpToCpScalingFactor(lp_var);
|
2017-03-28 16:11:06 +02:00
|
|
|
|
2018-02-12 11:36:18 +01:00
|
|
|
if (rc > kLpEpsilon) {
|
2018-11-05 16:24:47 +01:00
|
|
|
const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
|
2018-02-12 11:36:18 +01:00
|
|
|
const double new_ub = std::floor(cp_other_bound + kCpEpsilon);
|
2017-03-28 16:11:06 +02:00
|
|
|
if (new_ub < ub) {
|
2018-02-12 11:36:18 +01:00
|
|
|
// TODO(user): Because rc > kLpEpsilon, the lower_bound of cp_var
|
|
|
|
|
// will be part of the reason returned by FillReducedCostsReason(), but
|
|
|
|
|
// we actually do not need it here. Same below.
|
2017-03-28 16:11:06 +02:00
|
|
|
const IntegerValue new_ub_int(static_cast<IntegerValue>(new_ub));
|
|
|
|
|
deductions_.push_back(IntegerLiteral::LowerOrEqual(cp_var, new_ub_int));
|
|
|
|
|
}
|
2018-02-12 11:36:18 +01:00
|
|
|
} else if (rc < -kLpEpsilon) {
|
2018-11-05 16:24:47 +01:00
|
|
|
const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
|
2018-02-12 11:36:18 +01:00
|
|
|
const double new_lb = std::ceil(cp_other_bound - kCpEpsilon);
|
2017-03-28 16:11:06 +02:00
|
|
|
if (new_lb > lb) {
|
|
|
|
|
const IntegerValue new_lb_int(static_cast<IntegerValue>(new_lb));
|
|
|
|
|
deductions_.push_back(
|
|
|
|
|
IntegerLiteral::GreaterOrEqual(cp_var, new_lb_int));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-11-07 15:45:52 +01:00
|
|
|
namespace {
|
|
|
|
|
|
2019-07-12 10:22:51 -07:00
|
|
|
// Add a cut of the form Sum_{outgoing arcs from S} lp >= rhs_lower_bound.
|
|
|
|
|
//
|
|
|
|
|
// Note that we used to also add the same cut for the incoming arcs, but because
|
|
|
|
|
// of flow conservation on these problems, the outgoing flow is always the same
|
|
|
|
|
// as the incoming flow, so adding this extra cut doesn't seem relevant.
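//
// Rough illustration (made-up instance): with 4 nodes and S = {1, 2} in a
// circuit where every node must be visited, the generated cut is
//   arc(1->0) + arc(1->3) + arc(2->0) + arc(2->3) >= 1.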
|
|
|
|
|
void AddOutgoingCut(int num_nodes, int subset_size,
|
|
|
|
|
const std::vector<bool>& in_subset,
|
|
|
|
|
const std::vector<int>& tails,
|
|
|
|
|
const std::vector<int>& heads,
|
2019-07-18 11:36:47 -07:00
|
|
|
const std::vector<Literal>& literals,
|
|
|
|
|
const std::vector<double>& literal_lp_values,
|
2019-07-12 10:22:51 -07:00
|
|
|
int64 rhs_lower_bound,
|
|
|
|
|
const gtl::ITIVector<IntegerVariable, double>& lp_values,
|
2019-07-18 11:36:47 -07:00
|
|
|
LinearConstraintManager* manager, Model* model) {
|
|
|
|
|
LinearConstraintBuilder outgoing(model, IntegerValue(rhs_lower_bound),
|
|
|
|
|
kMaxIntegerValue);
|
2017-11-07 15:45:52 +01:00
|
|
|
double sum_outgoing = 0.0;
|
2018-01-10 13:21:06 +01:00
|
|
|
|
2019-07-12 10:22:51 -07:00
|
|
|
// Add outgoing arcs, compute outgoing flow.
|
2017-11-07 15:45:52 +01:00
|
|
|
for (int i = 0; i < tails.size(); ++i) {
|
2019-07-12 10:22:51 -07:00
|
|
|
if (in_subset[tails[i]] && !in_subset[heads[i]]) {
|
2019-07-18 11:36:47 -07:00
|
|
|
sum_outgoing += literal_lp_values[i];
|
|
|
|
|
CHECK(outgoing.AddLiteralTerm(literals[i], IntegerValue(1)));
|
2017-11-07 15:45:52 +01:00
|
|
|
}
|
|
|
|
|
}
|
2018-01-10 13:21:06 +01:00
|
|
|
|
|
|
|
|
// A node is said to be optional if it can be excluded from the subcircuit,
|
|
|
|
|
// in which case there is a self-loop on that node.
|
|
|
|
|
  // If there are optional nodes, use the extended formula:
  //   sum(cut) >= 1 - optional_loop_in - optional_loop_out,
  // where optional_loop_in's node is in the subset and optional_loop_out's is
  // outside.
|
|
|
|
|
// TODO(user): Favor optional loops fixed to zero at root.
|
|
|
|
|
int num_optional_nodes_in = 0;
|
|
|
|
|
int num_optional_nodes_out = 0;
|
|
|
|
|
int optional_loop_in = -1;
|
|
|
|
|
int optional_loop_out = -1;
|
|
|
|
|
for (int i = 0; i < tails.size(); ++i) {
|
|
|
|
|
if (tails[i] != heads[i]) continue;
|
2019-07-12 10:22:51 -07:00
|
|
|
if (in_subset[tails[i]]) {
|
2018-01-10 13:21:06 +01:00
|
|
|
num_optional_nodes_in++;
|
|
|
|
|
if (optional_loop_in == -1 ||
|
2019-07-18 11:36:47 -07:00
|
|
|
literal_lp_values[i] < literal_lp_values[optional_loop_in]) {
|
2018-01-10 13:21:06 +01:00
|
|
|
optional_loop_in = i;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
num_optional_nodes_out++;
|
|
|
|
|
if (optional_loop_out == -1 ||
|
2019-07-18 11:36:47 -07:00
|
|
|
literal_lp_values[i] < literal_lp_values[optional_loop_out]) {
|
2018-01-10 13:21:06 +01:00
|
|
|
optional_loop_out = i;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (num_optional_nodes_in + num_optional_nodes_out > 0) {
|
|
|
|
|
CHECK_EQ(rhs_lower_bound, 1);
|
|
|
|
|
    // When all optionals of one side are excluded in the LP solution, no cut.
|
2019-07-12 10:22:51 -07:00
|
|
|
if (num_optional_nodes_in == subset_size &&
|
2018-01-10 13:21:06 +01:00
|
|
|
(optional_loop_in == -1 ||
|
2019-07-18 11:36:47 -07:00
|
|
|
literal_lp_values[optional_loop_in] > 1.0 - 1e-6)) {
|
2018-01-10 13:21:06 +01:00
|
|
|
return;
|
|
|
|
|
}
|
2019-07-12 10:22:51 -07:00
|
|
|
if (num_optional_nodes_out == num_nodes - subset_size &&
|
2018-01-10 13:21:06 +01:00
|
|
|
(optional_loop_out == -1 ||
|
2019-07-18 11:36:47 -07:00
|
|
|
literal_lp_values[optional_loop_out] > 1.0 - 1e-6)) {
|
2018-01-10 13:21:06 +01:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// There is no mandatory node in subset, add optional_loop_in.
|
2019-07-12 10:22:51 -07:00
|
|
|
if (num_optional_nodes_in == subset_size) {
|
2019-07-18 11:36:47 -07:00
|
|
|
CHECK(
|
|
|
|
|
outgoing.AddLiteralTerm(literals[optional_loop_in], IntegerValue(1)));
|
|
|
|
|
sum_outgoing += literal_lp_values[optional_loop_in];
|
2018-01-10 13:21:06 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// There is no mandatory node out of subset, add optional_loop_out.
|
2019-07-12 10:22:51 -07:00
|
|
|
if (num_optional_nodes_out == num_nodes - subset_size) {
|
2019-07-18 11:36:47 -07:00
|
|
|
CHECK(outgoing.AddLiteralTerm(literals[optional_loop_out],
|
|
|
|
|
IntegerValue(1)));
|
|
|
|
|
sum_outgoing += literal_lp_values[optional_loop_out];
|
2018-01-10 13:21:06 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-11-07 15:45:52 +01:00
|
|
|
if (sum_outgoing < rhs_lower_bound - 1e-6) {
|
2019-07-18 11:36:47 -07:00
|
|
|
manager->AddCut(outgoing.Build(), "Circuit", lp_values);
|
2017-11-07 15:45:52 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
|
2019-07-12 10:22:51 -07:00
|
|
|
// We roughly follow the algorithm described in section 6 of "The Traveling
// Salesman Problem: A Computational Study", David L. Applegate, Robert E.
// Bixby, Vasek Chvatal, William J. Cook.
//
// Note that this is mainly a "symmetric" case algorithm, but it still works
// for the asymmetric case.
|
|
|
|
|
void SeparateSubtourInequalities(
|
|
|
|
|
int num_nodes, const std::vector<int>& tails, const std::vector<int>& heads,
|
2019-07-18 11:36:47 -07:00
|
|
|
const std::vector<Literal>& literals,
|
2019-07-12 10:22:51 -07:00
|
|
|
const gtl::ITIVector<IntegerVariable, double>& lp_values,
|
|
|
|
|
absl::Span<const int64> demands, int64 capacity,
|
2019-07-18 11:36:47 -07:00
|
|
|
LinearConstraintManager* manager, Model* model) {
|
2019-07-12 10:22:51 -07:00
|
|
|
if (num_nodes <= 2) return;
|
|
|
|
|
|
|
|
|
|
  // We will only collect the arcs with a positive lp_value to speed up some
  // of the computations below.
|
|
|
|
|
struct Arc {
|
|
|
|
|
int tail;
|
|
|
|
|
int head;
|
|
|
|
|
double lp_value;
|
|
|
|
|
};
|
|
|
|
|
std::vector<Arc> relevant_arcs;
|
|
|
|
|
|
|
|
|
|
// Sort the arcs by non-increasing lp_values.
|
2019-07-18 11:36:47 -07:00
|
|
|
std::vector<double> literal_lp_values(literals.size());
|
2019-07-12 10:22:51 -07:00
|
|
|
std::vector<std::pair<double, int>> arc_by_decreasing_lp_values;
|
2019-07-18 11:36:47 -07:00
|
|
|
auto* encoder = model->GetOrCreate<IntegerEncoder>();
|
|
|
|
|
for (int i = 0; i < literals.size(); ++i) {
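    // A literal may have a direct LP view, or only its negation may have one;
    // in the latter case its LP value is one minus the value of the negated
    // view.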
|
|
|
|
|
double lp_value;
|
|
|
|
|
const IntegerVariable direct_view = encoder->GetLiteralView(literals[i]);
|
|
|
|
|
if (direct_view != kNoIntegerVariable) {
|
|
|
|
|
lp_value = lp_values[direct_view];
|
|
|
|
|
} else {
|
|
|
|
|
lp_value =
|
|
|
|
|
1.0 - lp_values[encoder->GetLiteralView(literals[i].Negated())];
|
|
|
|
|
}
|
|
|
|
|
literal_lp_values[i] = lp_value;
|
|
|
|
|
|
2019-07-12 10:22:51 -07:00
|
|
|
if (lp_value < 1e-6) continue;
|
|
|
|
|
relevant_arcs.push_back({tails[i], heads[i], lp_value});
|
|
|
|
|
arc_by_decreasing_lp_values.push_back({lp_value, i});
|
|
|
|
|
}
|
|
|
|
|
std::sort(arc_by_decreasing_lp_values.begin(),
|
|
|
|
|
arc_by_decreasing_lp_values.end(),
|
|
|
|
|
std::greater<std::pair<double, int>>());
|
|
|
|
|
|
|
|
|
|
  // We will do a union-find by adding the arcs of the LP solution one by one
  // in the order above. Every intermediate set during this construction will
  // be a candidate for a cut.
  //
  // In parallel to the union-find, to efficiently reconstruct these sets (at
  // most num_nodes of them), we construct a "decomposition forest" of the
  // different connected components. Note that we don't exploit any asymmetric
  // nature of the graph here. This is exactly algorithm 6.3 in the book above.
|
|
|
|
|
int num_components = num_nodes;
|
|
|
|
|
std::vector<int> parent(num_nodes);
|
|
|
|
|
std::vector<int> root(num_nodes);
|
|
|
|
|
for (int i = 0; i < num_nodes; ++i) {
|
|
|
|
|
parent[i] = i;
|
|
|
|
|
root[i] = i;
|
|
|
|
|
}
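  // Iterative union-find "find" with full path compression: first locate the
  // root, then redirect every node on the path straight to it.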
|
|
|
|
|
auto get_root_and_compress_path = [&root](int node) {
|
|
|
|
|
int r = node;
|
|
|
|
|
while (root[r] != r) r = root[r];
|
|
|
|
|
while (root[node] != r) {
|
|
|
|
|
const int next = root[node];
|
|
|
|
|
root[node] = r;
|
|
|
|
|
node = next;
|
|
|
|
|
}
|
|
|
|
|
return r;
|
|
|
|
|
};
|
|
|
|
|
for (const auto pair : arc_by_decreasing_lp_values) {
|
|
|
|
|
if (num_components == 2) break;
|
|
|
|
|
const int tail = get_root_and_compress_path(tails[pair.second]);
|
|
|
|
|
const int head = get_root_and_compress_path(heads[pair.second]);
|
|
|
|
|
if (tail != head) {
|
|
|
|
|
// Update the decomposition forest, note that the number of nodes is
|
|
|
|
|
// growing.
|
|
|
|
|
const int new_node = parent.size();
|
|
|
|
|
parent.push_back(new_node);
|
|
|
|
|
parent[head] = new_node;
|
|
|
|
|
parent[tail] = new_node;
|
|
|
|
|
--num_components;
|
|
|
|
|
|
|
|
|
|
// It is important that the union-find representative is the same node.
|
|
|
|
|
root.push_back(new_node);
|
|
|
|
|
root[head] = new_node;
|
|
|
|
|
root[tail] = new_node;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// For each node in the decomposition forest, try to add a cut for the set
|
|
|
|
|
// formed by the nodes and its children. To do that efficiently, we first
|
|
|
|
|
// order the nodes so that for each node in a tree, the set of children forms
|
|
|
|
|
// a consecutive span in the pre_order vector. This vector just lists the
|
|
|
|
|
// nodes in the "pre-order" graph traversal order. The Spans will point inside
|
|
|
|
|
// the pre_order vector, it is why we initialize it once and for all.
|
|
|
|
|
int new_size = 0;
|
|
|
|
|
std::vector<int> pre_order(num_nodes);
|
|
|
|
|
std::vector<absl::Span<const int>> subsets;
|
|
|
|
|
{
|
|
|
|
|
std::vector<absl::InlinedVector<int, 2>> graph(parent.size());
|
|
|
|
|
for (int i = 0; i < parent.size(); ++i) {
|
|
|
|
|
if (parent[i] != i) graph[parent[i]].push_back(i);
|
|
|
|
|
}
|
|
|
|
|
std::vector<int> queue;
|
|
|
|
|
std::vector<bool> seen(graph.size(), false);
|
|
|
|
|
std::vector<int> start_index(parent.size());
|
|
|
|
|
for (int i = num_nodes; i < parent.size(); ++i) {
|
|
|
|
|
// Note that because of the way we constructed 'parent', the graph is a
|
|
|
|
|
// binary tree. This is not required for the correctness of the algorithm
|
|
|
|
|
// here though.
|
|
|
|
|
CHECK(graph[i].empty() || graph[i].size() == 2);
|
|
|
|
|
if (parent[i] != i) continue;
|
|
|
|
|
|
|
|
|
|
// Explore the subtree rooted at node i.
|
|
|
|
|
CHECK(!seen[i]);
|
|
|
|
|
queue.push_back(i);
|
|
|
|
|
while (!queue.empty()) {
|
|
|
|
|
const int node = queue.back();
|
|
|
|
|
if (seen[node]) {
|
|
|
|
|
queue.pop_back();
|
|
|
|
|
// All the children of node are in the span [start, end) of the
|
|
|
|
|
// pre_order vector.
|
|
|
|
|
const int start = start_index[node];
|
|
|
|
|
if (new_size - start > 1) {
|
|
|
|
|
subsets.emplace_back(&pre_order[start], new_size - start);
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
seen[node] = true;
|
|
|
|
|
start_index[node] = new_size;
|
|
|
|
|
if (node < num_nodes) pre_order[new_size++] = node;
|
|
|
|
|
for (const int child : graph[node]) {
|
|
|
|
|
if (!seen[child]) queue.push_back(child);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Compute the total demands in order to know the minimum incoming/outgoing
|
|
|
|
|
// flow.
|
|
|
|
|
int64 total_demands = 0;
|
|
|
|
|
if (!demands.empty()) {
|
|
|
|
|
for (const int64 demand : demands) total_demands += demand;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
  // Process each subset and add any violated cut.
|
|
|
|
|
CHECK_EQ(pre_order.size(), num_nodes);
|
|
|
|
|
std::vector<bool> in_subset(num_nodes, false);
|
|
|
|
|
for (const absl::Span<const int> subset : subsets) {
|
|
|
|
|
CHECK_GT(subset.size(), 1);
|
|
|
|
|
CHECK_LT(subset.size(), num_nodes);
|
|
|
|
|
|
|
|
|
|
// These fields will be left untouched if demands.empty().
|
|
|
|
|
bool contain_depot = false;
|
|
|
|
|
int64 subset_demand = 0;
|
|
|
|
|
|
|
|
|
|
// Initialize "in_subset" and the subset demands.
|
|
|
|
|
for (const int n : subset) {
|
|
|
|
|
in_subset[n] = true;
|
|
|
|
|
if (!demands.empty()) {
|
|
|
|
|
if (n == 0) contain_depot = true;
|
|
|
|
|
subset_demand += demands[n];
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Compute a lower bound on the outgoing flow.
|
|
|
|
|
//
|
|
|
|
|
    // TODO(user): This lower bound assumes that all nodes in the subset must
    // be served, which is not the case. For TSP we do the correct thing in
    // AddOutgoingCut() but not for CVRP... Fix!!
|
|
|
|
|
//
|
|
|
|
|
    // TODO(user): It could be very interesting to see if this "min outgoing
    // flow" cannot be automatically inferred from the constraints in the
    // precedence graph. This might work if we assume that any kind of path
    // cumul constraint is encoded with constraints:
    //   [edge => value_head >= value_tail + edge_weight].
    // We could take the minimum incoming edge weight per node in the set, and
    // use the cumul variable domain to infer some capacity.
|
|
|
|
|
int64 min_outgoing_flow = 1;
|
|
|
|
|
if (!demands.empty()) {
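      // (d + capacity - 1) / capacity is the integer ceiling of d / capacity:
      // at least that many vehicles must cross the cut. When the subset
      // contains the depot, the demand that must be served across the cut is
      // the demand outside the subset.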
|
|
|
|
|
min_outgoing_flow =
|
|
|
|
|
contain_depot
|
|
|
|
|
? (total_demands - subset_demand + capacity - 1) / capacity
|
|
|
|
|
: (subset_demand + capacity - 1) / capacity;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
    // We still need to serve nodes with a demand of zero, and in the corner
    // case where all nodes in the subset have a zero demand, the formula above
    // results in a min_outgoing_flow of zero.
|
|
|
|
|
min_outgoing_flow = std::max(min_outgoing_flow, int64{1});
|
|
|
|
|
|
|
|
|
|
// Compute the current outgoing flow out of the subset.
|
|
|
|
|
//
|
|
|
|
|
    // This can take a significant portion of the running time, which is why we
    // only do it on the arcs with a non-zero lp value. There should be a
    // linear number of those, rather than the total number of arcs which can
    // be quadratic.
    //
    // TODO(user): For the symmetric case there is an even faster algo. See if
    // it can be generalized to the asymmetric one if it becomes needed.
    // Reference is algorithm 6.4 of "The Traveling Salesman Problem" book
    // mentioned above.
|
|
|
|
|
double outgoing_flow = 0.0;
|
|
|
|
|
for (const auto arc : relevant_arcs) {
|
|
|
|
|
if (in_subset[arc.tail] && !in_subset[arc.head]) {
|
|
|
|
|
outgoing_flow += arc.lp_value;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Add a cut if the current outgoing flow is not enough.
|
|
|
|
|
if (outgoing_flow < min_outgoing_flow - 1e-6) {
|
2019-07-18 11:36:47 -07:00
|
|
|
AddOutgoingCut(num_nodes, subset.size(), in_subset, tails, heads,
|
|
|
|
|
literals, literal_lp_values,
|
|
|
|
|
/*rhs_lower_bound=*/min_outgoing_flow, lp_values, manager,
|
|
|
|
|
model);
|
2019-07-12 10:22:51 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Sparse clean up.
|
|
|
|
|
for (const int n : subset) in_subset[n] = false;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-07-18 11:36:47 -07:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
|
|
// Returns for each literal its integer view, or the view of its negation.
|
|
|
|
|
std::vector<IntegerVariable> GetAssociatedVariables(
|
|
|
|
|
const std::vector<Literal>& literals, Model* model) {
|
|
|
|
|
auto* encoder = model->GetOrCreate<IntegerEncoder>();
|
|
|
|
|
std::vector<IntegerVariable> result;
|
|
|
|
|
for (const Literal l : literals) {
|
|
|
|
|
const IntegerVariable direct_view = encoder->GetLiteralView(l);
|
|
|
|
|
if (direct_view != kNoIntegerVariable) {
|
|
|
|
|
result.push_back(direct_view);
|
|
|
|
|
} else {
|
|
|
|
|
result.push_back(encoder->GetLiteralView(l.Negated()));
|
|
|
|
|
DCHECK_NE(result.back(), kNoIntegerVariable);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
|
2017-10-18 11:09:13 +02:00
|
|
|
// We use a basic algorithm to detect components that are not connected to the
|
|
|
|
|
// rest of the graph in the LP solution, and add cuts to force some arcs to
|
|
|
|
|
// enter and leave this component from outside.
|
|
|
|
|
CutGenerator CreateStronglyConnectedGraphCutGenerator(
|
|
|
|
|
int num_nodes, const std::vector<int>& tails, const std::vector<int>& heads,
|
2019-07-18 11:36:47 -07:00
|
|
|
const std::vector<Literal>& literals, Model* model) {
|
2017-10-18 11:09:13 +02:00
|
|
|
CutGenerator result;
|
2019-07-18 11:36:47 -07:00
|
|
|
result.vars = GetAssociatedVariables(literals, model);
|
2018-12-03 14:26:31 +01:00
|
|
|
result.generate_cuts =
|
2019-07-18 11:36:47 -07:00
|
|
|
[num_nodes, tails, heads, literals, model](
|
2019-04-05 14:58:33 +02:00
|
|
|
const gtl::ITIVector<IntegerVariable, double>& lp_values,
|
|
|
|
|
LinearConstraintManager* manager) {
|
2019-07-18 11:36:47 -07:00
|
|
|
SeparateSubtourInequalities(
|
|
|
|
|
num_nodes, tails, heads, literals, lp_values,
|
|
|
|
|
/*demands=*/{}, /*capacity=*/0, manager, model);
|
2018-12-03 14:26:31 +01:00
|
|
|
};
|
2017-11-07 15:45:52 +01:00
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
CutGenerator CreateCVRPCutGenerator(int num_nodes,
|
|
|
|
|
const std::vector<int>& tails,
|
|
|
|
|
const std::vector<int>& heads,
|
2019-07-18 11:36:47 -07:00
|
|
|
const std::vector<Literal>& literals,
|
2017-11-07 15:45:52 +01:00
|
|
|
const std::vector<int64>& demands,
|
2019-07-18 11:36:47 -07:00
|
|
|
int64 capacity, Model* model) {
|
2017-11-07 15:45:52 +01:00
|
|
|
CutGenerator result;
|
2019-07-18 11:36:47 -07:00
|
|
|
result.vars = GetAssociatedVariables(literals, model);
|
2018-12-03 14:26:31 +01:00
|
|
|
result.generate_cuts =
|
2019-07-18 11:36:47 -07:00
|
|
|
[num_nodes, tails, heads, demands, capacity, literals, model](
|
2019-04-05 14:58:33 +02:00
|
|
|
const gtl::ITIVector<IntegerVariable, double>& lp_values,
|
|
|
|
|
LinearConstraintManager* manager) {
|
2019-07-18 11:36:47 -07:00
|
|
|
SeparateSubtourInequalities(num_nodes, tails, heads, literals,
|
|
|
|
|
lp_values, demands, capacity, manager,
|
|
|
|
|
model);
|
2018-12-03 14:26:31 +01:00
|
|
|
};
|
2017-10-18 11:09:13 +02:00
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
|
2017-12-08 14:52:49 +01:00
|
|
|
std::function<LiteralIndex()>
|
|
|
|
|
LinearProgrammingConstraint::HeuristicLPMostInfeasibleBinary(Model* model) {
|
|
|
|
|
IntegerTrail* integer_trail = integer_trail_;
|
|
|
|
|
IntegerEncoder* integer_encoder = model->GetOrCreate<IntegerEncoder>();
|
2017-08-03 10:20:59 -07:00
|
|
|
// Gather all 0-1 variables that appear in some LP.
|
|
|
|
|
std::vector<IntegerVariable> variables;
|
2017-12-08 14:52:49 +01:00
|
|
|
for (IntegerVariable var : integer_variables_) {
|
|
|
|
|
if (integer_trail_->LowerBound(var) == 0 &&
|
|
|
|
|
integer_trail_->UpperBound(var) == 1) {
|
2017-08-03 10:20:59 -07:00
|
|
|
variables.push_back(var);
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-01-17 13:11:14 +01:00
|
|
|
VLOG(1) << "HeuristicLPMostInfeasibleBinary has " << variables.size()
|
|
|
|
|
<< " variables.";
|
2017-08-03 10:20:59 -07:00
|
|
|
|
2017-12-08 14:52:49 +01:00
|
|
|
return [this, variables, integer_trail, integer_encoder]() {
|
2017-08-03 10:20:59 -07:00
|
|
|
const double kEpsilon = 1e-6;
|
|
|
|
|
// Find most fractional value.
|
|
|
|
|
IntegerVariable fractional_var = kNoIntegerVariable;
|
|
|
|
|
double fractional_distance_best = -1.0;
|
|
|
|
|
for (const IntegerVariable var : variables) {
|
2017-12-08 14:52:49 +01:00
|
|
|
// Skip ignored and fixed variables.
|
|
|
|
|
if (integer_trail_->IsCurrentlyIgnored(var)) continue;
|
|
|
|
|
const IntegerValue lb = integer_trail_->LowerBound(var);
|
|
|
|
|
const IntegerValue ub = integer_trail_->UpperBound(var);
|
2017-08-03 10:20:59 -07:00
|
|
|
if (lb == ub) continue;
|
|
|
|
|
|
|
|
|
|
// Check variable's support is fractional.
|
2017-12-08 14:52:49 +01:00
|
|
|
const double lp_value = this->GetSolutionValue(var);
|
2017-08-03 10:20:59 -07:00
|
|
|
const double fractional_distance =
|
|
|
|
|
std::min(std::ceil(lp_value - kEpsilon) - lp_value,
|
|
|
|
|
lp_value - std::floor(lp_value + kEpsilon));
|
|
|
|
|
if (fractional_distance < kEpsilon) continue;
|
|
|
|
|
|
|
|
|
|
// Keep variable if it is farther from integrality than the previous.
|
|
|
|
|
if (fractional_distance > fractional_distance_best) {
|
|
|
|
|
fractional_var = var;
|
|
|
|
|
fractional_distance_best = fractional_distance;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (fractional_var != kNoIntegerVariable) {
|
|
|
|
|
return integer_encoder
|
|
|
|
|
->GetOrCreateAssociatedLiteral(
|
|
|
|
|
IntegerLiteral::GreaterOrEqual(fractional_var, IntegerValue(1)))
|
|
|
|
|
.Index();
|
|
|
|
|
}
|
|
|
|
|
return kNoLiteralIndex;
|
|
|
|
|
};
|
|
|
|
|
}
|
|
|
|
|
|
2017-12-08 14:52:49 +01:00
|
|
|
std::function<LiteralIndex()>
|
|
|
|
|
LinearProgrammingConstraint::HeuristicLPPseudoCostBinary(Model* model) {
|
2018-02-12 11:36:18 +01:00
|
|
|
// Gather all 0-1 variables that appear in this LP.
|
2017-08-03 10:20:59 -07:00
|
|
|
std::vector<IntegerVariable> variables;
|
2017-12-08 14:52:49 +01:00
|
|
|
for (IntegerVariable var : integer_variables_) {
|
|
|
|
|
if (integer_trail_->LowerBound(var) == 0 &&
|
|
|
|
|
integer_trail_->UpperBound(var) == 1) {
|
2017-08-03 10:20:59 -07:00
|
|
|
variables.push_back(var);
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-01-17 13:11:14 +01:00
|
|
|
VLOG(1) << "HeuristicLPPseudoCostBinary has " << variables.size()
|
|
|
|
|
<< " variables.";
|
2017-08-03 10:20:59 -07:00
|
|
|
|
|
|
|
|
  // Store the average reduced cost of moving a variable from 1 to 0. This
  // heuristic only sets variables to one and cares about the cost to zero,
  // even though classic
|
2018-01-10 13:21:06 +01:00
|
|
|
// pseudocost will use max_var min(cost_to_one[var], cost_to_zero[var]).
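  // Concretely, cost_to_zero[i] accumulates the |reduced cost| observed while
  // variable i is at 1 in the LP solution, and the branching score below is
  // its running average.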
|
2017-08-03 10:20:59 -07:00
|
|
|
const int num_vars = variables.size();
|
|
|
|
|
std::vector<double> cost_to_zero(num_vars, 0.0);
|
|
|
|
|
std::vector<int> num_cost_to_zero(num_vars);
|
|
|
|
|
int num_calls = 0;
|
|
|
|
|
|
|
|
|
|
IntegerEncoder* integer_encoder = model->GetOrCreate<IntegerEncoder>();
|
|
|
|
|
return [=]() mutable {
|
|
|
|
|
const double kEpsilon = 1e-6;
|
|
|
|
|
|
|
|
|
|
// Every 10000 calls, decay pseudocosts.
|
|
|
|
|
num_calls++;
|
|
|
|
|
if (num_calls == 10000) {
|
|
|
|
|
for (int i = 0; i < num_vars; i++) {
|
|
|
|
|
cost_to_zero[i] /= 2;
|
|
|
|
|
num_cost_to_zero[i] /= 2;
|
|
|
|
|
}
|
|
|
|
|
num_calls = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Accumulate pseudo-costs of all unassigned variables.
|
|
|
|
|
for (int i = 0; i < num_vars; i++) {
|
|
|
|
|
const IntegerVariable var = variables[i];
|
2017-12-08 14:52:49 +01:00
|
|
|
// Skip ignored and fixed variables.
|
|
|
|
|
if (integer_trail_->IsCurrentlyIgnored(var)) continue;
|
|
|
|
|
const IntegerValue lb = integer_trail_->LowerBound(var);
|
|
|
|
|
const IntegerValue ub = integer_trail_->UpperBound(var);
|
|
|
|
|
if (lb == ub) continue;
|
2017-08-03 10:20:59 -07:00
|
|
|
|
2017-12-08 14:52:49 +01:00
|
|
|
const double rc = this->GetSolutionReducedCost(var);
|
2017-08-03 10:20:59 -07:00
|
|
|
// Skip reduced costs that are nonzero because of numerical issues.
|
|
|
|
|
if (std::abs(rc) < kEpsilon) continue;
|
|
|
|
|
|
2017-12-08 14:52:49 +01:00
|
|
|
const double value = std::round(this->GetSolutionValue(var));
|
2017-08-03 10:20:59 -07:00
|
|
|
if (value == 1.0 && rc < 0.0) {
|
|
|
|
|
cost_to_zero[i] -= rc;
|
|
|
|
|
num_cost_to_zero[i]++;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Select noninstantiated variable with highest pseudo-cost.
|
|
|
|
|
int selected_index = -1;
|
|
|
|
|
double best_cost = 0.0;
|
|
|
|
|
for (int i = 0; i < num_vars; i++) {
|
|
|
|
|
const IntegerVariable var = variables[i];
|
2017-12-08 14:52:49 +01:00
|
|
|
// Skip ignored and fixed variables.
|
|
|
|
|
if (integer_trail_->IsCurrentlyIgnored(var)) continue;
|
|
|
|
|
const IntegerValue lb = integer_trail_->LowerBound(var);
|
|
|
|
|
const IntegerValue ub = integer_trail_->UpperBound(var);
|
|
|
|
|
if (lb == ub) continue;
|
2017-08-03 10:20:59 -07:00
|
|
|
|
|
|
|
|
if (num_cost_to_zero[i] > 0 &&
|
|
|
|
|
best_cost < cost_to_zero[i] / num_cost_to_zero[i]) {
|
|
|
|
|
best_cost = cost_to_zero[i] / num_cost_to_zero[i];
|
|
|
|
|
selected_index = i;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (selected_index >= 0) {
|
|
|
|
|
const Literal decision = integer_encoder->GetOrCreateAssociatedLiteral(
|
|
|
|
|
IntegerLiteral::GreaterOrEqual(variables[selected_index],
|
|
|
|
|
IntegerValue(1)));
|
|
|
|
|
return decision.Index();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return kNoLiteralIndex;
|
|
|
|
|
};
|
|
|
|
|
}
|
|
|
|
|
|
2018-02-12 11:36:18 +01:00
|
|
|
std::function<LiteralIndex()>
|
|
|
|
|
LinearProgrammingConstraint::LPReducedCostAverageBranching() {
|
|
|
|
|
if (!compute_reduced_cost_averages_) {
|
|
|
|
|
compute_reduced_cost_averages_ = true;
|
|
|
|
|
const int num_vars = integer_variables_.size();
|
|
|
|
|
VLOG(1) << " LPReducedCostAverageBranching has #variables: " << num_vars;
|
|
|
|
|
sum_cost_down_.resize(num_vars, 0.0);
|
|
|
|
|
num_cost_down_.resize(num_vars, 0);
|
|
|
|
|
sum_cost_up_.resize(num_vars, 0.0);
|
|
|
|
|
num_cost_up_.resize(num_vars, 0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return [this]() { return this->LPReducedCostAverageDecision(); };
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
LiteralIndex LinearProgrammingConstraint::LPReducedCostAverageDecision() {
|
|
|
|
|
const int num_vars = integer_variables_.size();
|
|
|
|
|
// Select noninstantiated variable with highest pseudo-cost.
|
|
|
|
|
int selected_index = -1;
|
|
|
|
|
double best_cost = 0.0;
|
|
|
|
|
for (int i = 0; i < num_vars; i++) {
|
|
|
|
|
const IntegerVariable var = this->integer_variables_[i];
|
|
|
|
|
// Skip ignored and fixed variables.
|
|
|
|
|
if (integer_trail_->IsCurrentlyIgnored(var)) continue;
|
|
|
|
|
const IntegerValue lb = integer_trail_->LowerBound(var);
|
|
|
|
|
const IntegerValue ub = integer_trail_->UpperBound(var);
|
|
|
|
|
if (lb == ub) continue;
|
|
|
|
|
|
|
|
|
|
    // If only one direction exists, we take its value divided by 2, so that
    // such a variable should have a smaller cost than the min of the two
    // sides, except if one direction has a really high reduced cost.
|
|
|
|
|
double cost_i = 0.0;
|
|
|
|
|
if (num_cost_down_[i] > 0 && num_cost_up_[i] > 0) {
|
|
|
|
|
cost_i = std::min(sum_cost_down_[i] / num_cost_down_[i],
|
|
|
|
|
sum_cost_up_[i] / num_cost_up_[i]);
|
|
|
|
|
} else {
|
|
|
|
|
const double divisor = num_cost_down_[i] + num_cost_up_[i];
|
|
|
|
|
if (divisor != 0) {
|
|
|
|
|
cost_i = 0.5 * (sum_cost_down_[i] + sum_cost_up_[i]) / divisor;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (selected_index == -1 || cost_i > best_cost) {
|
|
|
|
|
best_cost = cost_i;
|
|
|
|
|
selected_index = i;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (selected_index == -1) return kNoLiteralIndex;
|
2019-04-03 18:10:56 +02:00
|
|
|
const IntegerVariable var = integer_variables_[selected_index];
|
2018-02-12 11:36:18 +01:00
|
|
|
|
|
|
|
|
  // If ceil(value) is the current upper bound, try var == upper bound first.
  // Guarding with >= prevents numerical problems.
  // With 0/1 variables, this will tend to try setting to 1 first,
  // which produces shallower trees.
|
|
|
|
|
const IntegerValue ub = integer_trail_->UpperBound(var);
|
|
|
|
|
const IntegerValue value_ceil(
|
|
|
|
|
std::ceil(this->GetSolutionValue(var) - kCpEpsilon));
|
|
|
|
|
if (value_ceil >= ub) {
|
2019-04-03 18:10:56 +02:00
|
|
|
const Literal result = integer_encoder_->GetOrCreateAssociatedLiteral(
|
|
|
|
|
IntegerLiteral::GreaterOrEqual(var, ub));
|
|
|
|
|
CHECK(!trail_->Assignment().LiteralIsAssigned(result));
|
|
|
|
|
return result.Index();
|
2018-02-12 11:36:18 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
  // If floor(value) is the current lower bound, try var == lower bound first.
|
|
|
|
|
// Guarding with <= prevents numerical problems.
|
|
|
|
|
const IntegerValue lb = integer_trail_->LowerBound(var);
|
|
|
|
|
const IntegerValue value_floor(
|
|
|
|
|
std::floor(this->GetSolutionValue(var) + kCpEpsilon));
|
|
|
|
|
if (value_floor <= lb) {
|
2019-04-03 18:10:56 +02:00
|
|
|
const Literal result = integer_encoder_->GetOrCreateAssociatedLiteral(
|
|
|
|
|
IntegerLiteral::LowerOrEqual(var, lb));
|
|
|
|
|
CHECK(!trail_->Assignment().LiteralIsAssigned(result))
|
|
|
|
|
<< " " << lb << " " << ub;
|
|
|
|
|
return result.Index();
|
2018-02-12 11:36:18 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Here lb < value_floor <= value_ceil < ub.
|
|
|
|
|
// Try the most promising split between var <= floor or var >= ceil.
|
|
|
|
|
if (sum_cost_down_[selected_index] / num_cost_down_[selected_index] <
|
|
|
|
|
sum_cost_up_[selected_index] / num_cost_up_[selected_index]) {
|
2019-04-03 18:10:56 +02:00
|
|
|
const Literal result = integer_encoder_->GetOrCreateAssociatedLiteral(
|
|
|
|
|
IntegerLiteral::LowerOrEqual(var, value_floor));
|
|
|
|
|
CHECK(!trail_->Assignment().LiteralIsAssigned(result));
|
|
|
|
|
return result.Index();
|
2018-02-12 11:36:18 +01:00
|
|
|
} else {
|
2019-04-03 18:10:56 +02:00
|
|
|
const Literal result = integer_encoder_->GetOrCreateAssociatedLiteral(
|
|
|
|
|
IntegerLiteral::GreaterOrEqual(var, value_ceil));
|
|
|
|
|
CHECK(!trail_->Assignment().LiteralIsAssigned(result));
|
|
|
|
|
return result.Index();
|
2018-02-12 11:36:18 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-28 16:11:06 +02:00
|
|
|
} // namespace sat
|
|
|
|
|
} // namespace operations_research
|