restructure LNS code in CP-SAT; add linear relaxation and dynamic cuts for product in CP-SAT; bug fixes

This commit is contained in:
Laurent Perron
2019-06-20 17:16:59 +02:00
parent ae295d18fc
commit 739ae3d17f
15 changed files with 916 additions and 317 deletions

View File

@@ -189,6 +189,7 @@ cc_library(
"//ortools/base:int_type_indexed_vector",
"//ortools/base:map_util",
"//ortools/base:status",
"//ortools/base:stl_util",
"//ortools/base:threadpool",
"//ortools/graph:connectivity",
"//ortools/port:proto_utils",
@@ -1010,9 +1011,11 @@ cc_library(
":cp_model_loader",
":cp_model_utils",
":linear_programming_constraint",
":lns",
":model",
":rins",
"//ortools/base",
"//ortools/base:threadpool",
"//ortools/util:random_engine",
"//ortools/util:time_limit",
],

View File

@@ -223,6 +223,20 @@ std::string ValidateObjective(const CpModelProto& model,
return "";
}
std::string ValidateSolutionHint(const CpModelProto& model) {
if (!model.has_solution_hint()) return "";
const auto& hint = model.solution_hint();
if (hint.vars().size() != hint.values().size()) {
return "Invalid solution hint: vars and values do not have the same size.";
}
for (const int ref : hint.vars()) {
if (!VariableReferenceIsValid(model, ref)) {
return absl::StrCat("Invalid variable reference in solution hint: ", ref);
}
}
return "";
}
} // namespace
std::string ValidateCpModel(const CpModelProto& model) {
@@ -277,6 +291,7 @@ std::string ValidateCpModel(const CpModelProto& model) {
RETURN_IF_NOT_EMPTY(ValidateObjective(model, model.objective()));
}
RETURN_IF_NOT_EMPTY(ValidateSolutionHint(model));
return "";
}

View File

@@ -95,13 +95,22 @@ bool NeighborhoodGeneratorHelper::IsConstant(int var) const {
model_proto_.variables(var).domain(1);
}
Neighborhood NeighborhoodGeneratorHelper::FullNeighborhood() const {
Neighborhood neighborhood;
neighborhood.is_reduced = false;
neighborhood.is_generated = true;
neighborhood.cp_model = model_proto_;
return neighborhood;
}
Neighborhood NeighborhoodGeneratorHelper::FixGivenVariables(
const CpSolverResponse& initial_solution,
const std::vector<int>& variables_to_fix) const {
Neighborhood neighborhood;
// TODO(user): Do not include constraint with all fixed variables to save
// memory and speed-up LNS presolving.
Neighborhood neighborhood = FullNeighborhood();
neighborhood.is_reduced = !variables_to_fix.empty();
neighborhood.cp_model = model_proto_;
neighborhood.is_generated = true;
if (!neighborhood.is_reduced) return neighborhood;
CHECK_EQ(initial_solution.solution_size(),
neighborhood.cp_model.variables_size());
@@ -152,23 +161,63 @@ Neighborhood NeighborhoodGeneratorHelper::FixAllVariables(
}
double NeighborhoodGenerator::GetUCBScore(int64 total_num_calls) const {
absl::MutexLock mutex_lock(&mutex_);
DCHECK_GE(total_num_calls, num_calls_);
if (num_calls_ <= 10) return std::numeric_limits<double>::infinity();
return current_average_ + sqrt((2 * log(total_num_calls)) / num_calls_);
}
void NeighborhoodGenerator::AddSolveData(double objective_diff,
double deterministic_time) {
double gain_per_time_unit = objective_diff / (1.0 + deterministic_time);
// TODO(user): Add more data.
// TODO(user): Weight more recent data.
num_calls_++;
// degrade the current average to forget old learnings.
if (num_calls_ <= 100) {
current_average_ += (gain_per_time_unit - current_average_) / num_calls_;
} else {
current_average_ = 0.9 * current_average_ + 0.1 * gain_per_time_unit;
void NeighborhoodGenerator::Synchronize() {
absl::MutexLock mutex_lock(&mutex_);
// To make the whole update process deterministic, we currently sort the
// SolveData.
std::sort(solve_data_.begin(), solve_data_.end());
for (const SolveData& data : solve_data_) {
// TODO(user): we should use the original neighborhood difficulty in the
// formula that updates the difficulty. Ideally, using all the recent data we
// want to aim for a given percentage of "solvable vs. unsolvable" problem
// so we are at an interesting spot. It seems the current code converges
// towards a 50% percentage though.
if (data.status == CpSolverStatus::INFEASIBLE ||
data.status == CpSolverStatus::OPTIMAL) {
num_fully_solved_calls_++;
difficulty_.Increase();
} else {
difficulty_.Decrease();
}
num_calls_++;
if (data.objective_diff > 0.0) {
num_consecutive_non_improving_calls_ = 0;
} else {
num_consecutive_non_improving_calls_++;
}
// TODO(user): Weight more recent data.
// degrade the current average to forget old learnings.
const double gain_per_time_unit =
data.objective_diff / (1.0 + data.deterministic_time);
if (num_calls_ <= 100) {
current_average_ += (gain_per_time_unit - current_average_) / num_calls_;
} else {
current_average_ = 0.9 * current_average_ + 0.1 * gain_per_time_unit;
}
}
// Bump the time limit if we saw no better solution in the last few calls.
// This means that as the search progresses, we likely spend more and more time
// trying to solve individual neighborhoods.
//
// TODO(user): experiment with resetting the time limit if a solution is
// found.
if (num_consecutive_non_improving_calls_ > 20) {
num_consecutive_non_improving_calls_ = 0;
deterministic_limit_ *= 1.1;
}
solve_data_.clear();
}
namespace {
@@ -201,11 +250,7 @@ Neighborhood VariableGraphNeighborhoodGenerator::Generate(
const int num_model_vars = helper_.ModelProto().variables_size();
const int target_size = std::ceil(difficulty * num_active_vars);
if (target_size == num_active_vars) {
Neighborhood neighborhood;
neighborhood.is_reduced = false;
neighborhood.is_generated = true;
neighborhood.cp_model = helper_.ModelProto();
return neighborhood;
return helper_.FullNeighborhood();
}
CHECK_GT(target_size, 0);
@@ -260,11 +305,7 @@ Neighborhood ConstraintGraphNeighborhoodGenerator::Generate(
const int target_size = std::ceil(difficulty * num_active_vars);
const int num_constraints = helper_.ConstraintToVar().size();
if (num_constraints == 0 || target_size == num_active_vars) {
Neighborhood neighborhood;
neighborhood.is_reduced = false;
neighborhood.is_generated = true;
neighborhood.cp_model = helper_.ModelProto();
return neighborhood;
return helper_.FullNeighborhood();
}
CHECK_GT(target_size, 0);
@@ -323,11 +364,11 @@ Neighborhood GenerateSchedulingNeighborhoodForRelaxation(
const absl::Span<const int> intervals_to_relax,
const CpSolverResponse& initial_solution,
const NeighborhoodGeneratorHelper& helper) {
Neighborhood neighborhood;
Neighborhood neighborhood = helper.FullNeighborhood();
neighborhood.is_reduced =
(intervals_to_relax.size() <
helper.TypeToConstraints(ConstraintProto::kInterval).size());
neighborhood.cp_model = helper.ModelProto();
// We will extend the set with some interval that we cannot fix.
std::set<int> ignored_intervals(intervals_to_relax.begin(),
intervals_to_relax.end());
@@ -455,8 +496,7 @@ Neighborhood SchedulingTimeWindowNeighborhoodGenerator::Generate(
Neighborhood RelaxationInducedNeighborhoodGenerator::Generate(
const CpSolverResponse& initial_solution, int64 seed,
double difficulty) const {
Neighborhood neighborhood;
neighborhood.cp_model = helper_.ModelProto();
Neighborhood neighborhood = helper_.FullNeighborhood();
neighborhood.is_generated = false;
SharedRINSNeighborhoodManager* rins_manager =

View File

@@ -19,6 +19,7 @@
#include "absl/types/span.h"
#include "ortools/base/integral_types.h"
#include "ortools/sat/cp_model.pb.h"
#include "ortools/sat/lns.h"
#include "ortools/sat/model.h"
namespace operations_research {
@@ -42,14 +43,14 @@ struct Neighborhood {
// Contains pre-computed information about a given CpModelProto that is meant
// to be used to generate LNS neighborhood. This class can be shared between
// more than one generator in order to reduce memory usage.
//
// Thread-safe.
class NeighborhoodGeneratorHelper {
public:
NeighborhoodGeneratorHelper(CpModelProto const* model_proto,
bool focus_on_decision_variables);
// Updates private variables using the current 'model_proto_'.
// Updates private variables using the current 'model_proto_'. This is NOT
// thread-safe, but the other functions in this class are if this is not called
// at the same time.
void UpdateHelperData(bool focus_on_decision_variables);
// Returns the LNS fragment where the given variables are fixed to the value
@@ -68,6 +69,9 @@ class NeighborhoodGeneratorHelper {
// solution values.
Neighborhood FixAllVariables(const CpSolverResponse& initial_solution) const;
// Returns a neighborhood that corresponds to the full problem.
Neighborhood FullNeighborhood() const;
// Indicates if the variable can be frozen. It happens if the variable is non
// constant, and if it is a decision variable, or if
// focus_on_decision_variables is false.
@@ -92,6 +96,7 @@ class NeighborhoodGeneratorHelper {
}
// The initial problem.
// Note that the domains of the variables are not updated here.
const CpModelProto& ModelProto() const { return model_proto_; }
private:
@@ -120,7 +125,7 @@ class NeighborhoodGenerator {
public:
NeighborhoodGenerator(const std::string& name,
NeighborhoodGeneratorHelper const* helper)
: helper_(*helper), name_(name) {}
: helper_(*helper), name_(name), difficulty_(0.5) {}
virtual ~NeighborhoodGenerator() {}
// Generates a "local" subproblem for the given seed.
@@ -143,9 +148,6 @@ class NeighborhoodGenerator {
// Returns true if the generator needs a solution to generate a neighborhood.
virtual bool NeedsFirstSolution() const { return true; }
// Returns a short description of the generator.
std::string name() const { return name_; }
// Uses UCB1 algorithm to compute the score (Multi armed bandit problem).
// Details are at
// https://lilianweng.github.io/lil-log/2018/01/23/the-multi-armed-bandit-problem-and-its-solutions.html.
@@ -156,17 +158,84 @@ class NeighborhoodGenerator {
// performance.
double GetUCBScore(int64 total_num_calls) const;
// Updates the records using the current improvement in objective for the
// generator.
void AddSolveData(double objective_diff, double deterministic_time);
// Adds solve data about one "solved" neighborhood.
//
// TODO(user): Add more data.
struct SolveData {
// The status of the sub-solve.
CpSolverStatus status = CpSolverStatus::UNKNOWN;
// Number of times this generator is called.
int64 num_calls() const { return num_calls_; }
// The difficulty when this neighborhood was solved.
double difficulty = 0.0;
// The time it took to solve this neighborhood.
double deterministic_time = 0.0;
// The objective improvement compared to the base solution.
double objective_diff = 0.0;
// This is just used to construct a deterministic order for the updates.
bool operator<(const SolveData& o) const {
return std::tie(status, difficulty, deterministic_time, objective_diff) <
std::tie(o.status, o.difficulty, o.deterministic_time,
o.objective_diff);
}
};
void AddSolveData(SolveData data) {
absl::MutexLock mutex_lock(&mutex_);
solve_data_.push_back(data);
}
// Process all the recently added solve data and update this generator
// score and difficulty.
void Synchronize();
// Returns a short description of the generator.
std::string name() const { return name_; }
// Number of times this generator was called.
int64 num_calls() const {
absl::MutexLock mutex_lock(&mutex_);
return num_calls_;
}
// Number of times the neighborhood was fully solved (OPTIMAL/INFEASIBLE).
int64 num_fully_solved_calls() const {
absl::MutexLock mutex_lock(&mutex_);
return num_fully_solved_calls_;
}
// The current difficulty of this generator.
double difficulty() const {
absl::MutexLock mutex_lock(&mutex_);
return difficulty_.value();
}
// The current time limit that the sub-solve should use on this generator.
double deterministic_limit() const {
absl::MutexLock mutex_lock(&mutex_);
return deterministic_limit_;
}
protected:
const NeighborhoodGeneratorHelper& helper_;
const std::string name_;
private:
mutable absl::Mutex mutex_;
std::vector<SolveData> solve_data_;
// Current parameters to be used when generating/solving a neighborhood with
// this generator. Only updated on Synchronize().
AdaptiveParameterValue difficulty_;
double deterministic_limit_ = 0.1;
// Current statistics of the last solved neighborhood.
// Only updated on Synchronize().
int64 num_calls_ = 0;
int64 num_fully_solved_calls_ = 0;
int64 num_consecutive_non_improving_calls_ = 0;
double current_average_ = 0.0;
};

View File

@@ -428,6 +428,15 @@ void CpModelMapping::DetectOptionalVariables(const CpModelProto& model_proto,
if (!parameters.use_optional_variables()) return;
if (parameters.enumerate_all_solutions()) return;
// The variables from the objective cannot be marked as optional!
const int num_proto_variables = model_proto.variables_size();
std::vector<bool> already_seen(num_proto_variables, false);
if (model_proto.has_objective()) {
for (const int ref : model_proto.objective().vars()) {
already_seen[PositiveRef(ref)] = true;
}
}
// Compute for each variables the intersection of the enforcement literals
// of the constraints in which they appear.
//
@@ -436,8 +445,6 @@ void CpModelMapping::DetectOptionalVariables(const CpModelProto& model_proto,
// appear to false. This can be done with a LCA computation in the tree of
// Boolean implication (once the presolve remove cycles). Not sure if we can
// properly exploit that afterwards though. Do some research!
const int num_proto_variables = model_proto.variables_size();
std::vector<bool> already_seen(num_proto_variables, false);
std::vector<std::vector<int>> enforcement_intersection(num_proto_variables);
std::set<int> literals_set;
for (int c = 0; c < model_proto.constraints_size(); ++c) {

View File

@@ -3173,6 +3173,9 @@ void ExpandObjective(PresolveContext* context) {
for (int i = 0; i < context->working_model->objective().vars_size(); ++i) {
const int ref = context->working_model->objective().vars(i);
const int64 coeff = context->working_model->objective().coeffs(i);
// NOTE: Domain multiplication with zero coeff returns empty domain so we
// avoid it by skipping the terms with zero coeff.
if (coeff == 0) continue;
implied_domain =
implied_domain
.AdditionWith(context->DomainOf(ref).MultiplicationBy(coeff))
@@ -3185,8 +3188,12 @@ void ExpandObjective(PresolveContext* context) {
if (!mutable_objective->domain().empty()) {
old_domain = ReadDomainFromProto(*mutable_objective);
}
FillDomainInProto(old_domain.SimplifyUsingImpliedDomain(implied_domain),
mutable_objective);
const Domain simplified_domain =
old_domain.SimplifyUsingImpliedDomain(implied_domain);
if (simplified_domain.IsEmpty()) {
return (void)context->NotifyThatModelIsUnsat();
}
FillDomainInProto(simplified_domain, mutable_objective);
}
// Convert the objective linear expression to a map for ease of use below.
@@ -3223,7 +3230,7 @@ void ExpandObjective(PresolveContext* context) {
// If the objective is a single variable, then we can usually remove this
// variable if it is only used in one linear equality constraint and we do
// just one expansion. This is because the domain of the variable will be
// transfered to our objective_domain.
// transferred to our objective_domain.
int unique_expanded_constraint = -1;
const bool objective_was_a_single_variable = objective_map.size() == 1;
@@ -3449,8 +3456,12 @@ void ExpandObjective(PresolveContext* context) {
.AdditionWith(Domain(-objective_offset_change))
.InverseMultiplicationBy(objective_divisor);
}
FillDomainInProto(old_domain.SimplifyUsingImpliedDomain(implied_domain),
mutable_objective);
const Domain simplified_domain =
old_domain.SimplifyUsingImpliedDomain(implied_domain);
if (simplified_domain.IsEmpty()) {
return (void)context->NotifyThatModelIsUnsat();
}
FillDomainInProto(simplified_domain, mutable_objective);
mutable_objective->set_offset(mutable_objective->offset() +
objective_offset_change);
if (objective_divisor > 1) {
@@ -4306,6 +4317,13 @@ bool PresolveCpModel(const PresolveOptions& options,
PresolveToFixPoint(options, &context);
}
// Regroup no-overlaps into max-cliques.
if (!context.ModelIsUnsat()) MergeNoOverlapConstraints(&context);
if (context.working_model->has_objective() && !context.ModelIsUnsat()) {
ExpandObjective(&context);
}
if (context.ModelIsUnsat()) {
if (options.log_info) LogInfoFromContext(context);
@@ -4315,13 +4333,6 @@ bool PresolveCpModel(const PresolveOptions& options,
return true;
}
// Regroup no-overlaps into max-cliques.
MergeNoOverlapConstraints(&context);
if (context.working_model->has_objective()) {
ExpandObjective(&context);
}
// Note: Removing unused equivalent variables should be done at the end.
RemoveUnusedEquivalentVariables(&context);

View File

@@ -26,6 +26,9 @@
#include "absl/memory/memory.h"
#include "glog/vlog_is_on.h"
#include "ortools/sat/cuts.h"
#include "ortools/sat/sat_parameters.pb.h"
#include "ortools/util/saturated_arithmetic.h"
#if !defined(__PORTABLE_PLATFORM__)
#include "absl/synchronization/notification.h"
@@ -376,6 +379,11 @@ std::string Summarize(const std::string& input) {
input.substr(input.size() - half, half));
}
bool IsPositive(IntegerVariable v, Model* m) {
IntegerTrail* const integer_trail = m->GetOrCreate<IntegerTrail>();
return integer_trail->LevelZeroLowerBound(v) >= 0;
}
} // namespace.
// =============================================================================
@@ -670,8 +678,11 @@ IntegerVariable GetOrCreateVariableGreaterOrEqualToSumOf(
void TryToAddCutGenerators(const CpModelProto& model_proto,
const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation) {
const int linearization_level =
m->GetOrCreate<SatParameters>()->linearization_level();
auto* mapping = m->GetOrCreate<CpModelMapping>();
if (ct.constraint_case() == ConstraintProto::ConstraintCase::kCircuit) {
if (ct.constraint_case() == ConstraintProto::ConstraintCase::kCircuit &&
linearization_level > 1) {
std::vector<int> tails(ct.circuit().tails().begin(),
ct.circuit().tails().end());
std::vector<int> heads(ct.circuit().heads().begin(),
@@ -689,7 +700,8 @@ void TryToAddCutGenerators(const CpModelProto& model_proto,
CreateStronglyConnectedGraphCutGenerator(num_nodes, tails, heads,
vars));
}
if (ct.constraint_case() == ConstraintProto::ConstraintCase::kRoutes) {
if (ct.constraint_case() == ConstraintProto::ConstraintCase::kRoutes &&
linearization_level > 1) {
std::vector<int> tails;
std::vector<int> heads;
std::vector<IntegerVariable> vars;
@@ -717,6 +729,23 @@ void TryToAddCutGenerators(const CpModelProto& model_proto,
num_nodes, tails, heads, vars, demands, ct.routes().capacity()));
}
}
if (ct.constraint_case() == ConstraintProto::ConstraintCase::kIntProd) {
if (HasEnforcementLiteral(ct)) return;
if (ct.int_prod().vars_size() != 2) return;
const int target = ct.int_prod().target();
const IntegerVariable v0 = mapping->Integer(ct.int_prod().vars(0));
const IntegerVariable v1 = mapping->Integer(ct.int_prod().vars(1));
if (v0 == v1 && IsPositive(v0, m)) {
relaxation->cut_generators.push_back(
CreateSquareCutGenerator(mapping->Integer(target), v0, m));
} else if (IsPositive(v0, m) && IsPositive(v1, m)) {
relaxation->cut_generators.push_back(
CreatePositiveMultiplicationCutGenerator(mapping->Integer(target), v0,
v1, m));
}
}
}
} // namespace
@@ -759,11 +788,7 @@ IntegerVariable AddLPConstraints(const CpModelProto& model_proto,
TryToLinearizeConstraint(model_proto, ct, m, linearization_level,
&relaxation);
// For now these are only useful on problem with circuit. They can help
// a lot on complex problems, but they also slow down simple ones.
if (linearization_level > 1) {
TryToAddCutGenerators(model_proto, ct, m, &relaxation);
}
TryToAddCutGenerators(model_proto, ct, m, &relaxation);
}
// Linearize the encoding of variable that are fully encoded in the proto.
@@ -1727,8 +1752,7 @@ bool UpdateDomain(int64 new_lb, int64 new_ub,
}
} // namespace
void SolveCpModelWithLNS(const CpModelProto& model_proto, int num_workers,
int worker_id,
void SolveCpModelWithLNS(const CpModelProto& model_proto, int worker_id,
SharedResponseManager* shared_response_manager,
SharedBoundsManager* shared_bounds_manager,
WallTimer* wall_timer, Model* model) {
@@ -1739,7 +1763,11 @@ void SolveCpModelWithLNS(const CpModelProto& model_proto, int num_workers,
const bool focus_on_decision_variables =
parameters->lns_focus_on_decision_variables();
// This mutex should protect all access to the helper or time_limit.
absl::Mutex mutex;
CpModelProto mutable_model_proto = model_proto;
TimeLimit* limit = model->GetOrCreate<TimeLimit>();
NeighborhoodGeneratorHelper helper(&mutable_model_proto,
focus_on_decision_variables);
@@ -1791,190 +1819,201 @@ void SolveCpModelWithLNS(const CpModelProto& model_proto, int num_workers,
return;
}
// The "optimal" difficulties do not have to be the same for different
// generators.
//
// TODO(user): This should be shared across LNS threads! create a thread
// safe class that accept signals of the form (difficulty and result) and
// properly update the difficulties.
std::vector<AdaptiveParameterValue> difficulties(generators.size(),
AdaptiveParameterValue(0.5));
const auto synchronize_and_maybe_stop_function = [&]() {
// Update the bounds on mutable model proto.
if (shared_bounds_manager != nullptr) {
absl::MutexLock mutex_lock(&mutex);
std::vector<int> model_variables;
std::vector<int64> new_lower_bounds;
std::vector<int64> new_upper_bounds;
shared_bounds_manager->GetChangedBounds(
worker_id, &model_variables, &new_lower_bounds, &new_upper_bounds);
TimeLimit* limit = model->GetOrCreate<TimeLimit>();
double deterministic_time = 0.1;
int num_consecutive_not_fully_solved = 0;
const int num_threads = std::max(1, parameters->lns_num_threads());
int64 total_num_calls = 0;
OptimizeWithLNS(
num_threads,
[&]() {
// Synchronize with external world.
response = shared_response_manager->GetResponse();
// Update the bounds on mutable model proto.
if (shared_bounds_manager != nullptr) {
std::vector<int> model_variables;
std::vector<int64> new_lower_bounds;
std::vector<int64> new_upper_bounds;
shared_bounds_manager->GetChangedBounds(worker_id, &model_variables,
&new_lower_bounds,
&new_upper_bounds);
for (int i = 0; i < model_variables.size(); ++i) {
const int var = model_variables[i];
const int64 new_lb = new_lower_bounds[i];
const int64 new_ub = new_upper_bounds[i];
if (VLOG_IS_ON(3)) {
const auto& domain = mutable_model_proto.variables(var).domain();
const int64 old_lb = domain.Get(0);
const int64 old_ub = domain.Get(domain.size() - 1);
VLOG(3) << "Variable: " << var << " old domain: [" << old_lb
<< ", " << old_ub << "] new domain: [" << new_lb << ", "
<< new_ub << "]";
}
if (!UpdateDomain(new_lb, new_ub,
mutable_model_proto.mutable_variables(var))) {
// Model is unsat.
return true;
}
}
if (!model_variables.empty()) {
helper.UpdateHelperData(focus_on_decision_variables);
}
for (int i = 0; i < model_variables.size(); ++i) {
const int var = model_variables[i];
const int64 new_lb = new_lower_bounds[i];
const int64 new_ub = new_upper_bounds[i];
if (VLOG_IS_ON(3)) {
const auto& domain = mutable_model_proto.variables(var).domain();
const int64 old_lb = domain.Get(0);
const int64 old_ub = domain.Get(domain.size() - 1);
VLOG(3) << "Variable: " << var << " old domain: [" << old_lb << ", "
<< old_ub << "] new domain: [" << new_lb << ", " << new_ub
<< "]";
}
if (!UpdateDomain(new_lb, new_ub,
mutable_model_proto.mutable_variables(var))) {
// Model is unsat.
return true;
}
}
if (!model_variables.empty()) {
helper.UpdateHelperData(focus_on_decision_variables);
}
}
// If we didn't see any progress recently, bump the time limit.
// TODO(user): Tune the logic and expose the parameters.
if (num_consecutive_not_fully_solved > 100) {
deterministic_time *= 1.1;
num_consecutive_not_fully_solved = 0;
}
return limit->LimitReached() ||
response.objective_value() == response.best_objective_bound();
},
[&](int64 seed) {
int selected_generator = seed % generators.size();
while (response.status() == CpSolverStatus::UNKNOWN &&
generators[selected_generator]->NeedsFirstSolution()) {
selected_generator = (selected_generator + 1) % generators.size();
}
AdaptiveParameterValue& difficulty = difficulties[selected_generator];
const double saved_difficulty = difficulty.value();
Neighborhood neighborhood = generators[selected_generator]->Generate(
response, num_workers * seed + worker_id, saved_difficulty);
if (!neighborhood.is_generated) {
std::function<void()> update_function = []() {};
return update_function;
}
CpModelProto& local_problem = neighborhood.cp_model;
const std::string solution_info = absl::StrFormat(
"%s(d=%0.2f s=%i t=%0.2f)", generators[selected_generator]->name(),
saved_difficulty, seed, deterministic_time);
// Synchronize the solution repository and process all the latest
// "SolveData" since the last synchronization point.
shared_response_manager->MutableSolutionsRepository()->Synchronize();
for (std::unique_ptr<NeighborhoodGenerator>& generator : generators) {
generator->Synchronize();
}
absl::MutexLock mutex_lock(&mutex);
response = shared_response_manager->GetResponse();
return limit->LimitReached() ||
response.objective_value() == response.best_objective_bound();
};
const auto generate_and_solve_function = [&](int64 seed) {
const int num_solutions =
shared_response_manager->SolutionsRepository().NumSolutions();
int selected_generator = seed % generators.size();
while (num_solutions == 0 &&
generators[selected_generator]->NeedsFirstSolution()) {
selected_generator = (selected_generator + 1) % generators.size();
}
CpSolverResponse base_response;
if (num_solutions > 0) {
base_response.set_status(CpSolverStatus::FEASIBLE);
const int selected_solution = seed / generators.size() % num_solutions;
const SharedSolutionRepository::Solution solution =
shared_response_manager->SolutionsRepository().GetSolution(
selected_solution);
for (const int64 value : solution.variable_values) {
base_response.add_solution(value);
}
} else {
base_response.set_status(CpSolverStatus::UNKNOWN);
}
NeighborhoodGenerator* generator = generators[selected_generator].get();
const double saved_difficulty = generator->difficulty();
const double saved_limit = generator->deterministic_limit();
Neighborhood neighborhood;
{
absl::MutexLock mutex_lock(&mutex);
// TODO(user): currently the full neighborhood generation cannot be
// parallelized since we can change the underlying helper data when
// synchronizing new bounds. Improve?
if (limit->LimitReached()) return;
neighborhood = generator->Generate(base_response, seed, saved_difficulty);
}
if (!neighborhood.is_generated) return;
CHECK_NE(saved_limit, 0);
CpModelProto& local_problem = neighborhood.cp_model;
const std::string solution_info = absl::StrFormat(
"%s(d=%0.2f s=%i t=%0.2f p=%0.2f)", generator->name(), saved_difficulty,
seed, saved_limit,
static_cast<double>(generator->num_fully_solved_calls()) /
std::max(int64{1}, generator->num_calls()));
#if !defined(__PORTABLE_PLATFORM__)
if (FLAGS_cp_model_dump_lns_problems) {
const int id = num_workers * seed + worker_id;
const std::string name = absl::StrCat("/tmp/lns_", id, ".pb.txt");
LOG(INFO) << "Dumping LNS model to '" << name << "'.";
CHECK_OK(file::SetTextProto(name, local_problem, file::Defaults()));
}
if (FLAGS_cp_model_dump_lns_problems) {
const std::string name = absl::StrCat("/tmp/lns_", seed, ".pb.txt");
LOG(INFO) << "Dumping LNS model to '" << name << "'.";
CHECK_OK(file::SetTextProto(name, local_problem, file::Defaults()));
}
#endif // __PORTABLE_PLATFORM__
Model local_model;
{
SatParameters local_parameters;
local_parameters = *parameters;
local_parameters.set_max_deterministic_time(deterministic_time);
local_parameters.set_stop_after_first_solution(false);
local_model.Add(NewSatParameters(local_parameters));
}
local_model.GetOrCreate<TimeLimit>()->MergeWithGlobalTimeLimit(limit);
Model local_model;
{
SatParameters local_parameters;
local_parameters = *parameters;
local_parameters.set_max_deterministic_time(saved_limit);
local_parameters.set_stop_after_first_solution(false);
local_model.Add(NewSatParameters(local_parameters));
}
{
absl::MutexLock mutex_lock(&mutex);
local_model.GetOrCreate<TimeLimit>()->MergeWithGlobalTimeLimit(limit);
}
// Presolve and solve the LNS fragment.
CpModelProto mapping_proto;
std::vector<int> postsolve_mapping;
PresolveOptions options;
options.log_info = VLOG_IS_ON(3);
options.parameters = *local_model.GetOrCreate<SatParameters>();
options.time_limit = local_model.GetOrCreate<TimeLimit>();
PresolveCpModel(options, &local_problem, &mapping_proto,
&postsolve_mapping);
CpSolverResponse local_response =
LocalSolve(local_problem, wall_timer, &local_model);
// Presolve and solve the LNS fragment.
CpModelProto mapping_proto;
std::vector<int> postsolve_mapping;
PresolveOptions options;
options.log_info = VLOG_IS_ON(3);
options.parameters = *local_model.GetOrCreate<SatParameters>();
options.time_limit = local_model.GetOrCreate<TimeLimit>();
PresolveCpModel(options, &local_problem, &mapping_proto,
&postsolve_mapping);
CpSolverResponse local_response =
LocalSolve(local_problem, wall_timer, &local_model);
// TODO(user): we actually do not need to postsolve if the solution is
// not going to be used...
PostsolveResponse(model_proto, mapping_proto, postsolve_mapping,
wall_timer, &local_response);
if (local_response.solution_info().empty()) {
local_response.set_solution_info(solution_info);
} else {
local_response.set_solution_info(
absl::StrCat(local_response.solution_info(), " ", solution_info));
}
// TODO(user): we actually do not need to postsolve if the solution is
// not going to be used...
PostsolveResponse(model_proto, mapping_proto, postsolve_mapping, wall_timer,
&local_response);
if (local_response.solution_info().empty()) {
local_response.set_solution_info(solution_info);
} else {
local_response.set_solution_info(
absl::StrCat(local_response.solution_info(), " ", solution_info));
}
const bool neighborhood_is_reduced = neighborhood.is_reduced;
std::function<void()> update_function =
[neighborhood_is_reduced, &num_consecutive_not_fully_solved,
&shared_response_manager, &response, &difficulty, local_response,
&generators, selected_generator, &total_num_calls]() {
// TODO(user): This is not ideal in multithread because even
// though the saved_difficulty will be the same for all threads, we
// will Increase()/Decrease() the difficulty sequentially more than
// once.
if (local_response.status() == CpSolverStatus::OPTIMAL ||
local_response.status() == CpSolverStatus::INFEASIBLE) {
num_consecutive_not_fully_solved = 0;
difficulty.Increase();
} else {
++num_consecutive_not_fully_solved;
difficulty.Decrease();
}
NeighborhoodGenerator::SolveData data;
data.status = local_response.status();
data.difficulty = saved_difficulty;
data.deterministic_time = local_response.deterministic_time();
// Update the generator record.
double objective_diff = 0.0;
if (local_response.status() == CpSolverStatus::OPTIMAL ||
local_response.status() == CpSolverStatus::FEASIBLE) {
objective_diff = std::abs(local_response.objective_value() -
response.objective_value());
}
total_num_calls++;
generators[selected_generator]->AddSolveData(
objective_diff, local_response.deterministic_time());
VLOG(2) << generators[selected_generator]->name()
<< ": [difficulty: " << difficulty.value()
<< ", deterministic time: "
<< local_response.deterministic_time() << ", status: "
<< ProtoEnumToString<CpSolverStatus>(
local_response.status())
<< ", num calls: "
<< generators[selected_generator]->num_calls()
<< ", UCB1 Score: "
<< generators[selected_generator]->GetUCBScore(
total_num_calls)
<< "]";
data.objective_diff = 0.0;
if (local_response.status() == CpSolverStatus::OPTIMAL ||
local_response.status() == CpSolverStatus::FEASIBLE) {
// TODO(user): Compute the diff with respect to the base solution
// instead.
data.objective_diff = std::abs(local_response.objective_value() -
response.objective_value());
}
// Report any feasible solution we have. Note that we do not want
// to keep the full model in this lambda, so for now we do not
// report local stats in the solution.
//
// TODO(user): depending on the problem, the bound sharing may or
// may not restrict the objective though. Uniformize the behavior.
if (local_response.status() == CpSolverStatus::OPTIMAL ||
local_response.status() == CpSolverStatus::FEASIBLE) {
shared_response_manager->NewSolution(local_response,
/*model=*/nullptr);
}
if (!neighborhood_is_reduced &&
(local_response.status() == CpSolverStatus::OPTIMAL ||
local_response.status() == CpSolverStatus::INFEASIBLE)) {
shared_response_manager->NotifyThatImprovingProblemIsInfeasible(
local_response.solution_info());
}
};
return update_function;
});
generator->AddSolveData(data);
// The total number of call when this was called is the same as the
// seed.
// TODO(user): maybe rename? Also move to the synchronize part...
const int total_num_calls = seed;
VLOG(2) << generator->name() << ": [difficulty: " << saved_difficulty
<< ", deterministic time: " << local_response.deterministic_time()
<< ", status: "
<< ProtoEnumToString<CpSolverStatus>(local_response.status())
<< ", num calls: " << generator->num_calls()
<< ", UCB1 Score: " << generator->GetUCBScore(total_num_calls)
<< "]";
// Report any feasible solution we have. Note that we do not want
// to keep the full model in this lambda, so for now we do not
// report local stats in the solution.
//
// TODO(user): depending on the problem, the bound sharing may or
// may not restrict the objective though. Uniformize the behavior.
if (local_response.status() == CpSolverStatus::OPTIMAL ||
local_response.status() == CpSolverStatus::FEASIBLE) {
shared_response_manager->NewSolution(local_response,
/*model=*/nullptr);
}
if (!neighborhood.is_reduced &&
(local_response.status() == CpSolverStatus::OPTIMAL ||
local_response.status() == CpSolverStatus::INFEASIBLE)) {
shared_response_manager->NotifyThatImprovingProblemIsInfeasible(
local_response.solution_info());
}
};
const int num_threads = std::max(1, parameters->lns_num_threads());
if (parameters->lns_is_deterministic()) {
const int batch_size = 4 * num_threads;
OptimizeWithLNS(num_threads, batch_size,
synchronize_and_maybe_stop_function,
generate_and_solve_function);
} else {
NonDeterministicOptimizeWithLNS(num_threads,
synchronize_and_maybe_stop_function,
generate_and_solve_function);
}
}
#if !defined(__PORTABLE_PLATFORM__)
@@ -2048,12 +2087,25 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
shared_response_manager->UnregisterCallback(callback_id);
});
// The LNS threads are handled differently.
int num_lns_workers = 0;
int lns_worker_id = 0;
for (int worker_id = 0; worker_id < num_search_workers; ++worker_id) {
const SatParameters local_params = worker_parameters[worker_id];
if (local_params.use_lns()) {
lns_worker_id = worker_id;
++num_lns_workers;
}
}
ThreadPool pool("Parallel_search", num_search_workers);
pool.StartWorkers();
for (int worker_id = 0; worker_id < num_search_workers; ++worker_id) {
const std::string worker_name = worker_names[worker_id];
const SatParameters local_params = worker_parameters[worker_id];
if (local_params.use_lns()) continue;
pool.Schedule([&model_proto, stopped, local_params, worker_id, &mutex,
num_search_workers, wall_timer,
&first_solution_found_or_search_finished,
@@ -2074,29 +2126,20 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
worker_info->worker_name = worker_name;
worker_info->worker_id = worker_id;
if (local_params.use_lns()) {
first_solution_found_or_search_finished.WaitForNotification();
// TODO(user,user): Provide a better diversification for different
// seeds.
SolveCpModelWithLNS(
model_proto, num_search_workers, worker_id, shared_response_manager,
shared_bounds_manager.get(), wall_timer, &local_model);
} else {
LoadCpModel(model_proto, shared_response_manager, &local_model);
LoadCpModel(model_proto, shared_response_manager, &local_model);
// Level zero variable bounds sharing.
//
// TODO(user): move these function to the shared_bounds_manager class?
if (shared_bounds_manager != nullptr) {
RegisterVariableBoundsLevelZeroExport(
model_proto, shared_bounds_manager.get(), &local_model);
RegisterVariableBoundsLevelZeroImport(
model_proto, shared_bounds_manager.get(), &local_model);
}
SolveLoadedCpModel(model_proto, shared_response_manager, &local_model);
// Level zero variable bounds sharing.
//
// TODO(user): move these function to the shared_bounds_manager class?
if (shared_bounds_manager != nullptr) {
RegisterVariableBoundsLevelZeroExport(
model_proto, shared_bounds_manager.get(), &local_model);
RegisterVariableBoundsLevelZeroImport(
model_proto, shared_bounds_manager.get(), &local_model);
}
SolveLoadedCpModel(model_proto, shared_response_manager, &local_model);
// TODO(user): Accumulate the stats?
//
// TODO(user): For now we assume that each worker only terminate when
@@ -2111,6 +2154,40 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
}
});
}
// Schedule the LNS worker that itself will contain a thread-pool.
if (num_lns_workers > 0) {
pool.Schedule([&] {
// Wait for a solution before starting the LNS threads.
first_solution_found_or_search_finished.WaitForNotification();
// Note that because the other worker are not deterministic, there is
// no point having a deterministic LNS here as it will pull out
// non-deterministic info like bounds and best solutions...
Model local_model;
local_model.GetOrCreate<SatParameters>()->set_use_lns(true);
local_model.GetOrCreate<SatParameters>()->set_lns_num_threads(
num_lns_workers);
local_model.GetOrCreate<SatParameters>()->set_lns_is_deterministic(false);
local_model.GetOrCreate<TimeLimit>()->RegisterExternalBooleanAsLimit(
stopped);
// Stores info that will be used for logs in the local model.
WorkerInfo* worker_info = local_model.GetOrCreate<WorkerInfo>();
worker_info->worker_name = worker_names[lns_worker_id];
worker_info->worker_id = lns_worker_id;
SolveCpModelWithLNS(model_proto, lns_worker_id, shared_response_manager,
shared_bounds_manager.get(), wall_timer,
&local_model);
absl::MutexLock lock(&mutex);
*stopped = true;
if (!first_solution_found_or_search_finished.HasBeenNotified()) {
first_solution_found_or_search_finished.Notify();
}
});
}
}
#endif // __PORTABLE_PLATFORM__
@@ -2306,8 +2383,7 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
!params.enumerate_all_solutions()) {
// TODO(user,user): Provide a better diversification for different
// seeds.
const int random_seed = model->GetOrCreate<SatParameters>()->random_seed();
SolveCpModelWithLNS(new_cp_model_proto, 1, random_seed,
SolveCpModelWithLNS(new_cp_model_proto, /*worker_id=*/1,
&shared_response_manager,
/*shared_bounds_manager=*/nullptr, &wall_timer, model);
} else {

View File

@@ -784,5 +784,146 @@ void IntegerRoundingCut(RoundingOptions options, std::vector<double> lp_values,
DivideByGCD(cut);
}
// Dynamic cut generator for the product constraint target = v1 * v2, with
// v1 >= 0 and v2 >= 0. On each call it adds whichever of the three
// McCormick-style linearizations is violated by the current LP point:
//   target >= v2_lb * v1 + v1_lb * v2 - v1_lb * v2_lb   (under-estimator)
//   target <= v1_ub * v2                                 (over-estimator)
//   target <= v2_ub * v1                                 (over-estimator)
CutGenerator CreatePositiveMultiplicationCutGenerator(
    IntegerVariable target_var, IntegerVariable v1, IntegerVariable v2,
    Model* model) {
  CutGenerator result;
  result.vars = {target_var, v1, v2};
  IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
  result.generate_cuts =
      [target_var, v1, v2, model, integer_trail](
          const gtl::ITIVector<IntegerVariable, double>& lp_values,
          LinearConstraintManager* manager) {
        // Level-zero bounds are used so that the generated cuts stay valid
        // for the whole search, not just for the current subtree.
        const int64 v1_ub = integer_trail->LevelZeroUpperBound(v1).value();
        const int64 v1_lb = integer_trail->LevelZeroLowerBound(v1).value();
        const int64 v2_ub = integer_trail->LevelZeroUpperBound(v2).value();
        const int64 v2_lb = integer_trail->LevelZeroLowerBound(v2).value();
        // Bail out if the bound products cannot be represented exactly as
        // doubles; the violation tests below are done in double precision.
        const int64 kMaxSafeInteger = int64{9007199254740991};  // 2 ^ 53 - 1.
        if (CapProd(v1_ub, v2_ub) >= kMaxSafeInteger) {
          VLOG(3) << "Potential overflow in PositiveMultiplicationCutGenerator";
          return;
        }
        const double target_value = lp_values[target_var];
        const double v1_value = lp_values[v1];
        const double v2_value = lp_values[v2];
        // Under-estimator, only added when violated by at least
        // kMinCutViolation.
        if (target_value <= v1_value * v2_lb + v2_value * v1_lb -
                                v1_lb * v2_lb - kMinCutViolation) {
          // cut: target >= v1 * v2_lb + v2 * v1_lb - v1_lb * v2_lb
          LinearConstraint cut;
          cut.vars.push_back(target_var);
          cut.coeffs.push_back(IntegerValue(1));
          // Skip zero coefficients to keep the cut sparse.
          if (v2_lb != 0) {
            cut.vars.push_back(v1);
            cut.coeffs.push_back(IntegerValue(-v2_lb));
          }
          if (v1_lb != 0) {
            cut.vars.push_back(v2);
            cut.coeffs.push_back(IntegerValue(-v1_lb));
          }
          cut.lb = IntegerValue(-v1_lb * v2_lb);
          cut.ub = kMaxIntegerValue;
          manager->AddCut(cut, "PositiveProduct", lp_values);
        }
        if (target_value >= v2_value * v1_ub + kMinCutViolation) {
          // cut: target <= v2 * v1_ub.
          LinearConstraint cut;
          cut.vars.push_back(target_var);
          cut.coeffs.push_back(IntegerValue(1));
          cut.vars.push_back(v2);
          cut.coeffs.push_back(IntegerValue(-v1_ub));
          cut.lb = kMinIntegerValue;
          cut.ub = IntegerValue(0);
          manager->AddCut(cut, "PositiveProduct", lp_values);
        }
        if (target_value >= v1_value * v2_ub + kMinCutViolation) {
          // cut: target <= v1 * v2_ub.
          LinearConstraint cut;
          cut.vars.push_back(target_var);
          cut.coeffs.push_back(IntegerValue(1));
          cut.vars.push_back(v1);
          cut.coeffs.push_back(IntegerValue(-v2_ub));
          cut.lb = kMinIntegerValue;
          cut.ub = IntegerValue(0);
          manager->AddCut(cut, "PositiveProduct", lp_values);
        }
      };
  return result;
}
// Dynamic cut generator for target = int_var ^ 2 with int_var >= 0.
// It pushes the LP value of target towards the parabola from both sides:
// a secant cut from above, and a tangent-like integer cut from below.
CutGenerator CreateSquareCutGenerator(IntegerVariable target_var,
                                      IntegerVariable int_var, Model* model) {
  CutGenerator result;
  result.vars = {target_var, int_var};
  IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
  result.generate_cuts =
      [target_var, int_var, model, integer_trail](
          const gtl::ITIVector<IntegerVariable, double>& lp_values,
          LinearConstraintManager* manager) {
        const int64 var_ub =
            integer_trail->LevelZeroUpperBound(int_var).value();
        const int64 var_lb =
            integer_trail->LevelZeroLowerBound(int_var).value();
        // A fixed variable gives a degenerate (zero-slope) envelope.
        if (var_lb == var_ub) return;
        // Check for potential overflows: var_ub^2 and the cut coefficients
        // below must fit comfortably in an int64.
        if (var_ub > int64{1000000000}) return;
        DCHECK_GE(var_lb, 0);
        const double target_value = lp_values[target_var];
        const double var_value = lp_values[int_var];
        // First cut: target should be below the line:
        //     (var_lb, val_lb ^ 2) to (var_ub, var_ub ^ 2).
        // The slope of that line is (ub^2 - lb^2) / (ub - lb) = ub + lb.
        const int64 target_lb = var_lb * var_lb;
        const int64 above_slope = var_ub + var_lb;
        const double max_target =
            target_lb + above_slope * (var_value - var_lb);
        // Only add the cut when it is violated by at least kMinCutViolation.
        if (target_value >= max_target + kMinCutViolation) {
          // cut: target <= (var_lb + var_ub) * int_var - var_lb * var_ub
          LinearConstraint above_cut;
          above_cut.vars.push_back(target_var);
          above_cut.coeffs.push_back(IntegerValue(1));
          above_cut.vars.push_back(int_var);
          above_cut.coeffs.push_back(IntegerValue(-above_slope));
          above_cut.lb = kMinIntegerValue;
          above_cut.ub = IntegerValue(-var_lb * var_ub);
          manager->AddCut(above_cut, "SquareUpper", lp_values);
        }
        // Second cut: target should be above all the lines
        //     (value, value ^ 2) to (value + 1, (value + 1) ^ 2)
        // The slope of that line is 2 * value + 1; we use the line through
        // the integer points surrounding the current LP value.
        const int64 var_floor = static_cast<int64>(std::floor(var_value));
        const int64 below_slope = 2 * var_floor + 1;
        // Value of that line at var_value:
        //     below_slope * var_value - var_floor^2 - var_floor.
        const double min_target =
            below_slope * var_value - var_floor - var_floor * var_floor;
        if (min_target >= target_value + kMinCutViolation) {
          // cut: target >= below_slope * (int_var - var_floor) +
          //                var_floor * var_floor
          //    : target >= below_slope * int_var - var_floor ^ 2 - var_floor
          LinearConstraint below_cut;
          below_cut.vars.push_back(target_var);
          below_cut.coeffs.push_back(IntegerValue(1));
          below_cut.vars.push_back(int_var);
          below_cut.coeffs.push_back(-IntegerValue(below_slope));
          below_cut.lb = IntegerValue(-var_floor - var_floor * var_floor);
          below_cut.ub = kMaxIntegerValue;
          manager->AddCut(below_cut, "SquareLower", lp_values);
        }
      };
  return result;
}
} // namespace sat
} // namespace operations_research

View File

@@ -97,7 +97,7 @@ std::function<IntegerValue(IntegerValue)> GetSuperAdditiveRoundingFunction(
// values. This could be relaxed, but for now it should always be the case, so
// we log a message and abort if not, to ease debugging.
// - The IntegerVariable of the cuts are not used here. We assumes that the
// first three vectors are in one to one correspondance with the initial order
// first three vectors are in one to one correspondence with the initial order
// of the variable in the cut.
//
// TODO(user): There is a bunch of heuristic involved here, and we could spend
@@ -255,6 +255,16 @@ CutGenerator CreateKnapsackCoverCutGenerator(
const std::vector<LinearConstraint>& base_constraints,
const std::vector<IntegerVariable>& vars, Model* model);
// A cut generator for z = x * y (x and y >= 0)
CutGenerator CreatePositiveMultiplicationCutGenerator(
IntegerVariable target_var, IntegerVariable v1, IntegerVariable v2,
Model* model);
// A cut generator for y = x ^ 2. (x >= 0).
// It will dynamically add a linear inequality to push y closer to the parabola.
CutGenerator CreateSquareCutGenerator(IntegerVariable target_var,
IntegerVariable int_var, Model* model);
} // namespace sat
} // namespace operations_research

View File

@@ -324,18 +324,79 @@ void TryToLinearizeConstraint(const CpModelProto& model_proto,
} else if (ct.constraint_case() ==
ConstraintProto::ConstraintCase::kIntProd) {
if (HasEnforcementLiteral(ct)) return;
const int target = ct.int_prod().target();
const int size = ct.int_prod().vars_size();
if (ct.int_prod().vars_size() != 2) return;
IntegerVariable target = mapping->Integer(ct.int_prod().target());
IntegerVariable v0 = mapping->Integer(ct.int_prod().vars(0));
IntegerVariable v1 = mapping->Integer(ct.int_prod().vars(1));
IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
// We just linearize x = y^2 by x >= y which is far from ideal but at
// least pushes x when y moves away from zero. Note that if y is negative,
// we should probably also add x >= -y, but then this do not happen in
// our test set.
if (size == 2 && ct.int_prod().vars(0) == ct.int_prod().vars(1)) {
LinearConstraintBuilder lc(model, kMinIntegerValue, IntegerValue(0));
lc.AddTerm(mapping->Integer(ct.int_prod().vars(0)), IntegerValue(1));
lc.AddTerm(mapping->Integer(target), IntegerValue(-1));
relaxation->linear_constraints.push_back(lc.Build());
if (v0 == v1) { // target = v0 ^ 2
if (integer_trail->UpperBound(v0) <= 0) {
v0 = NegationOf(v0);
}
const IntegerValue var_lb = integer_trail->LowerBound(v0);
const IntegerValue var_ub = integer_trail->UpperBound(v0);
CHECK_GE(var_lb, 0);
// target >= (2 * var_lb + 1) * v0 - var_lb - var_lb ^ 2.
LinearConstraintBuilder lc_below(model, -var_lb - var_lb * var_lb,
kMaxIntegerValue);
lc_below.AddTerm(target, IntegerValue(1));
lc_below.AddTerm(v0, 2 * var_lb + 1);
relaxation->linear_constraints.push_back(lc_below.Build());
// target <= (var_lb + var_ub) * v0 - var_lb * var_ub
LinearConstraintBuilder lc_above(model, kMinIntegerValue,
-var_lb * var_ub);
lc_above.AddTerm(target, IntegerValue(1));
lc_above.AddTerm(v0, -(var_lb + var_ub));
relaxation->linear_constraints.push_back(lc_above.Build());
} else { // target = v0 * v1
// Change signs if needed.
if (integer_trail->UpperBound(v0) <= 0) {
v0 = NegationOf(v0);
target = NegationOf(target);
}
if (integer_trail->UpperBound(v1) <= 0) {
v1 = NegationOf(v1);
target = NegationOf(target);
}
const IntegerValue v0_lb = integer_trail->LowerBound(v0);
const IntegerValue v0_ub = integer_trail->UpperBound(v0);
const IntegerValue v1_lb = integer_trail->LowerBound(v1);
const IntegerValue v1_ub = integer_trail->UpperBound(v1);
CHECK_GE(v0_lb, 0);
CHECK_GE(v1_lb, 0);
// target >= v1 * v0_lb + v0 * v1_lb - v0_lb * v1_lb
LinearConstraintBuilder lc_below(model, -v0_lb * v1_lb, kMaxIntegerValue);
lc_below.AddTerm(target, IntegerValue(1));
if (v1_lb > 0) {
lc_below.AddTerm(v0, v1_lb);
}
if (v0_lb > 0) {
lc_below.AddTerm(v1, v0_lb);
}
relaxation->linear_constraints.push_back(lc_below.Build());
// target <= v0 * v1_ub
LinearConstraintBuilder lc_v0(model, kMinIntegerValue, IntegerValue(0));
lc_v0.AddTerm(target, IntegerValue(1));
lc_v0.AddTerm(v0, -v1_ub);
relaxation->linear_constraints.push_back(lc_v0.Build());
// target <= v1 * v0_ub
LinearConstraintBuilder lc_v1(model, kMinIntegerValue, IntegerValue(0));
lc_v1.AddTerm(target, IntegerValue(1));
lc_v1.AddTerm(v1, -v0_ub);
relaxation->linear_constraints.push_back(lc_v1.Build());
}
} else if (ct.constraint_case() == ConstraintProto::ConstraintCase::kLinear) {
AppendLinearConstraintRelaxation(ct, linearization_level, *model,

View File

@@ -20,6 +20,7 @@
#include <vector>
#if !defined(__PORTABLE_PLATFORM__)
#include "absl/synchronization/mutex.h"
#include "ortools/base/threadpool.h"
#endif // __PORTABLE_PLATFORM__
@@ -28,28 +29,34 @@ namespace sat {
// A simple deterministic and multithreaded LNS design.
//
// While !stop_function(), we call a batch of num_threads
// generate_and_solve_function() in parallel. Each such functions should return
// an update_global_solution() function. These update functions will only be
// called sequentially (and in seed order) once the batch is done.
// While !synchronize_and_maybe_stop(), we call a batch of batch_size
// generate_and_solve() in parallel using num_threads threads. The
// two given functions must be thread-safe.
//
// The seed starts at zero and will be increased one by one. Each
// generate_and_solve_function() will get a different seed.
// The general idea to enforce determinism is that each
// generate_and_solve() can update a global state asynchronously, but
// should still use the past state until the synchronize_and_maybe_stop() call
// has been done.
//
// Only the generate_and_solve_function(int seed) need to be thread-safe.
// The seed starts at zero and will be increased one by one, so it also
// represent the number of calls to generate_and_solve(). Each
// generate_and_solve() will get a different seed.
//
// The two templated types should behave like:
// - StopFunction: std::function<bool()>
// - SolveNeighborhoodFunction: std::function<std::function<void()>(int seed)>
//
// TODO(user): As improving solutions become more difficult to find, we
// should trigger a lot more parallel LNS than the number of threads, so that we
// have a better utilization. For this, add a simple mechanism so that we always
// have num_threads working LNS, and do the barrier sync at the end when all the
// LNS tasks have been executed.
template <class StopFunction, class SolveNeighborhoodFunction>
void OptimizeWithLNS(int num_threads, StopFunction stop_function,
SolveNeighborhoodFunction generate_and_solve_function);
// - SubSolveFunction: std::function<void(int64 seed)>
template <class StopFunction, class SubSolveFunction>
void OptimizeWithLNS(int num_threads, int batch_size,
const StopFunction& synchronize_and_maybe_stop,
const SubSolveFunction& generate_and_solve);
// This one just keep num_threads tasks always in flight and call
// synchronize_and_maybe_stop() before each generate_and_solve(). It is not
// deterministic.
template <class StopFunction, class SubSolveFunction>
void NonDeterministicOptimizeWithLNS(
int num_threads, const StopFunction& synchronize_and_maybe_stop,
const SubSolveFunction& generate_and_solve);
// Basic adaptive [0.0, 1.0] parameter that can be increased or decreased with a
// step that get smaller and smaller with the number of updates.
@@ -98,41 +105,67 @@ class AdaptiveParameterValue {
// Implementation.
// ============================================================================
template <class StopFunction, class SolveNeighborhoodFunction>
inline void OptimizeWithLNS(
int num_threads, StopFunction stop_function,
SolveNeighborhoodFunction generate_and_solve_function) {
template <class StopFunction, class SubSolveFunction>
inline void OptimizeWithLNS(int num_threads, int batch_size,
const StopFunction& synchronize_and_maybe_stop,
const SubSolveFunction& generate_and_solve) {
int64_t seed = 0;
#if !defined(__PORTABLE_PLATFORM__)
while (!stop_function()) {
if (num_threads > 1) {
std::vector<std::function<void()>> update_functions(num_threads);
{
ThreadPool pool("Parallel_LNS", num_threads);
pool.StartWorkers();
for (int i = 0; i < num_threads; ++i) {
pool.Schedule(
[&update_functions, &generate_and_solve_function, i, seed]() {
update_functions[i] = generate_and_solve_function(seed + i);
});
}
}
seed += num_threads;
for (int i = 0; i < num_threads; ++i) {
update_functions[i]();
}
} else {
std::function<void()> update_function =
generate_and_solve_function(seed++);
update_function();
#if defined(__PORTABLE_PLATFORM__)
while (!synchronize_and_maybe_stop()) generate_and_solve(seed++);
return;
#else // __PORTABLE_PLATFORM__
if (num_threads == 1) {
while (!synchronize_and_maybe_stop()) generate_and_solve(seed++);
return;
}
// TODO(user): We could reuse the same ThreadPool as long as we wait for all
// the task in a batch to finish before scheduling new ones. Not sure how
// to easily do that, so for now we just recreate the pool for each batch.
while (!synchronize_and_maybe_stop()) {
// We simply rely on the fact that a ThreadPool will only schedule
// num_threads workers in parallel.
ThreadPool pool("Deterministic_Parallel_LNS", num_threads);
pool.StartWorkers();
for (int i = 0; i < batch_size; ++i) {
pool.Schedule(
[&generate_and_solve, seed]() { generate_and_solve(seed); });
++seed;
}
}
#else // __PORTABLE_PLATFORM__
while (!stop_function()) {
std::function<void()> update_function = generate_and_solve_function(seed++);
update_function();
#endif // __PORTABLE_PLATFORM__
}
// Non-deterministic LNS loop: keeps up to num_threads sub-solves in flight
// and calls synchronize_and_maybe_stop() before scheduling each new one.
// Each scheduled generate_and_solve() receives a distinct, increasing seed.
template <class StopFunction, class SubSolveFunction>
inline void NonDeterministicOptimizeWithLNS(
    int num_threads, const StopFunction& synchronize_and_maybe_stop,
    const SubSolveFunction& generate_and_solve) {
  int64_t seed = 0;
#if defined(__PORTABLE_PLATFORM__)
  // No threading support on portable platforms: run sequentially.
  while (!synchronize_and_maybe_stop()) {
    generate_and_solve(seed++);
  }
#else   // __PORTABLE_PLATFORM__
  if (num_threads == 1) {
    // Single-threaded: no need for a pool.
    while (!synchronize_and_maybe_stop()) generate_and_solve(seed++);
    return;
  }
  // The lambdas below use little space, but there is no reason to create
  // millions of them. Setting a queue capacity would make pool.Schedule()
  // block and bound the backlog; the call is currently disabled.
  // TODO(user): decide whether to re-enable the queue capacity below.
  ThreadPool pool("Parallel_LNS", num_threads);
  // pool.SetQueueCapacity(10 * num_threads);
  pool.StartWorkers();
  while (!synchronize_and_maybe_stop()) {
    // Capture seed by value so each task gets its own seed.
    pool.Schedule([&generate_and_solve, seed]() { generate_and_solve(seed); });
    ++seed;
  }
#endif  // __PORTABLE_PLATFORM__
}

View File

@@ -1409,6 +1409,8 @@ bool CoreBasedOptimizer::PropagateObjectiveBounds() {
if (gap / term.weight < var_ub - var_lb) {
some_bound_were_tightened = true;
const IntegerValue new_ub = var_lb + gap / term.weight;
CHECK_LT(new_ub, var_ub);
CHECK(!integer_trail_->IsCurrentlyIgnored(term.var));
if (!integer_trail_->Enqueue(
IntegerLiteral::LowerOrEqual(term.var, new_ub), {}, {})) {
return false;

View File

@@ -21,7 +21,7 @@ option java_multiple_files = true;
// Contains the definitions for all the sat algorithm parameters and their
// default values.
//
// NEXT TAG: 134
// NEXT TAG: 135
message SatParameters {
// ==========================================================================
// Branching and polarity
@@ -684,6 +684,7 @@ message SatParameters {
// LNS parameters.
optional bool use_lns = 101 [default = false];
optional int32 lns_num_threads = 102 [default = 1];
optional bool lns_is_deterministic = 134 [default = true];
optional bool lns_focus_on_decision_variables = 105 [default = false];
// Turns on relaxation induced neighborhood generator.

View File

@@ -14,6 +14,7 @@
#include "ortools/sat/synchronization.h"
#include "absl/container/flat_hash_set.h"
#include "ortools/base/stl_util.h"
#include "ortools/sat/cp_model.pb.h"
#include "ortools/sat/cp_model_loader.h"
#include "ortools/sat/cp_model_search.h"
@@ -26,18 +27,70 @@
namespace operations_research {
namespace sat {
// Returns the number of solutions currently in the synchronized pool.
int SharedSolutionRepository::NumSolutions() const {
  absl::MutexLock lock(&mutex_);
  const int num_solutions = solutions_.size();
  return num_solutions;
}
// Returns a copy of the solution at the given position in the pool.
SharedSolutionRepository::Solution SharedSolutionRepository::GetSolution(
    int index) const {
  absl::MutexLock lock(&mutex_);
  return solutions_[index];
}
// Stages a candidate solution. It only becomes visible after Synchronize().
void SharedSolutionRepository::Add(const Solution& solution) {
  absl::MutexLock lock(&mutex_);
  // While the staging buffer is not full, simply append. Duplicates are
  // removed later, in Synchronize().
  if (new_solutions_.size() < num_solutions_to_keep_) {
    new_solutions_.push_back(solution);
    return;
  }
  // Buffer is full: scan once to reject duplicates and locate the worst
  // (largest) staged solution.
  int worst = 0;
  for (int i = 0; i < new_solutions_.size(); ++i) {
    if (new_solutions_[i] == solution) return;  // Already staged.
    if (new_solutions_[worst] < new_solutions_[i]) worst = i;
  }
  // Replace the worst staged solution only if the candidate beats it.
  if (solution < new_solutions_[worst]) {
    new_solutions_[worst] = solution;
  }
}
void SharedSolutionRepository::Synchronize() {
absl::MutexLock mutex_lock(&mutex_);
solutions_.insert(solutions_.end(), new_solutions_.begin(),
new_solutions_.end());
new_solutions_.clear();
gtl::STLSortAndRemoveDuplicates(&solutions_);
if (solutions_.size() > num_solutions_to_keep_) {
solutions_.resize(num_solutions_to_keep_);
}
}
// TODO(user): Experiments and play with the num_solutions_to_keep parameter.
SharedResponseManager::SharedResponseManager(bool log_updates,
const CpModelProto* proto,
const WallTimer* wall_timer)
: log_updates_(log_updates),
model_proto_(*proto),
wall_timer_(*wall_timer) {}
wall_timer_(*wall_timer),
solutions_(/*num_solutions_to_keep=*/10) {}
void SharedResponseManager::UpdateInnerObjectiveBounds(
const std::string& worker_info, IntegerValue lb, IntegerValue ub) {
absl::MutexLock mutex_lock(&mutex_);
CHECK(model_proto_.has_objective());
absl::MutexLock mutex_lock(&mutex_);
// The problem is already solved!
//
// TODO(user): A thread might not be notified right away that the new bounds
// that it is pushing make the problem infeasible. Fix that. For now we just
// abort early here to avoid logging the "#Done" message multiple times.
if (inner_objective_lower_bound_ > inner_objective_upper_bound_) {
return;
}
bool change = false;
if (lb > inner_objective_lower_bound_) {
change = true;
@@ -175,6 +228,15 @@ void SharedResponseManager::NewSolution(const CpSolverResponse& response,
objective_value += coeff * repeated_field_values[var];
}
// Add this solution to the pool, even if it is not improving.
if (!response.solution().empty()) {
SharedSolutionRepository::Solution solution;
solution.variable_values.assign(response.solution().begin(),
response.solution().end());
solution.internal_objective = objective_value;
solutions_.Add(solution);
}
// Ignore any non-strictly improving solution.
// We also perform some basic checks on the inner bounds.
CHECK_GE(objective_value, inner_objective_lower_bound_);

View File

@@ -29,14 +29,74 @@
namespace operations_research {
namespace sat {
// Thread-safe. Keeps a set of n unique best solution found so far.
//
// TODO(user): Maybe add some criteria to only keep solution with an objective
// really close to the best solution.
class SharedSolutionRepository {
 public:
  // num_solutions_to_keep bounds both the staging buffer and the final pool;
  // it must be at least one.
  explicit SharedSolutionRepository(int num_solutions_to_keep)
      : num_solutions_to_keep_(num_solutions_to_keep) {
    CHECK_GE(num_solutions_to_keep_, 1);
  }

  // The solution format used by this class.
  // We use the unscaled internal minimization objective.
  struct Solution {
    int64 internal_objective;
    std::vector<int64> variable_values;

    // Two solutions are identical iff both objective and values match.
    bool operator==(const Solution& other) const {
      return internal_objective == other.internal_objective &&
             variable_values == other.variable_values;
    }
    // Orders solutions by objective first (smaller is better), then by
    // values to get a deterministic total order.
    bool operator<(const Solution& other) const {
      if (internal_objective != other.internal_objective) {
        return internal_objective < other.internal_objective;
      }
      return variable_values < other.variable_values;
    }
  };

  // Returns the number of current solution in the pool. This will never
  // decrease.
  int NumSolutions() const;

  // Returns the solution #i where i must be smaller than NumSolutions().
  Solution GetSolution(int index) const;

  // Add a new solution. Note that it will not be added to the pool of solution
  // right away. One must call Synchronize for this to happen.
  //
  // Works in O(num_solutions_to_keep_).
  void Add(const Solution& solution);

  // Updates the current pool of solution with the one recently added. Note that
  // we use a stable ordering of solutions, so the final pool will be
  // independent of the order of the calls to AddSolution() provided that the
  // set of added solutions is the same.
  //
  // Works in O(num_solutions_to_keep_).
  void Synchronize();

 private:
  const int num_solutions_to_keep_;
  mutable absl::Mutex mutex_;

  // Our two solutions pools, the current one and the new one that will be
  // merged into the current one on each Synchronize() calls.
  std::vector<Solution> solutions_;
  std::vector<Solution> new_solutions_;
};
// Manages the global best response kept by the solver.
// All functions are thread-safe.
class SharedResponseManager {
public:
// If log_updates is true, then all updates to the global "state" will be
// logged. This class is responsible for our solver log progress.
explicit SharedResponseManager(bool log_updates_, const CpModelProto* proto,
const WallTimer* wall_timer);
SharedResponseManager(bool log_updates_, const CpModelProto* proto,
const WallTimer* wall_timer);
// Returns the current solver response. That is the best known response at the
// time of the call with the best feasible solution and objective bounds.
@@ -91,6 +151,13 @@ class SharedResponseManager {
// TODO(user): Also support merging statistics together.
void SetStatsFromModel(Model* model);
// Returns the underlying solution repository where we keep a set of best
// solutions.
const SharedSolutionRepository& SolutionsRepository() const {
return solutions_;
}
SharedSolutionRepository* MutableSolutionsRepository() { return &solutions_; }
private:
void FillObjectiveValuesInBestResponse();
void SetStatsFromModelInternal(Model* model);
@@ -102,6 +169,7 @@ class SharedResponseManager {
absl::Mutex mutex_;
CpSolverResponse best_response_;
SharedSolutionRepository solutions_;
int num_solutions_ = 0;
int64 inner_objective_lower_bound_ = kint64min;