fix #2525; rewrite scheduling cuts; add cumulative cuts; reorganize internal search

This commit is contained in:
Laurent Perron
2021-04-30 14:20:46 +02:00
parent b37d9c786b
commit 851bdaa7eb
12 changed files with 594 additions and 441 deletions

View File

@@ -1072,6 +1072,7 @@ cc_library(
"//ortools/base:random",
"//ortools/base:stl_util",
"//ortools/util:random_engine",
"//ortools/util:time_limit",
"@com_google_absl//absl/container:flat_hash_map",
"@com_google_absl//absl/random",
"@com_google_absl//absl/random:bit_gen_ref",

View File

@@ -1037,10 +1037,6 @@ class ConstraintChecker {
const bool has_active_variables = ct.reservoir().actives_size() > 0;
for (int i = 0; i < num_variables; i++) {
const int64_t time = Value(ct.reservoir().times(i));
if (time < 0) {
VLOG(1) << "reservoir times(" << i << ") is negative.";
return false;
}
if (!has_active_variables || Value(ct.reservoir().actives(i)) == 1) {
deltas[time] += ct.reservoir().demands(i);
}

View File

@@ -568,6 +568,8 @@ void TryToAddCutGenerators(const CpModelProto& model_proto,
m));
relaxation->cut_generators.push_back(
CreateCumulativeCutGenerator(intervals, capacity, demands, m));
relaxation->cut_generators.push_back(
CreateBalasAreaCumulativeCutGenerator(intervals, capacity, demands, m));
}
if (ct.constraint_case() == ConstraintProto::ConstraintCase::kNoOverlap) {
@@ -1202,7 +1204,7 @@ void RegisterObjectiveBoundsImport(
}
void LoadBaseModel(const CpModelProto& model_proto, Model* model) {
auto* shared_response_manager = model->Mutable<SharedResponseManager>();
auto* shared_response_manager = model->GetOrCreate<SharedResponseManager>();
CHECK(shared_response_manager != nullptr);
auto* sat_solver = model->GetOrCreate<SatSolver>();
@@ -1326,8 +1328,7 @@ void LoadFeasibilityPump(const CpModelProto& model_proto, Model* model) {
//
// TODO(user): move to cp_model_loader.h/.cc
void LoadCpModel(const CpModelProto& model_proto, Model* model) {
auto* shared_response_manager = model->Mutable<SharedResponseManager>();
CHECK(shared_response_manager != nullptr);
auto* shared_response_manager = model->GetOrCreate<SharedResponseManager>();
LoadBaseModel(model_proto, model);
@@ -1569,7 +1570,7 @@ void LoadCpModel(const CpModelProto& model_proto, Model* model) {
// and resume from the last search state as if it wasn't interrupted. That would
// allow us to easily interleave different heuristics in the same thread.
void SolveLoadedCpModel(const CpModelProto& model_proto, Model* model) {
auto* shared_response_manager = model->Mutable<SharedResponseManager>();
auto* shared_response_manager = model->GetOrCreate<SharedResponseManager>();
if (shared_response_manager->ProblemIsSolved()) return;
const std::string& solution_info = model->Name();
@@ -1685,7 +1686,7 @@ void SolveLoadedCpModel(const CpModelProto& model_proto, Model* model) {
void QuickSolveWithHint(const CpModelProto& model_proto, Model* model) {
if (!model_proto.has_solution_hint()) return;
auto* shared_response_manager = model->Mutable<SharedResponseManager>();
auto* shared_response_manager = model->GetOrCreate<SharedResponseManager>();
if (shared_response_manager->ProblemIsSolved()) return;
// Temporarily change the parameters.
@@ -1737,15 +1738,17 @@ void QuickSolveWithHint(const CpModelProto& model_proto, Model* model) {
// Solve a model with a different objective consisting of minimizing the L1
// distance with the provided hint. Note that this method creates an in-memory
// copy of the model and loads a local Model object from the copied model.
void MinimizeL1DistanceWithHint(const CpModelProto& model_proto,
WallTimer* wall_timer,
SharedTimeLimit* shared_time_limit,
Model* model) {
void MinimizeL1DistanceWithHint(const CpModelProto& model_proto, Model* model) {
Model local_model;
// Forward some shared class.
local_model.Register<ModelSharedTimeLimit>(
model->GetOrCreate<ModelSharedTimeLimit>());
local_model.Register<WallTimer>(model->GetOrCreate<WallTimer>());
if (!model_proto.has_solution_hint()) return;
auto* shared_response_manager = model->Mutable<SharedResponseManager>();
auto* shared_response_manager = model->GetOrCreate<SharedResponseManager>();
if (shared_response_manager->ProblemIsSolved()) return;
auto* parameters = local_model.GetOrCreate<SatParameters>();
@@ -1810,11 +1813,9 @@ void MinimizeL1DistanceWithHint(const CpModelProto& model_proto,
updated_model_proto.mutable_objective()->add_coeffs(1);
}
SharedResponseManager local_response_manager(
parameters->enumerate_all_solutions(), &updated_model_proto, wall_timer,
shared_time_limit, model->GetOrCreate<SolverLogger>());
local_model.Register<SharedResponseManager>(&local_response_manager);
auto* local_response_manager =
local_model.GetOrCreate<SharedResponseManager>();
local_response_manager->InitializeObjective(updated_model_proto);
// Solve optimization problem.
LoadCpModel(updated_model_proto, &local_model);
@@ -1879,22 +1880,17 @@ void PostsolveResponseWithFullSolver(
// TODO(user): this problem is usually trivial, but we may still want to
// impose a time limit or copy some of the parameters passed by the user.
Model postsolve_model;
postsolve_model.Register<WallTimer>(wall_timer);
{
SatParameters& params = *postsolve_model.GetOrCreate<SatParameters>();
params.set_linearization_level(0);
params.set_cp_model_probing_level(0);
}
std::unique_ptr<TimeLimit> time_limit(TimeLimit::Infinite());
SharedTimeLimit shared_time_limit(time_limit.get());
SharedResponseManager local_response_manager(
/*enumerate_all_solutions=*/false, &mapping_proto, wall_timer,
&shared_time_limit, postsolve_model.GetOrCreate<SolverLogger>());
postsolve_model.Register(&local_response_manager);
LoadCpModel(mapping_proto, &postsolve_model);
SolveLoadedCpModel(mapping_proto, &postsolve_model);
const CpSolverResponse postsolve_response =
local_response_manager.GetResponse();
postsolve_model.GetOrCreate<SharedResponseManager>()->GetResponse();
CHECK(postsolve_response.status() == CpSolverStatus::FEASIBLE ||
postsolve_response.status() == CpSolverStatus::OPTIMAL);
@@ -2149,7 +2145,7 @@ CpSolverResponse SolvePureSatModel(const CpModelProto& model_proto,
struct SharedClasses {
CpModelProto const* model_proto;
WallTimer* wall_timer;
SharedTimeLimit* time_limit;
ModelSharedTimeLimit* time_limit;
SharedBoundsManager* bounds;
SharedResponseManager* response;
SharedRelaxationSolutionRepository* relaxation_solutions;
@@ -2225,8 +2221,7 @@ class FullProblemSolver : public SubSolver {
}
if (local_model_->GetOrCreate<SatParameters>()->repair_hint()) {
MinimizeL1DistanceWithHint(*shared_->model_proto, shared_->wall_timer,
shared_->time_limit, local_model_.get());
MinimizeL1DistanceWithHint(*shared_->model_proto, local_model_.get());
} else {
QuickSolveWithHint(*shared_->model_proto, local_model_.get());
}
@@ -2563,14 +2558,13 @@ class LnsSolver : public SubSolver {
// TODO(user): Depending on the problem, we should probably use the
// parameters that work best (core, linearization_level, etc...) or
// maybe we can just randomize them like for the base solution used.
SharedResponseManager local_response_manager(
/*enumerate_all_solutions=*/false, &lns_fragment, shared_->wall_timer,
shared_->time_limit, local_model.GetOrCreate<SolverLogger>());
local_model.Register(&local_response_manager);
auto* local_response_manager =
local_model.GetOrCreate<SharedResponseManager>();
local_response_manager->InitializeObjective(lns_fragment);
LoadCpModel(lns_fragment, &local_model);
QuickSolveWithHint(lns_fragment, &local_model);
SolveLoadedCpModel(lns_fragment, &local_model);
CpSolverResponse local_response = local_response_manager.GetResponse();
CpSolverResponse local_response = local_response_manager->GetResponse();
// TODO(user): we actually do not need to postsolve if the solution is
// not going to be used...
@@ -2599,7 +2593,7 @@ class LnsSolver : public SubSolver {
shared_->response->GetInnerObjectiveLowerBound();
const IntegerValue local_obj_lb =
local_response_manager.GetInnerObjectiveLowerBound();
local_response_manager->GetInnerObjectiveLowerBound();
const double scaled_local_obj_bound = ScaleObjectiveValue(
lns_fragment.objective(), local_obj_lb.value());
@@ -2710,14 +2704,9 @@ class LnsSolver : public SubSolver {
};
void SolveCpModelParallel(const CpModelProto& model_proto,
SharedResponseManager* shared_response_manager,
SharedTimeLimit* shared_time_limit,
WallTimer* wall_timer, Model* global_model,
SolverLogger* logger) {
CHECK(shared_response_manager != nullptr);
Model* global_model) {
const SatParameters& parameters = *global_model->GetOrCreate<SatParameters>();
const int num_search_workers = parameters.num_search_workers();
const bool log_search = parameters.log_search_progress() || VLOG_IS_ON(1);
CHECK(!parameters.enumerate_all_solutions())
<< "Enumerating all solutions in parallel is not supported.";
@@ -2756,10 +2745,10 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
SharedClasses shared;
shared.model_proto = &model_proto;
shared.wall_timer = wall_timer;
shared.time_limit = shared_time_limit;
shared.wall_timer = global_model->GetOrCreate<WallTimer>();
shared.time_limit = global_model->GetOrCreate<ModelSharedTimeLimit>();
shared.bounds = shared_bounds_manager.get();
shared.response = shared_response_manager;
shared.response = global_model->GetOrCreate<SharedResponseManager>();
shared.relaxation_solutions = shared_relaxation_solutions.get();
shared.lp_solutions = shared_lp_solutions.get();
shared.incomplete_solutions = shared_incomplete_solutions.get();
@@ -2768,21 +2757,19 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
std::vector<std::unique_ptr<SubSolver>> subsolvers;
// Add a synchronization point for the shared classes.
subsolvers.push_back(absl::make_unique<SynchronizationPoint>(
[shared_response_manager, &shared_bounds_manager,
&shared_relaxation_solutions, &shared_lp_solutions]() {
shared_response_manager->Synchronize();
shared_response_manager->MutableSolutionsRepository()->Synchronize();
if (shared_bounds_manager != nullptr) {
shared_bounds_manager->Synchronize();
}
if (shared_relaxation_solutions != nullptr) {
shared_relaxation_solutions->Synchronize();
}
if (shared_lp_solutions != nullptr) {
shared_lp_solutions->Synchronize();
}
}));
subsolvers.push_back(absl::make_unique<SynchronizationPoint>([&shared]() {
shared.response->Synchronize();
shared.response->MutableSolutionsRepository()->Synchronize();
if (shared.bounds != nullptr) {
shared.bounds->Synchronize();
}
if (shared.relaxation_solutions != nullptr) {
shared.relaxation_solutions->Synchronize();
}
if (shared.lp_solutions != nullptr) {
shared.lp_solutions->Synchronize();
}
}));
if (parameters.use_lns_only()) {
// Register something to find a first solution. Note that this is mainly
@@ -2817,8 +2804,8 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
// Add the NeighborhoodGeneratorHelper as a special subsolver so that its
// Synchronize() is called before any LNS neighborhood solvers.
auto unique_helper = absl::make_unique<NeighborhoodGeneratorHelper>(
&model_proto, &parameters, shared_response_manager, shared_time_limit,
shared_bounds_manager.get());
&model_proto, &parameters, shared.response, shared.time_limit,
shared.bounds);
NeighborhoodGeneratorHelper* helper = unique_helper.get();
subsolvers.push_back(std::move(unique_helper));
@@ -2907,22 +2894,22 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
// Add a synchronization point for the primal integral that is executed last.
// This way, after each batch, the proper deterministic time is updated and
// then the function to integrate take the value of the new gap.
subsolvers.push_back(
absl::make_unique<SynchronizationPoint>([shared_response_manager]() {
shared_response_manager->UpdatePrimalIntegral();
}));
subsolvers.push_back(absl::make_unique<SynchronizationPoint>(
[&shared]() { shared.response->UpdatePrimalIntegral(); }));
// Log the name of all our SubSolvers.
if (log_search) {
auto* logger = global_model->GetOrCreate<SolverLogger>();
if (logger->LoggingIsEnabled()) {
std::vector<std::string> names;
for (const auto& subsolver : subsolvers) {
if (!subsolver->name().empty()) names.push_back(subsolver->name());
}
SOLVER_LOG(logger, "");
SOLVER_LOG(logger, absl::StrFormat("Starting Search at %.2fs with %i "
"workers and subsolvers: [ %s ]",
wall_timer->Get(), num_search_workers,
absl::StrJoin(names, ", ")));
SOLVER_LOG(logger,
absl::StrFormat("Starting Search at %.2fs with %i "
"workers and subsolvers: [ %s ]",
shared.wall_timer->Get(), num_search_workers,
absl::StrJoin(names, ", ")));
}
// Launch the main search loop.
@@ -2958,39 +2945,22 @@ void AddPostsolveClauses(const std::vector<int>& postsolve_mapping,
} // namespace
CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
WallTimer wall_timer;
UserTimer user_timer;
wall_timer.Start();
user_timer.Start();
#if defined(_MSC_VER)
// On windows, The final_response is optimized out in the return part, and is
// swapped out before the cleanup callback is called. A workaround is to
// create a unique ptr that will forbid this optimization.
std::unique_ptr<CpSolverResponse> final_response_ptr(new CpSolverResponse());
CpSolverResponse& final_response = *final_response_ptr.get();
#else
CpSolverResponse final_response;
#endif
auto* wall_timer = model->GetOrCreate<WallTimer>();
auto* user_timer = model->GetOrCreate<UserTimer>();
wall_timer->Start();
user_timer->Start();
#if !defined(__PORTABLE_PLATFORM__)
// Dump?
// Dump initial model?
if (absl::GetFlag(FLAGS_cp_model_dump_models)) {
const std::string file =
absl::StrCat(absl::GetFlag(FLAGS_cp_model_dump_prefix), "model.pbtxt");
LOG(INFO) << "Dumping cp model proto to '" << file << "'.";
CHECK_OK(file::SetTextProto(file, model_proto, file::Defaults()));
}
#endif // __PORTABLE_PLATFORM__
auto dump_response_cleanup = absl::MakeCleanup([&final_response] {
if (absl::GetFlag(FLAGS_cp_model_dump_response)) {
const std::string file = absl::StrCat(
absl::GetFlag(FLAGS_cp_model_dump_prefix), "response.pbtxt");
LOG(INFO) << "Dumping response proto to '" << file << "'.";
CHECK_OK(file::SetTextProto(file, final_response, file::Defaults()));
}
});
#if !defined(__PORTABLE_PLATFORM__)
// Override parameters?
if (!absl::GetFlag(FLAGS_cp_model_params).empty()) {
SatParameters params = *model->GetOrCreate<SatParameters>();
@@ -3003,26 +2973,24 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
#endif // __PORTABLE_PLATFORM__
// Initialize the time limit from the parameters.
model->GetOrCreate<TimeLimit>()->ResetLimitFromParameters(
*model->GetOrCreate<SatParameters>());
SharedTimeLimit shared_time_limit(model->GetOrCreate<TimeLimit>());
const SatParameters& params = *model->GetOrCreate<SatParameters>();
model->GetOrCreate<TimeLimit>()->ResetLimitFromParameters(params);
auto* shared_time_limit = model->GetOrCreate<ModelSharedTimeLimit>();
#if !defined(__PORTABLE_PLATFORM__)
// Register SIGINT handler if requested by the parameters.
SigintHandler handler;
if (model->GetOrCreate<SatParameters>()->catch_sigint_signal()) {
handler.Register([&shared_time_limit]() { shared_time_limit.Stop(); });
model->GetOrCreate<SigintHandler>()->Register(
[&shared_time_limit]() { shared_time_limit->Stop(); });
}
#endif // __PORTABLE_PLATFORM__
// Enable the logging component.
const SatParameters& params = *model->GetOrCreate<SatParameters>();
const bool log_search = params.log_search_progress() || VLOG_IS_ON(1);
SolverLogger* logger = model->GetOrCreate<SolverLogger>();
std::string log_string;
logger->EnableLogging(log_search);
logger->EnableLogging(params.log_search_progress() || VLOG_IS_ON(1));
logger->SetLogToStdOut(params.log_to_stdout());
std::string log_string;
if (params.log_to_response()) {
const auto append_to_string = [&log_string](const std::string& message) {
absl::StrAppend(&log_string, message, "\n");
@@ -3033,29 +3001,62 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
SOLVER_LOG(logger, "");
SOLVER_LOG(logger, "Starting CP-SAT solver.");
SOLVER_LOG(logger, "Parameters: ", params.ShortDebugString());
if (log_search && params.use_absl_random()) {
if (logger->LoggingIsEnabled() && params.use_absl_random()) {
model->GetOrCreate<ModelRandomGenerator>()->LogSalt();
}
auto* shared_response_manager = model->GetOrCreate<SharedResponseManager>();
shared_response_manager->set_dump_prefix(
absl::GetFlag(FLAGS_cp_model_dump_prefix));
#if !defined(__PORTABLE_PLATFORM__)
// Note that the postprocessors are executed in reverse order, so this
// will always dump the response just before it is returned since it is
// the first one we register.
if (absl::GetFlag(FLAGS_cp_model_dump_response)) {
shared_response_manager->AddFinalSolutionPostprocessor(
[](CpSolverResponse* response) {
const std::string file = absl::StrCat(
absl::GetFlag(FLAGS_cp_model_dump_prefix), "response.pbtxt");
LOG(INFO) << "Dumping response proto to '" << file << "'.";
CHECK_OK(file::SetTextProto(file, *response, file::Defaults()));
});
}
#endif // __PORTABLE_PLATFORM__
// Always display the final response stats if requested.
auto display_response_cleanup =
absl::MakeCleanup([&final_response, &model_proto, logger, &log_string] {
// This also copy the logs to the response if requested.
shared_response_manager->AddFinalSolutionPostprocessor(
[logger, &model_proto, &log_string](CpSolverResponse* response) {
SOLVER_LOG(logger, "");
SOLVER_LOG(logger, CpSolverResponseStats(final_response,
SOLVER_LOG(logger, CpSolverResponseStats(*response,
model_proto.has_objective()));
if (!log_string.empty()) {
final_response.set_solve_log(log_string);
response->set_solve_log(log_string);
}
});
// Always add the timing information to a response. Note that it is important
// to add this after the log/dump postprocessor since we execute them in
// reverse order.
shared_response_manager->AddSolutionPostprocessor(
[&wall_timer, &user_timer,
&shared_time_limit](CpSolverResponse* response) {
response->set_wall_time(wall_timer->Get());
response->set_user_time(user_timer->Get());
response->set_deterministic_time(
shared_time_limit->GetElapsedDeterministicTime());
});
// Validate model_proto.
// TODO(user): provide an option to skip this step for speed?
{
const std::string error = ValidateCpModel(model_proto);
if (!error.empty()) {
SOLVER_LOG(logger, error);
final_response.set_status(CpSolverStatus::MODEL_INVALID);
return final_response;
shared_response_manager->MutableResponse()->set_status(
CpSolverStatus::MODEL_INVALID);
return shared_response_manager->GetResponse();
}
}
@@ -3089,24 +3090,20 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
if (is_pure_sat) {
// TODO(user): All this duplication will go away when we are fast enough
// on pure-sat model with the CpModel presolve...
final_response =
SolvePureSatModel(model_proto, &wall_timer, model, logger);
final_response.set_wall_time(wall_timer.Get());
final_response.set_user_time(user_timer.Get());
final_response.set_deterministic_time(
shared_time_limit.GetElapsedDeterministicTime());
const SatParameters& params = *model->GetOrCreate<SatParameters>();
*shared_response_manager->MutableResponse() =
SolvePureSatModel(model_proto, wall_timer, model, logger);
if (params.fill_tightened_domains_in_response()) {
*final_response.mutable_tightened_variables() = model_proto.variables();
*shared_response_manager->MutableResponse()
->mutable_tightened_variables() = model_proto.variables();
}
return final_response;
return shared_response_manager->GetResponse();
}
}
// Presolve and expansions.
SOLVER_LOG(logger, "");
SOLVER_LOG(logger,
absl::StrFormat("Starting presolve at %.2fs", wall_timer.Get()));
absl::StrFormat("Starting presolve at %.2fs", wall_timer->Get()));
CpModelProto new_cp_model_proto;
CpModelProto mapping_proto;
auto context = absl::make_unique<PresolveContext>(model, &new_cp_model_proto,
@@ -3122,7 +3119,6 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
CopyEverythingExceptVariablesAndConstraintsFieldsIntoContext(model_proto,
context.get());
bool degraded_assumptions_support = false;
if (params.num_search_workers() > 1 || model_proto.has_objective()) {
// For the case where the assumptions are currently not supported, we just
// assume they are fixed, and will always report all of them in the UNSAT
@@ -3130,28 +3126,35 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
//
// If the mode is not degraded, we will hopefully report a small subset
// in case there is no feasible solution under these assumptions.
degraded_assumptions_support = true;
shared_response_manager->AddFinalSolutionPostprocessor(
[&model_proto](CpSolverResponse* response) {
if (response->status() != CpSolverStatus::INFEASIBLE) return;
// For now, just pass in all assumptions.
*response->mutable_sufficient_assumptions_for_infeasibility() =
model_proto.assumptions();
});
context->InitializeNewDomains();
for (const int ref : model_proto.assumptions()) {
if (!context->SetLiteralToTrue(ref)) {
final_response.set_status(CpSolverStatus::INFEASIBLE);
final_response.add_sufficient_assumptions_for_infeasibility(ref);
return final_response;
shared_response_manager->MutableResponse()->set_status(
CpSolverStatus::INFEASIBLE);
shared_response_manager->MutableResponse()
->add_sufficient_assumptions_for_infeasibility(ref);
return shared_response_manager->GetResponse();
}
}
}
// This function will be called before any CpSolverResponse is returned
// to the user (at the end and in callbacks).
std::function<void(CpSolverResponse * response)> postprocess_solution;
// Do the actual presolve.
std::vector<int> postsolve_mapping;
const bool ok = PresolveCpModel(context.get(), &postsolve_mapping);
if (!ok) {
LOG(ERROR) << "Error while presolving, likely due to integer overflow.";
final_response.set_status(CpSolverStatus::MODEL_INVALID);
return final_response;
shared_response_manager->MutableResponse()->set_status(
CpSolverStatus::MODEL_INVALID);
return shared_response_manager->GetResponse();
}
SOLVER_LOG(logger, "");
@@ -3160,55 +3163,63 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
SOLVER_LOG(logger, "Preloading model.");
if (params.cp_model_presolve()) {
postprocess_solution = [&model_proto, &params, &mapping_proto,
&shared_time_limit, &postsolve_mapping, &wall_timer,
&user_timer, model](CpSolverResponse* response) {
AddPostsolveClauses(postsolve_mapping, model, &mapping_proto);
PostsolveResponseWrapper(params, model_proto.variables_size(),
mapping_proto, postsolve_mapping, &wall_timer,
response);
if (!response->solution().empty()) {
CHECK(SolutionIsFeasible(
model_proto,
std::vector<int64_t>(response->solution().begin(),
response->solution().end()),
&mapping_proto, &postsolve_mapping))
<< "final postsolved solution";
}
if (params.fill_tightened_domains_in_response()) {
// TODO(user): for now, we just use the domain inferred during presolve.
if (mapping_proto.variables().size() >=
model_proto.variables().size()) {
for (int i = 0; i < model_proto.variables().size(); ++i) {
*response->add_tightened_variables() = mapping_proto.variables(i);
shared_response_manager->AddSolutionPostprocessor(
[&model_proto, &params, &mapping_proto, &postsolve_mapping, wall_timer,
model](CpSolverResponse* response) {
AddPostsolveClauses(postsolve_mapping, model, &mapping_proto);
PostsolveResponseWrapper(params, model_proto.variables_size(),
mapping_proto, postsolve_mapping, wall_timer,
response);
if (!response->solution().empty()) {
CHECK(SolutionIsFeasible(
model_proto,
std::vector<int64_t>(response->solution().begin(),
response->solution().end()),
&mapping_proto, &postsolve_mapping))
<< "postsolved solution";
}
}
}
response->set_wall_time(wall_timer.Get());
response->set_user_time(user_timer.Get());
response->set_deterministic_time(
shared_time_limit.GetElapsedDeterministicTime());
};
if (params.fill_tightened_domains_in_response()) {
// TODO(user): for now, we just use the domain inferred during
// presolve.
if (mapping_proto.variables().size() >=
model_proto.variables().size()) {
for (int i = 0; i < model_proto.variables().size(); ++i) {
*response->add_tightened_variables() =
mapping_proto.variables(i);
}
}
}
});
} else {
postprocess_solution = [&model_proto, &params, &wall_timer,
&shared_time_limit,
&user_timer](CpSolverResponse* response) {
// Truncate the solution in case model expansion added more variables.
const int initial_size = model_proto.variables_size();
if (response->solution_size() > 0) {
response->mutable_solution()->Truncate(initial_size);
} else if (response->solution_lower_bounds_size() > 0) {
response->mutable_solution_lower_bounds()->Truncate(initial_size);
response->mutable_solution_upper_bounds()->Truncate(initial_size);
}
if (params.fill_tightened_domains_in_response()) {
*response->mutable_tightened_variables() = model_proto.variables();
}
response->set_wall_time(wall_timer.Get());
response->set_user_time(user_timer.Get());
response->set_deterministic_time(
shared_time_limit.GetElapsedDeterministicTime());
};
shared_response_manager->AddFinalSolutionPostprocessor(
[&model_proto](CpSolverResponse* response) {
if (!response->solution().empty()) {
CHECK(SolutionIsFeasible(
model_proto, std::vector<int64_t>(response->solution().begin(),
response->solution().end())));
}
});
shared_response_manager->AddSolutionPostprocessor(
[&model_proto, &params](CpSolverResponse* response) {
// Truncate the solution in case model expansion added more variables.
const int initial_size = model_proto.variables_size();
if (response->solution_size() > 0) {
response->mutable_solution()->Truncate(initial_size);
if (DEBUG_MODE ||
absl::GetFlag(FLAGS_cp_model_check_intermediate_solutions)) {
CHECK(SolutionIsFeasible(
model_proto,
std::vector<int64_t>(response->solution().begin(),
response->solution().end())));
}
} else if (response->solution_lower_bounds_size() > 0) {
response->mutable_solution_lower_bounds()->Truncate(initial_size);
response->mutable_solution_upper_bounds()->Truncate(initial_size);
}
if (params.fill_tightened_domains_in_response()) {
*response->mutable_tightened_variables() = model_proto.variables();
}
});
}
// Delete the context.
@@ -3218,31 +3229,10 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
DetectAndAddSymmetryToProto(params, &new_cp_model_proto, logger);
}
SharedResponseManager shared_response_manager(
params.enumerate_all_solutions(), &new_cp_model_proto, &wall_timer,
&shared_time_limit, logger);
shared_response_manager.set_dump_prefix(
absl::GetFlag(FLAGS_cp_model_dump_prefix));
shared_response_manager.SetGapLimitsFromParameters(params);
model->Register<SharedResponseManager>(&shared_response_manager);
const auto& observers = model->GetOrCreate<SolutionObservers>()->observers;
if (!observers.empty()) {
shared_response_manager.AddSolutionCallback(
[&model_proto, &observers, &postprocess_solution](
const CpSolverResponse& response_of_presolved_problem) {
CpSolverResponse response = response_of_presolved_problem;
postprocess_solution(&response);
if (!response.solution().empty()) {
if (DEBUG_MODE ||
absl::GetFlag(FLAGS_cp_model_check_intermediate_solutions)) {
CHECK(SolutionIsFeasible(
model_proto,
std::vector<int64_t>(response.solution().begin(),
response.solution().end())));
}
}
shared_response_manager->AddSolutionCallback(
[&observers](const CpSolverResponse& response) {
for (const auto& observer : observers) {
observer(response);
}
@@ -3254,17 +3244,13 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
// trivial min/max value if the user left it empty. This avoids to display
// [-infinity, infinity] for the initial objective search space.
if (new_cp_model_proto.has_objective()) {
const Domain domain = ReadDomainFromProto(new_cp_model_proto.objective());
if (!domain.IsEmpty()) {
shared_response_manager.UpdateInnerObjectiveBounds(
"initial_domain", IntegerValue(domain.Min()),
IntegerValue(domain.Max()));
}
shared_response_manager->InitializeObjective(new_cp_model_proto);
shared_response_manager->SetGapLimitsFromParameters(params);
}
// Start counting the primal integral from the current determistic time and
// initial objective domain gap that we just filled.
shared_response_manager.UpdatePrimalIntegral();
shared_response_manager->UpdatePrimalIntegral();
#if !defined(__PORTABLE_PLATFORM__)
if (absl::GetFlag(FLAGS_cp_model_dump_models)) {
@@ -3282,7 +3268,7 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
}
#endif // __PORTABLE_PLATFORM__
if (params.stop_after_presolve() || shared_time_limit.LimitReached()) {
if (params.stop_after_presolve() || shared_time_limit->LimitReached()) {
int64_t num_terms = 0;
for (const ConstraintProto& ct : new_cp_model_proto.constraints()) {
num_terms += UsedVariables(ct).size();
@@ -3293,19 +3279,15 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
"\nPresolvedNumConstraints: ", new_cp_model_proto.constraints().size(),
"\nPresolvedNumTerms: ", num_terms);
shared_response_manager.SetStatsFromModel(model);
final_response = shared_response_manager.GetResponse();
postprocess_solution(&final_response);
return final_response;
shared_response_manager->SetStatsFromModel(model);
return shared_response_manager->GetResponse();
}
// Make sure everything stops when we have a first solution if requested.
if (params.stop_after_first_solution()) {
shared_response_manager.AddSolutionCallback(
[&shared_time_limit](
const CpSolverResponse& response_of_presolved_problem) {
shared_time_limit.Stop();
shared_response_manager->AddSolutionCallback(
[shared_time_limit](const CpSolverResponse& response) {
shared_time_limit->Stop();
});
}
@@ -3314,48 +3296,46 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
// We ignore the multithreading parameter in this case.
#else // __PORTABLE_PLATFORM__
if (params.num_search_workers() > 1 || params.interleave_search()) {
SolveCpModelParallel(new_cp_model_proto, &shared_response_manager,
&shared_time_limit, &wall_timer, model, logger);
SolveCpModelParallel(new_cp_model_proto, model);
#endif // __PORTABLE_PLATFORM__
} else {
SOLVER_LOG(logger, "");
SOLVER_LOG(logger, absl::StrFormat("Starting to load the model at %.2fs",
wall_timer.Get()));
shared_response_manager.SetUpdatePrimalIntegralOnEachChange(true);
wall_timer->Get()));
shared_response_manager->SetUpdatePrimalIntegralOnEachChange(true);
LoadCpModel(new_cp_model_proto, model);
shared_response_manager.LoadDebugSolution(model);
shared_response_manager->LoadDebugSolution(model);
SOLVER_LOG(logger, "");
SOLVER_LOG(logger, absl::StrFormat("Starting sequential search at %.2fs",
wall_timer.Get()));
wall_timer->Get()));
if (params.repair_hint()) {
MinimizeL1DistanceWithHint(new_cp_model_proto, &wall_timer,
&shared_time_limit, model);
MinimizeL1DistanceWithHint(new_cp_model_proto, model);
} else {
QuickSolveWithHint(new_cp_model_proto, model);
}
SolveLoadedCpModel(new_cp_model_proto, model);
}
final_response = shared_response_manager.GetResponse();
postprocess_solution(&final_response);
if (!final_response.solution().empty()) {
CHECK(SolutionIsFeasible(
model_proto, std::vector<int64_t>(final_response.solution().begin(),
final_response.solution().end())));
}
if (degraded_assumptions_support &&
final_response.status() == CpSolverStatus::INFEASIBLE) {
// For now, just pass in all assumptions.
*final_response.mutable_sufficient_assumptions_for_infeasibility() =
model_proto.assumptions();
}
if (log_search && params.num_search_workers() > 1) {
SOLVER_LOG(logger, "");
shared_response_manager.DisplayImprovementStatistics();
if (logger->LoggingIsEnabled()) {
if (params.num_search_workers() <= 1) {
const auto& lps =
*model->GetOrCreate<LinearProgrammingConstraintCollection>();
if (!lps.empty()) {
SOLVER_LOG(logger, "");
for (const auto* lp : lps) {
SOLVER_LOG(logger, lp->Statistics());
}
}
}
if (params.num_search_workers() > 1) {
SOLVER_LOG(logger, "");
shared_response_manager->DisplayImprovementStatistics();
}
}
return final_response;
return shared_response_manager->GetResponse();
}
CpSolverResponse Solve(const CpModelProto& model_proto) {

View File

@@ -29,6 +29,7 @@
#include "ortools/sat/integer.h"
#include "ortools/sat/intervals.h"
#include "ortools/sat/linear_constraint.h"
#include "ortools/sat/linear_constraint_manager.h"
#include "ortools/sat/sat_base.h"
#include "ortools/util/time_limit.h"
@@ -2413,6 +2414,103 @@ CutGenerator CreateNoOverlapPrecedenceCutGenerator(
return result;
}
// One task, as seen by the Balas cut separation in GenerateBalasCut() below.
// NOTE: call sites build these with brace (aggregate) initialization, so the
// field order matters; do not reorder.
struct BalasEvent {
  AffineExpression end;    // End expression of the task.
  IntegerValue start_min;  // Lower bound of the task's start.
  IntegerValue size_min;   // Minimum duration (minimum area in the cumulative case).
  double lp_end;           // Current LP value of the end expression.
};
// We generate the cut from:
// E. Balas, On the facial structure of scheduling polyhedra,
// Mathematical Programming Essays in Honor of George B. Dantzig
// Part I, Mathematical Programming Studies, vol. 24,
// Springer, 1985, pp. 179-218.
// and
// M. Queyranne, Structure of a simple scheduling polyhedron,
// Mathematical Programming 58 (1993), 263-285
//
// The original cut is:
// sum(end_min_i * duration_min_i) >=
// (sum(duration_min_i^2) + sum(duration_min_i)^2) / 2
// We strengthen this cut by noticing that if all tasks start after S,
// then replacing end_min_i by (end_min_i - S) is still valid.
//
// A second difference is that we look at a set of intervals starting
// after a given start_min, sorted by relative
// (end_lp - start_min) / duration_min.
//
// We actually compute the cuts with all the sizes actually equal to (size /
// size_divisor), but to keep the computation in the integer domain, we multiply
// by size_divisor where needed instead.
// Separates Balas/Queyranne completion-time cuts for the given events (see the
// derivation in the comment above). `size_divisor` keeps the computation in
// the integer domain when sizes conceptually equal (size / size_divisor):
// the no-overlap case passes 1, the cumulative case passes capacity_max.
// Violated cuts are pushed to `manager` through a TopNCuts buffer.
void GenerateBalasCut(
    const std::string& cut_name,
    const absl::StrongVector<IntegerVariable, double>& lp_values,
    IntegerValue size_divisor, std::vector<BalasEvent> events, Model* model,
    LinearConstraintManager* manager) {
  // Keep only the 15 most violated cuts found below.
  TopNCuts top_n_cuts(15);

  // Sort by start min to bucketize by start_min.
  std::sort(events.begin(), events.end(),
            [](const BalasEvent& e1, const BalasEvent& e2) {
              return e1.start_min < e2.start_min;
            });
  // Each distinct start_min value is tried as the sequence start S: all events
  // from `start` onward have start_min >= S, so the strengthened inequality
  // applies to any prefix of them (once re-sorted by LP end value).
  for (int start = 0; start + 1 < events.size(); ++start) {
    // Skip to the next start_min value.
    if (start > 0 && events[start].start_min == events[start - 1].start_min) {
      continue;
    }
    const double sequence_start_min = ToDouble(events[start].start_min);
    std::vector<BalasEvent> residual_tasks(events.begin() + start,
                                           events.end());
    // Consider prefixes in increasing LP end order: tasks whose LP end is
    // small are the most likely to make the inequality violated.
    std::sort(residual_tasks.begin(), residual_tasks.end(),
              [](const BalasEvent& e1, const BalasEvent& e2) {
                return e1.lp_end < e2.lp_end;
              });
    int best_end = -1;
    double best_efficacy = 0.01;  // Minimum normalized violation to keep a cut.
    IntegerValue best_min_contrib(0);
    // Running sums over the current prefix residual_tasks[0..i].
    IntegerValue sum_duration(0);
    IntegerValue sum_square_duration(0);
    double lp_contrib = 0;
    IntegerValue current_start_min(kMaxIntegerValue);
    for (int i = 0; i < residual_tasks.size(); ++i) {
      DCHECK_GE(residual_tasks[i].start_min, sequence_start_min);
      const IntegerValue duration = residual_tasks[i].size_min;
      sum_duration += duration;
      sum_square_duration += duration * duration;
      // LHS of the cut at the LP point; scaled by size_divisor to match the
      // integer RHS below.
      lp_contrib +=
          residual_tasks[i].lp_end * ToDouble(duration * size_divisor);
      current_start_min =
          std::min(current_start_min, residual_tasks[i].start_min);
      // RHS: ((sum d)^2 + sum d^2) / 2 + S * (sum d) * size_divisor, i.e. the
      // original inequality with sizes d / size_divisor, multiplied through by
      // size_divisor^2 to stay integral.
      const IntegerValue min_contrib =
          (sum_duration * sum_duration + sum_square_duration) / 2 +
          current_start_min * sum_duration * size_divisor;
      // Violation normalized by the cut's coefficient norm.
      const double efficacy = (ToDouble(min_contrib) - lp_contrib) /
                              std::sqrt(ToDouble(sum_square_duration));
      // TODO(user): Check overflow and ignore if too big.
      if (efficacy > best_efficacy) {
        best_efficacy = efficacy;
        best_end = i;
        best_min_contrib = min_contrib;
      }
    }
    if (best_end != -1) {
      // Emit: sum over the best prefix of end_i * d_i * size_divisor
      //       >= best_min_contrib.
      LinearConstraintBuilder cut(model, best_min_contrib, kMaxIntegerValue);
      for (int i = 0; i <= best_end; ++i) {
        cut.AddTerm(residual_tasks[i].end,
                    residual_tasks[i].size_min * size_divisor);
      }
      top_n_cuts.AddCut(cut.Build(), cut_name, lp_values);
    }
  }
  top_n_cuts.TransferToManager(lp_values, manager);
}
CutGenerator CreateNoOverlapBalasCutGenerator(
const std::vector<IntervalVariable>& intervals, Model* model) {
CutGenerator result;
@@ -2425,106 +2523,85 @@ CutGenerator CreateNoOverlapBalasCutGenerator(
Trail* trail = model->GetOrCreate<Trail>();
result.generate_cuts = [trail, helper, model](
const absl::StrongVector<IntegerVariable, double>&
lp_values,
LinearConstraintManager* manager) {
if (trail->CurrentDecisionLevel() > 0) return;
result.generate_cuts =
[trail, helper, model](
const absl::StrongVector<IntegerVariable, double>& lp_values,
LinearConstraintManager* manager) {
if (trail->CurrentDecisionLevel() > 0) return;
struct Event {
AffineExpression end;
IntegerValue start_min;
IntegerValue size_min;
double lp_end;
int index;
};
std::vector<Event> events;
std::vector<BalasEvent> events;
std::vector<BalasEvent> reverse_events;
for (int index1 = 0; index1 < helper->NumTasks(); ++index1) {
if (!helper->IsPresent(index1)) continue;
const IntegerValue size_min = helper->SizeMin(index1);
if (size_min > 0) {
const AffineExpression end_expr = helper->Ends()[index1];
events.push_back({end_expr, helper->StartMin(index1), size_min,
end_expr.LpValue(lp_values), index1});
}
}
// We generate the cut from:
// E. Balas, On the facial structure of scheduling polyhedra,
// Mathematical Programming Essays in Honor of George B. Dantzig
// Part I, Mathematical Programming Studies, vol. 24,
// Springer, 1985, pp. 179218.
//
// The original cut is:
// sum(end_min_i * duration_min_i) >=
// (sum(duration_min_i^2) + sum(duration_min_i)^2) / 2
// We streghten this cuts by noticing that if all tasks starts after S,
// then replacing end_min_i by (end_min_i - S) is still valid.
//
// A second difference is that we look at a set of intervals starting
// after a given start_min, sorted by relative
// (end_lp - S) / duration_min.
TopNCuts top_n_cuts(15);
// Sort by start min to bucketize by start_min.
std::sort(events.begin(), events.end(),
[](const Event& e1, const Event& e2) {
return e1.start_min < e2.start_min;
});
for (int start = 0; start + 1 < events.size(); ++start) {
// Skip to the next start_min value.
if (start > 0 && events[start].start_min == events[start - 1].start_min) {
continue;
}
const IntegerValue sequence_start_min = events[start].start_min;
std::vector<Event> residual_tasks(events.begin() + start,
events.end());
std::sort(residual_tasks.begin(), residual_tasks.end(),
[sequence_start_min](const Event& e1, const Event& e2) {
return ((e1.lp_end - sequence_start_min) / e1.size_min) <
((e2.lp_end - sequence_start_min) / e2.size_min);
});
int best_end = -1;
double best_efficacy = 0.01;
IntegerValue best_min_contrib(0);
IntegerValue sum_duration(0);
IntegerValue sum_square_duration(0);
double lp_contrib = 0;
IntegerValue current_start_min(kMaxIntegerValue);
for (int i = 0; i < residual_tasks.size(); ++i) {
DCHECK_GE(residual_tasks[i].start_min, sequence_start_min);
const IntegerValue duration = residual_tasks[i].size_min;
sum_duration += duration;
sum_square_duration += duration * duration;
lp_contrib +=
residual_tasks[i].lp_end * residual_tasks[i].size_min.value();
current_start_min =
std::min(current_start_min, residual_tasks[i].start_min);
const IntegerValue min_contrib =
(sum_duration * sum_duration + sum_square_duration) / 2 +
current_start_min * sum_duration;
const double efficacy = (min_contrib.value() - lp_contrib) /
std::sqrt(sum_square_duration.value());
if (efficacy > best_efficacy) {
best_efficacy = efficacy;
best_end = i;
best_min_contrib = min_contrib;
for (int index = 0; index < helper->NumTasks(); ++index) {
if (!helper->IsPresent(index)) continue;
const IntegerValue size_min = helper->SizeMin(index);
if (size_min > 0) {
const AffineExpression end_expr = helper->Ends()[index];
events.push_back({end_expr, helper->StartMin(index), size_min,
end_expr.LpValue(lp_values)});
const AffineExpression r_start_expr =
helper->Starts()[index].Negated();
reverse_events.push_back({r_start_expr, -helper->EndMax(index),
size_min,
r_start_expr.LpValue(lp_values)});
}
}
}
if (best_end != -1) {
LinearConstraintBuilder cut(model, best_min_contrib,
kMaxIntegerValue);
for (int i = 0; i <= best_end; ++i) {
cut.AddTerm(residual_tasks[i].end, residual_tasks[i].size_min);
GenerateBalasCut("NoOverlapBalas", lp_values, IntegerValue(1),
std::move(events), model, manager);
GenerateBalasCut("NoOverlapBalasMirror", lp_values, IntegerValue(1),
std::move(reverse_events), model, manager);
};
return result;
}
CutGenerator CreateBalasAreaCumulativeCutGenerator(
const std::vector<IntervalVariable>& intervals,
const IntegerVariable capacity, const std::vector<IntegerVariable>& demands,
Model* model) {
CutGenerator result;
SchedulingConstraintHelper* helper =
new SchedulingConstraintHelper(intervals, model);
model->TakeOwnership(helper);
result.vars = demands;
result.vars.push_back(capacity);
AddIntegerVariableFromIntervals(helper, model, &result.vars);
Trail* trail = model->GetOrCreate<Trail>();
IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
result.generate_cuts =
[trail, integer_trail, helper, demands, capacity, model](
const absl::StrongVector<IntegerVariable, double>& lp_values,
LinearConstraintManager* manager) {
if (trail->CurrentDecisionLevel() > 0) return;
std::vector<BalasEvent> events;
std::vector<BalasEvent> reverse_events;
for (int index = 0; index < helper->NumTasks(); ++index) {
if (!helper->IsPresent(index)) continue;
const IntegerValue area_min =
helper->SizeMin(index) *
integer_trail->LowerBound(demands[index]);
if (area_min > 0) {
const AffineExpression end_expr = helper->Ends()[index];
events.push_back({end_expr, helper->StartMin(index), area_min,
end_expr.LpValue(lp_values)});
const AffineExpression r_start_expr =
helper->Starts()[index].Negated();
reverse_events.push_back({r_start_expr, -helper->EndMax(index),
area_min,
r_start_expr.LpValue(lp_values)});
}
}
top_n_cuts.AddCut(cut.Build(), "NoOverlapBalasArea", lp_values);
}
}
top_n_cuts.TransferToManager(lp_values, manager);
};
const IntegerValue capacity_max = integer_trail->UpperBound(capacity);
GenerateBalasCut("CumulativeBalas", lp_values, capacity_max,
std::move(events), model, manager);
GenerateBalasCut("CumulativeBalasMirror", lp_values, capacity_max,
std::move(reverse_events), model, manager);
};
return result;
}

View File

@@ -505,6 +505,14 @@ CutGenerator CreateOverlappingCumulativeCutGenerator(
const IntegerVariable capacity, const std::vector<IntegerVariable>& demands,
Model* model);
// Balas area cuts for the cumulative constraint. It is a simple relaxation
// where we replace a cumulative task with demand k and duration d by a
// no_overlap task with duration d * k / capacity_max.
CutGenerator CreateBalasAreaCumulativeCutGenerator(
const std::vector<IntervalVariable>& intervals,
const IntegerVariable capacity, const std::vector<IntegerVariable>& demands,
Model* model);
// For a given set of intervals, we first compute the min and max of all
// intervals. Then we create a cut that indicates that all intervals must fit
// in that span.

View File

@@ -19,7 +19,6 @@
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "ortools/base/strong_vector.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/linear_constraint.h"
@@ -44,48 +43,44 @@ size_t ComputeHashOfTerms(const LinearConstraint& ct) {
} // namespace
LinearConstraintManager::~LinearConstraintManager() {
if (VLOG_IS_ON(2) && !absl::StrContains(model_->Name(), "_lns")) {
if (num_merged_constraints_ > 0) {
VLOG(2) << "num_merged_constraints: " << num_merged_constraints_;
}
if (num_shortened_constraints_ > 0) {
VLOG(2) << "num_shortened_constraints: " << num_shortened_constraints_;
}
if (num_splitted_constraints_ > 0) {
VLOG(2) << "num_splitted_constraints: " << num_splitted_constraints_;
}
if (num_coeff_strenghtening_ > 0) {
VLOG(2) << "num_coeff_strenghtening: " << num_coeff_strenghtening_;
}
if (num_cuts_ > 0) {
VLOG(2) << "Total cuts added: " << num_cuts_ << " (out of "
<< num_add_cut_calls_ << " calls) worker: '" << model_->Name()
<< "'";
VLOG(2) << " - num simplifications: " << num_simplifications_;
for (const auto& entry : type_to_num_cuts_) {
if (entry.second == 1) {
VLOG(2) << " - added 1 cut of type '" << entry.first << "'.";
} else {
VLOG(2) << " - added " << entry.second << " cuts of type '"
<< entry.first << "'.";
}
}
}
}
if (logger_->LoggingIsEnabled() && num_cuts_ > 0) {
SOLVER_LOG(logger_, "Total cuts added: ", num_cuts_, " (out of ",
num_add_cut_calls_, " calls) worker: '", model_->Name(), "'");
SOLVER_LOG(logger_, " - num simplifications: ", num_simplifications_);
for (const auto& entry : type_to_num_cuts_) {
if (entry.second == 1) {
SOLVER_LOG(logger_, " - added 1 cut of type '", entry.first, "'.");
} else {
SOLVER_LOG(logger_, " - added ", entry.second, " cuts of type '",
entry.first, "'.");
}
// Returns a human readable, '\n'-separated summary of the constraints and
// cuts managed by this class. Counters that are still zero are omitted.
std::string LinearConstraintManager::Statistics() const {
  std::string stats;
  // Small helper: appends one line (parts followed by '\n') to stats.
  const auto append_line = [&stats](auto&&... parts) {
    absl::StrAppend(&stats, parts..., "\n");
  };
  append_line("Managed constraints: ", constraint_infos_.size());
  if (num_merged_constraints_ > 0) {
    append_line("num_merged_constraints: ", num_merged_constraints_);
  }
  if (num_shortened_constraints_ > 0) {
    append_line("num_shortened_constraints: ", num_shortened_constraints_);
  }
  if (num_splitted_constraints_ > 0) {
    append_line("num_splitted_constraints: ", num_splitted_constraints_);
  }
  if (num_coeff_strenghtening_ > 0) {
    append_line("num_coeff_strenghtening: ", num_coeff_strenghtening_);
  }
  if (num_simplifications_ > 0) {
    append_line(" - num simplifications: ", num_simplifications_);
  }
  append_line("Total cuts added: ", num_cuts_, " (out of ", num_add_cut_calls_,
              " calls)");
  for (const auto& [cut_type, count] : type_to_num_cuts_) {
    if (count == 1) {
      append_line(" - added 1 cut of type '", cut_type, "'.");
    } else {
      append_line(" - added ", count, " cuts of type '", cut_type, "'.");
    }
  }
  if (!stats.empty()) stats.pop_back();  // Remove last \n.
  return stats;
}
void LinearConstraintManager::RescaleActiveCounts(const double scaling_factor) {
@@ -93,7 +88,7 @@ void LinearConstraintManager::RescaleActiveCounts(const double scaling_factor) {
constraint_infos_[i].active_count *= scaling_factor;
}
constraint_active_count_increase_ *= scaling_factor;
VLOG(3) << "Rescaled active counts by " << scaling_factor;
VLOG(2) << "Rescaled active counts by " << scaling_factor;
}
bool LinearConstraintManager::MaybeRemoveSomeInactiveConstraints(
@@ -134,7 +129,7 @@ bool LinearConstraintManager::MaybeRemoveSomeInactiveConstraints(
lp_constraints_.resize(new_size);
solution_state->statuses.resize(num_cols + glop::ColIndex(new_size));
if (num_removed_constraints > 0) {
VLOG(3) << "Removed " << num_removed_constraints << " constraints";
VLOG(2) << "Removed " << num_removed_constraints << " constraints";
}
return num_removed_constraints > 0;
}
@@ -244,7 +239,7 @@ bool LinearConstraintManager::AddCut(
// them undeletable.
constraint_infos_[ct_index].is_deletable = true;
VLOG(3) << "Cut '" << type_name << "'"
VLOG(1) << "Cut '" << type_name << "'"
<< " size=" << constraint_infos_[ct_index].constraint.vars.size()
<< " max_magnitude="
<< ComputeInfinityNorm(constraint_infos_[ct_index].constraint)
@@ -673,7 +668,7 @@ bool LinearConstraintManager::ChangeLp(
if (num_added > 0) {
// We update the solution sate to match the new LP size.
VLOG(3) << "Added " << num_added << " constraints.";
VLOG(2) << "Added " << num_added << " constraints.";
solution_state->statuses.resize(solution_state->statuses.size() + num_added,
glop::VariableStatus::BASIC);
}

View File

@@ -71,7 +71,6 @@ class LinearConstraintManager {
time_limit_(model->GetOrCreate<TimeLimit>()),
model_(model),
logger_(model->GetOrCreate<SolverLogger>()) {}
~LinearConstraintManager();
// Add a new constraint to the manager. Note that we canonicalize constraints
// and merge the bounds of constraints with the same terms. We also perform
@@ -136,6 +135,9 @@ class LinearConstraintManager {
// the loaded solution.
bool DebugCheckConstraint(const LinearConstraint& cut);
// Returns statistics on the cut added.
std::string Statistics() const;
private:
// Heuristic that decide which constraints we should remove from the current
// LP. Note that such constraints can be added back later by the heuristic

View File

@@ -180,16 +180,6 @@ LinearProgrammingConstraint::LinearProgrammingConstraint(Model* model)
integer_trail_->RegisterReversibleClass(&rc_rev_int_repository_);
}
LinearProgrammingConstraint::~LinearProgrammingConstraint() {
VLOG(1) << "Total number of simplex iterations: "
<< total_num_simplex_iterations_;
for (int i = 0; i < num_solves_by_status_.size(); ++i) {
if (num_solves_by_status_[i] == 0) continue;
VLOG(1) << "#" << glop::ProblemStatus(i) << " : "
<< num_solves_by_status_[i];
}
}
void LinearProgrammingConstraint::AddLinearConstraint(
const LinearConstraint& ct) {
DCHECK(!lp_constraint_is_registered_);
@@ -667,6 +657,7 @@ bool LinearProgrammingConstraint::SolveLp() {
if (status_as_int >= num_solves_by_status_.size()) {
num_solves_by_status_.resize(status_as_int + 1);
}
num_solves_++;
num_solves_by_status_[status_as_int]++;
VLOG(2) << "lvl:" << trail_->CurrentDecisionLevel() << " "
<< simplex_.GetProblemStatus()
@@ -1422,9 +1413,10 @@ bool LinearProgrammingConstraint::Propagate() {
while (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL &&
cuts_round < max_cuts_rounds) {
// We wait for the first batch of problem constraints to be added before we
// begin to generate cuts.
    // begin to generate cuts. Note that we rely on num_solves_ since on some
    // problems there are no constraints other than the cuts.
cuts_round++;
if (!integer_lp_.empty()) {
if (num_solves_ > 1) {
implied_bounds_processor_.ClearCache();
implied_bounds_processor_.SeparateSomeImpliedBoundCuts(
expanded_lp_solution_);
@@ -2839,5 +2831,21 @@ IntegerLiteral LinearProgrammingConstraint::LPReducedCostAverageDecision() {
}
}
// Returns a human readable summary of this LP: its name, final dimension,
// total simplex iterations, the per-status solve counts, and the statistics
// of the underlying constraint manager.
std::string LinearProgrammingConstraint::Statistics() const {
  std::string stats;
  absl::StrAppend(&stats, "LP '", model_->Name(), "'\n");
  absl::StrAppend(&stats, "final dimension: ", DimensionString(), "\n");
  absl::StrAppend(&stats, "Total number of simplex iterations: ",
                  total_num_simplex_iterations_, "\n");
  for (int status = 0; status < num_solves_by_status_.size(); ++status) {
    const int64_t count = num_solves_by_status_[status];
    if (count == 0) continue;  // Only report statuses that occurred.
    absl::StrAppend(&stats, "#",
                    glop::GetProblemStatusString(glop::ProblemStatus(status)),
                    " : ", count, "\n");
  }
  absl::StrAppend(&stats, constraint_manager_.Statistics());
  return stats;
}
} // namespace sat
} // namespace operations_research

View File

@@ -132,7 +132,6 @@ class LinearProgrammingConstraint : public PropagatorInterface,
typedef glop::RowIndex ConstraintIndex;
explicit LinearProgrammingConstraint(Model* model);
~LinearProgrammingConstraint() override;
// Add a new linear constraint to this LP.
void AddLinearConstraint(const LinearConstraint& ct);
@@ -220,6 +219,9 @@ class LinearProgrammingConstraint : public PropagatorInterface,
return total_num_simplex_iterations_;
}
// Returns some statistics about this LP.
std::string Statistics() const;
private:
// Helper methods for branching. Returns true if branching on the given
// variable helps with more propagation or finds a conflict.
@@ -523,6 +525,7 @@ class LinearProgrammingConstraint : public PropagatorInterface,
int64_t total_num_simplex_iterations_ = 0;
// Some stats on the LP statuses encountered.
int64_t num_solves_ = 0;
std::vector<int64_t> num_solves_by_status_;
};

View File

@@ -103,17 +103,13 @@ void SharedIncompleteSolutionManager::AddNewSolution(
}
// TODO(user): Experiments and play with the num_solutions_to_keep parameter.
SharedResponseManager::SharedResponseManager(bool enumerate_all_solutions,
const CpModelProto* proto,
const WallTimer* wall_timer,
SharedTimeLimit* shared_time_limit,
SolverLogger* logger)
: enumerate_all_solutions_(enumerate_all_solutions),
model_proto_(*proto),
wall_timer_(*wall_timer),
shared_time_limit_(shared_time_limit),
SharedResponseManager::SharedResponseManager(Model* model)
: enumerate_all_solutions_(
model->GetOrCreate<SatParameters>()->enumerate_all_solutions()),
wall_timer_(*model->GetOrCreate<WallTimer>()),
shared_time_limit_(model->GetOrCreate<ModelSharedTimeLimit>()),
solutions_(/*num_solutions_to_keep=*/3),
logger_(logger) {}
logger_(model->GetOrCreate<SolverLogger>()) {}
namespace {
@@ -137,6 +133,19 @@ std::string SatProgressMessage(const std::string& event_or_solution_count,
} // namespace
// Records a pointer to the model objective (or nullptr if there is none) and
// seeds the inner objective bounds from the objective domain when it is not
// empty. Not thread safe; see the declaration comment.
void SharedResponseManager::InitializeObjective(const CpModelProto& cp_model) {
  if (!cp_model.has_objective()) {
    objective_or_null_ = nullptr;
    return;
  }
  objective_or_null_ = &cp_model.objective();
  const Domain domain = ReadDomainFromProto(cp_model.objective());
  if (domain.IsEmpty()) return;
  UpdateInnerObjectiveBounds("initial_domain", IntegerValue(domain.Min()),
                             IntegerValue(domain.Max()));
}
void SharedResponseManager::SetUpdatePrimalIntegralOnEachChange(bool set) {
absl::MutexLock mutex_lock(&mutex_);
update_integral_on_each_change_ = set;
@@ -148,7 +157,7 @@ void SharedResponseManager::UpdatePrimalIntegral() {
}
void SharedResponseManager::UpdatePrimalIntegralInternal() {
if (!model_proto_.has_objective()) return;
if (objective_or_null_ == nullptr) return;
const double current_time = shared_time_limit_->GetElapsedDeterministicTime();
const double time_delta = current_time - last_primal_integral_time_stamp_;
@@ -158,7 +167,7 @@ void SharedResponseManager::UpdatePrimalIntegralInternal() {
// Using the log should count no solution as just log(2*64) = 18, and
// otherwise just compare order of magnitude which seems nice. Also, It is
// more easy to compare the primal integral with the total time.
const CpObjectiveProto& obj = model_proto_.objective();
const CpObjectiveProto& obj = *objective_or_null_;
const double factor =
obj.scaling_factor() != 0.0 ? std::abs(obj.scaling_factor()) : 1.0;
const double bounds_delta = std::log(1 + factor * last_absolute_gap_);
@@ -174,7 +183,7 @@ void SharedResponseManager::UpdatePrimalIntegralInternal() {
void SharedResponseManager::SetGapLimitsFromParameters(
const SatParameters& parameters) {
absl::MutexLock mutex_lock(&mutex_);
if (!model_proto_.has_objective()) return;
if (objective_or_null_ == nullptr) return;
absolute_gap_limit_ = parameters.absolute_gap_limit();
relative_gap_limit_ = parameters.relative_gap_limit();
}
@@ -189,7 +198,7 @@ void SharedResponseManager::TestGapLimitsIfNeeded() {
if (best_solution_objective_value_ >= kMaxIntegerValue) return;
if (inner_objective_lower_bound_ <= kMinIntegerValue) return;
const CpObjectiveProto& obj = model_proto_.objective();
const CpObjectiveProto& obj = *objective_or_null_;
const double user_best =
ScaleObjectiveValue(obj, best_solution_objective_value_);
const double user_bound =
@@ -218,7 +227,7 @@ void SharedResponseManager::TestGapLimitsIfNeeded() {
void SharedResponseManager::UpdateInnerObjectiveBounds(
const std::string& update_info, IntegerValue lb, IntegerValue ub) {
absl::MutexLock mutex_lock(&mutex_);
CHECK(model_proto_.has_objective());
CHECK(objective_or_null_ != nullptr);
// The problem is already solved!
//
@@ -256,12 +265,12 @@ void SharedResponseManager::UpdateInnerObjectiveBounds(
return;
}
if (logger_->LoggingIsEnabled() && change) {
const CpObjectiveProto& obj = model_proto_.objective();
const CpObjectiveProto& obj = *objective_or_null_;
const double best =
ScaleObjectiveValue(obj, best_solution_objective_value_);
double new_lb = ScaleObjectiveValue(obj, inner_objective_lower_bound_);
double new_ub = ScaleObjectiveValue(obj, inner_objective_upper_bound_);
if (model_proto_.objective().scaling_factor() < 0) {
if (obj.scaling_factor() < 0) {
std::swap(new_lb, new_ub);
}
RegisterObjectiveBoundImprovement(update_info);
@@ -282,7 +291,7 @@ void SharedResponseManager::NotifyThatImprovingProblemIsInfeasible(
// We also use this status to indicate that we enumerated all solutions to
// a feasible problem.
best_response_.set_status(CpSolverStatus::OPTIMAL);
if (!model_proto_.has_objective()) {
if (objective_or_null_ == nullptr) {
best_response_.set_all_solutions_were_found(true);
}
@@ -344,6 +353,18 @@ double SharedResponseManager::PrimalIntegral() const {
return primal_integral_;
}
// Registers a postprocessor applied to solutions handed to callbacks and to
// GetResponse(); postprocessors run in reverse order of registration.
void SharedResponseManager::AddSolutionPostprocessor(
    std::function<void(CpSolverResponse*)> postprocessor) {
  absl::MutexLock mutex_lock(&mutex_);
  postprocessors_.push_back(std::move(postprocessor));
}
// Registers a postprocessor applied only by GetResponse(full_response=true),
// after all the regular solution postprocessors.
void SharedResponseManager::AddFinalSolutionPostprocessor(
    std::function<void(CpSolverResponse*)> postprocessor) {
  absl::MutexLock mutex_lock(&mutex_);
  final_postprocessors_.push_back(std::move(postprocessor));
}
int SharedResponseManager::AddSolutionCallback(
std::function<void(const CpSolverResponse&)> callback) {
absl::MutexLock mutex_lock(&mutex_);
@@ -363,15 +384,26 @@ void SharedResponseManager::UnregisterCallback(int callback_id) {
LOG(DFATAL) << "Callback id " << callback_id << " not registered.";
}
CpSolverResponse SharedResponseManager::GetResponse() {
// Returns the current best response, postsolved by the registered
// postprocessors (in reverse registration order). When full_response is true,
// the final postprocessors are also applied afterwards.
//
// Fix: the body contained a stray early `return best_response_;` (leftover
// from the previous parameterless version of this function) that made every
// postprocessing step below it unreachable and leaked the internal,
// un-postsolved response to callers.
CpSolverResponse SharedResponseManager::GetResponse(bool full_response) {
  absl::MutexLock mutex_lock(&mutex_);
  FillObjectiveValuesInBestResponse();

  // We need to copy the response before we postsolve it, so that
  // best_response_ keeps the internal (pre-postsolve) solution.
  CpSolverResponse result = best_response_;
  for (int i = postprocessors_.size(); --i >= 0;) {
    postprocessors_[i](&result);
  }
  if (full_response) {
    for (int i = final_postprocessors_.size(); --i >= 0;) {
      final_postprocessors_[i](&result);
    }
  }
  return result;
}
void SharedResponseManager::FillObjectiveValuesInBestResponse() {
if (!model_proto_.has_objective()) return;
const CpObjectiveProto& obj = model_proto_.objective();
if (objective_or_null_ == nullptr) return;
const CpObjectiveProto& obj = *objective_or_null_;
if (best_response_.status() == CpSolverStatus::INFEASIBLE) {
best_response_.clear_objective_value();
@@ -401,9 +433,9 @@ void SharedResponseManager::NewSolution(const CpSolverResponse& response,
Model* model) {
absl::MutexLock mutex_lock(&mutex_);
if (model_proto_.has_objective()) {
if (objective_or_null_ != nullptr) {
const int64_t objective_value =
ComputeInnerObjective(model_proto_.objective(), response);
ComputeInnerObjective(*objective_or_null_, response);
// Add this solution to the pool, even if it is not improving.
if (!response.solution().empty()) {
@@ -432,7 +464,7 @@ void SharedResponseManager::NewSolution(const CpSolverResponse& response,
// Note that the objective will be filled by
// FillObjectiveValuesInBestResponse().
if (!model_proto_.has_objective() && !enumerate_all_solutions_) {
if (objective_or_null_ == nullptr && !enumerate_all_solutions_) {
best_response_.set_status(CpSolverStatus::OPTIMAL);
} else {
best_response_.set_status(CpSolverStatus::FEASIBLE);
@@ -446,7 +478,7 @@ void SharedResponseManager::NewSolution(const CpSolverResponse& response,
response.solution_upper_bounds();
// Mark model as OPTIMAL if the inner bound crossed.
if (model_proto_.has_objective() &&
if (objective_or_null_ != nullptr &&
inner_objective_lower_bound_ > inner_objective_upper_bound_) {
best_response_.set_status(CpSolverStatus::OPTIMAL);
}
@@ -462,13 +494,13 @@ void SharedResponseManager::NewSolution(const CpSolverResponse& response,
num_bool);
}
if (model_proto_.has_objective()) {
const CpObjectiveProto& obj = model_proto_.objective();
if (objective_or_null_ != nullptr) {
const CpObjectiveProto& obj = *objective_or_null_;
const double best =
ScaleObjectiveValue(obj, best_solution_objective_value_);
double lb = ScaleObjectiveValue(obj, inner_objective_lower_bound_);
double ub = ScaleObjectiveValue(obj, inner_objective_upper_bound_);
if (model_proto_.objective().scaling_factor() < 0) {
if (obj.scaling_factor() < 0) {
std::swap(lb, ub);
}
RegisterSolutionFound(solution_info);
@@ -487,8 +519,14 @@ void SharedResponseManager::NewSolution(const CpSolverResponse& response,
if (!callbacks_.empty()) {
FillObjectiveValuesInBestResponse();
SetStatsFromModelInternal(model);
// We need to copy the response before we postsolve it.
CpSolverResponse copy = best_response_;
for (int i = postprocessors_.size(); --i >= 0;) {
postprocessors_[i](&copy);
}
for (const auto& pair : callbacks_) {
pair.second(best_response_);
pair.second(copy);
}
}
@@ -533,7 +571,7 @@ void SharedResponseManager::LoadDebugSolution(Model* model) {
const IntegerVariable objective_var = objective_def->objective_var;
const int64_t objective_value =
ComputeInnerObjective(model_proto_.objective(), response);
ComputeInnerObjective(*objective_or_null_, response);
debug_solution[objective_var] = objective_value;
debug_solution[NegationOf(objective_var)] = -objective_value;
#endif // __PORTABLE_PLATFORM__

View File

@@ -31,6 +31,7 @@
#include "ortools/sat/model.h"
#include "ortools/sat/sat_base.h"
#include "ortools/sat/sat_parameters.pb.h"
#include "ortools/sat/util.h"
#include "ortools/util/bitset.h"
#include "ortools/util/logging.h"
#include "ortools/util/time_limit.h"
@@ -164,16 +165,21 @@ class SharedIncompleteSolutionManager {
mutable absl::Mutex mutex_;
};
// Manages the global best response kept by the solver.
// All functions are thread-safe.
// Manages the global best response kept by the solver. This class is
// responsible for logging the progress of the solutions and bounds as they are
// found.
//
// All functions are thread-safe except if specified otherwise.
class SharedResponseManager {
public:
// If log_updates is true, then all updates to the global "state" will be
// logged. This class is responsible for our solver log progress.
SharedResponseManager(bool enumerate_all_solutions, const CpModelProto* proto,
const WallTimer* wall_timer,
SharedTimeLimit* shared_time_limit,
SolverLogger* logger);
explicit SharedResponseManager(Model* model);
// Loads the initial objective bounds and keep a reference to the objective to
// properly display the scaled bounds. This is optional if the model has no
// objective.
//
// This function is not thread safe.
void InitializeObjective(const CpModelProto& cp_model);
// Reports OPTIMAL and stop the search if any gap limit are specified and
// crossed. By default, we only stop when we have the true optimal, which is
@@ -185,12 +191,29 @@ class SharedResponseManager {
//
// Note that the solver statistics correspond to the last time a better
// solution was found or SetStatsFromModel() was called.
CpSolverResponse GetResponse();
//
// If full response is true, we will do more postprocessing by calling all the
// AddFinalSolutionPostprocessor() postprocesors. Note that the response given
// to the AddSolutionCallback() will not call them.
CpSolverResponse GetResponse(bool full_response = true);
// These "postprocessing" steps will be applied in REVERSE order of
// registration to all solution passed to the callbacks.
void AddSolutionPostprocessor(
std::function<void(CpSolverResponse*)> postprocessor);
// These "postprocessing" steps will only be applied after the others to the
// solution returned by GetResponse().
void AddFinalSolutionPostprocessor(
std::function<void(CpSolverResponse*)> postprocessor);
// Adds a callback that will be called on each new solution (for
// satisfiability problem) or each improving new solution (for an optimization
// problem). Returns its id so it can be unregistered if needed.
//
// Note that adding a callback is not free since the solution will be
// postsolved before this is called.
//
// Note that currently the class is waiting for the callback to finish before
// accepting any new updates. That could be changed if needed.
int AddSolutionCallback(
@@ -300,6 +323,15 @@ class SharedResponseManager {
// Display improvement stats.
void DisplayImprovementStatistics();
// This is here for the few codepath that needs to modify the returned
// response directly. Note that this do not work in parallel.
//
// TODO(user): This can probably be removed.
  // Takes the mutex only while computing the address: the returned pointer
  // escapes the lock, so it is only safe to use when no other worker can
  // touch the response concurrently (hence "do not work in parallel" above).
  CpSolverResponse* MutableResponse() {
    absl::MutexLock mutex_lock(&mutex_);
    return &best_response_;
  }
private:
void TestGapLimitsIfNeeded() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
void FillObjectiveValuesInBestResponse()
@@ -314,9 +346,9 @@ class SharedResponseManager {
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
const bool enumerate_all_solutions_;
const CpModelProto& model_proto_;
const WallTimer& wall_timer_;
SharedTimeLimit* shared_time_limit_;
ModelSharedTimeLimit* shared_time_limit_;
CpObjectiveProto const* objective_or_null_ = nullptr;
mutable absl::Mutex mutex_;
@@ -349,6 +381,11 @@ class SharedResponseManager {
std::vector<std::pair<int, std::function<void(const CpSolverResponse&)>>>
callbacks_ ABSL_GUARDED_BY(mutex_);
std::vector<std::function<void(CpSolverResponse*)>> postprocessors_
ABSL_GUARDED_BY(mutex_);
std::vector<std::function<void(CpSolverResponse*)>> final_postprocessors_
ABSL_GUARDED_BY(mutex_);
// Dump prefix.
std::string dump_prefix_;

View File

@@ -23,6 +23,7 @@
#include "ortools/sat/sat_base.h"
#include "ortools/sat/sat_parameters.pb.h"
#include "ortools/util/random_engine.h"
#include "ortools/util/time_limit.h"
#if !defined(__PORTABLE_PLATFORM__)
#include "google/protobuf/descriptor.h"
@@ -65,6 +66,13 @@ class ModelRandomGenerator : public absl::BitGenRef {
absl::BitGen absl_random_;
};
// The model "singleton" shared time limit.
class ModelSharedTimeLimit : public SharedTimeLimit {
 public:
  // Wraps the model's own TimeLimit (created on demand) behind
  // SharedTimeLimit so it can be obtained via model->GetOrCreate<>().
  explicit ModelSharedTimeLimit(Model* model)
      : SharedTimeLimit(model->GetOrCreate<TimeLimit>()) {}
};
// Randomizes the decision heuristic of the given SatParameters.
template <typename URBG>
void RandomizeDecisionHeuristic(URBG* random, SatParameters* parameters);