[CP-SAT] more span; minor scheduling optims

This commit is contained in:
Laurent Perron
2024-08-02 10:55:55 -07:00
parent 4a492d9b3f
commit 99d869a4a1
19 changed files with 194 additions and 193 deletions

View File

@@ -1121,10 +1121,12 @@ cc_library(
"//ortools/util:strong_integers",
"//ortools/util:time_limit",
"@com_google_absl//absl/container:flat_hash_set",
"@com_google_absl//absl/log",
"@com_google_absl//absl/log:check",
"@com_google_absl//absl/meta:type_traits",
"@com_google_absl//absl/random:distributions",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/strings:str_format",
"@com_google_absl//absl/types:span",
],
)

View File

@@ -24,7 +24,6 @@
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/flags/flag.h"
#include "absl/log/check.h"
#include "absl/random/distributions.h"
#include "absl/strings/str_cat.h"
@@ -43,10 +42,6 @@
#include "ortools/sat/util.h"
#include "ortools/util/strong_integers.h"
// TODO(user): remove this when the code is stable and does not use SCIP
// anymore.
ABSL_FLAG(bool, cp_model_use_max_hs, false, "Use max_hs in search portfolio.");
namespace operations_research {
namespace sat {
@@ -649,6 +644,14 @@ absl::flat_hash_map<std::string, SatParameters> GetNamedParameters(
new_params.set_use_dynamic_precedence_in_disjunctive(false);
new_params.set_use_dynamic_precedence_in_cumulative(false);
strategies["fixed"] = new_params;
new_params.set_linearization_level(0);
strategies["fixed_no_lp"] = new_params;
new_params.set_linearization_level(2);
new_params.set_add_lp_constraints_lazily(false);
new_params.set_root_lp_iterations(100'000);
strategies["fixed_max_lp"] = new_params;
}
// Quick restart.
@@ -803,10 +806,6 @@ std::vector<SatParameters> GetFullWorkerParameters(
names.push_back("probing_no_lp");
names.push_back("objective_lb_search_no_lp");
names.push_back("objective_lb_search_max_lp");
#if !defined(__PORTABLE_PLATFORM__) && defined(USE_SCIP)
if (absl::GetFlag(FLAGS_cp_model_use_max_hs)) names.push_back("max_hs");
#endif // !defined(__PORTABLE_PLATFORM__) && defined(USE_SCIP)
} else {
for (const std::string& name : base_params.subsolvers()) {
// Hack for flatzinc. At the time of parameter setting, the objective is

View File

@@ -850,7 +850,7 @@ void OrbitAndPropagation(absl::Span<const int> orbits, int var,
}
std::vector<int64_t> BuildInequalityCoeffsForOrbitope(
const std::vector<int64_t>& maximum_values, int64_t max_linear_size,
absl::Span<const int64_t> maximum_values, int64_t max_linear_size,
bool* is_approximated) {
std::vector<int64_t> out(maximum_values.size());
int64_t range_product = 1;
@@ -872,8 +872,8 @@ std::vector<int64_t> BuildInequalityCoeffsForOrbitope(
*is_approximated = true;
const auto compute_approximate_coeffs =
[max_linear_size, &maximum_values](double scaling_factor,
std::vector<int64_t>* coeffs) -> bool {
[max_linear_size, maximum_values](double scaling_factor,
std::vector<int64_t>* coeffs) -> bool {
int64_t max_size = 0;
double cumulative_product_double = 1.0;
for (int i = 0; i < maximum_values.size(); ++i) {

View File

@@ -704,7 +704,7 @@ bool NonOverlappingRectanglesDisjunctivePropagator::
// Compute relevant boxes, the ones with a mandatory part in y. Because we will
// need to sort it this way, we consider them by increasing start max.
indexed_boxes_.clear();
const auto temp = y->TaskByDecreasingStartMax();
const auto temp = y->TaskByIncreasingNegatedStartMax();
for (int i = temp.size(); --i >= 0;) {
const int box = temp[i].task_index;
// Ignore absent boxes.
@@ -718,7 +718,7 @@ bool NonOverlappingRectanglesDisjunctivePropagator::
continue;
}
const IntegerValue start_max = temp[i].time;
const IntegerValue start_max = -temp[i].time;
const IntegerValue end_min = y->EndMin(box);
if (start_max < end_min) {
indexed_boxes_.push_back({box, start_max, end_min});

View File

@@ -343,8 +343,8 @@ template <bool time_direction>
bool CombinedDisjunctive<time_direction>::Propagate() {
if (!helper_->SynchronizeAndSetTimeDirection(time_direction)) return false;
const auto& task_by_increasing_end_min = helper_->TaskByIncreasingEndMin();
const auto& task_by_decreasing_start_max =
helper_->TaskByDecreasingStartMax();
const auto& task_by_negated_start_max =
helper_->TaskByIncreasingNegatedStartMax();
for (auto& task_set : task_sets_) task_set.Clear();
end_mins_.assign(end_mins_.size(), kMinIntegerValue);
@@ -360,9 +360,9 @@ bool CombinedDisjunctive<time_direction>::Propagate() {
// Update all task sets.
while (queue_index >= 0) {
const auto to_insert = task_by_decreasing_start_max[queue_index];
const auto to_insert = task_by_negated_start_max[queue_index];
const int task_index = to_insert.task_index;
const IntegerValue start_max = to_insert.time;
const IntegerValue start_max = -to_insert.time;
if (end_min <= start_max) break;
if (helper_->IsPresent(task_index)) {
task_is_added_[task_index] = true;
@@ -749,8 +749,8 @@ bool DisjunctiveSimplePrecedences::Push(TaskTime before, int t) {
bool DisjunctiveSimplePrecedences::PropagateOneDirection() {
// We will loop in a decreasing way here.
// And add tasks that are present to the task_set_.
absl::Span<const TaskTime> task_by_decreasing_start_max =
helper_->TaskByDecreasingStartMax();
absl::Span<const TaskTime> task_by_negated_start_max =
helper_->TaskByIncreasingNegatedStartMax();
// We just keep, amongst all the tasks before current_end_min, the one with
// the highest end-min.
@@ -771,15 +771,16 @@ bool DisjunctiveSimplePrecedences::PropagateOneDirection() {
int blocking_task = -1;
IntegerValue blocking_start_max;
IntegerValue current_end_min = task_by_increasing_end_min.front().time;
for (; true; task_by_decreasing_start_max.remove_suffix(1)) {
if (task_by_decreasing_start_max.empty()) {
for (; true; task_by_negated_start_max.remove_suffix(1)) {
if (task_by_negated_start_max.empty()) {
// Small optim: this allows us to process all remaining tasks rather than
// looping around and retesting this for all remaining tasks.
current_end_min = kMaxIntegerValue;
break;
}
const auto [t, start_max] = task_by_decreasing_start_max.back();
const auto [t, negated_start_max] = task_by_negated_start_max.back();
const IntegerValue start_max = -negated_start_max;
if (current_end_min <= start_max) break;
if (!helper_->IsPresent(t)) continue;
@@ -850,59 +851,30 @@ bool DisjunctiveDetectablePrecedences::Propagate() {
return false;
}
// TODO(user): cache this more.
// We will "consume" tasks from here across PropagateSubwindow() calls.
task_by_decreasing_start_max_ = helper_->TaskByDecreasingStartMax();
// Split problem into independent part.
//
// The "independent" window can be processed separately because for each of
// them, a task [start-min, end-min] is in the window [window_start,
// window_end]. So any task to the left of the window cannot push such
// task start_min, and any task to the right of the window will have a
// start_max >= end_min, so wouldn't be in detectable precedence.
// Compute the "rank" of each task.
const auto by_shifted_smin = helper_->TaskByIncreasingShiftedStartMin();
int rank = -1;
IntegerValue window_end = kMinIntegerValue;
IntegerValue min_start_min = kMinIntegerValue;
IntegerValue max_end_min = kMinIntegerValue;
for (const TaskTime task_time : helper_->TaskByIncreasingStartMin()) {
const int task = task_time.task_index;
if (helper_->IsAbsent(task)) continue;
// Note that the helper returns value assuming the task is present.
const IntegerValue start_min = task_time.time;
const IntegerValue end_min = helper_->EndMin(task);
if (start_min < window_end) {
const IntegerValue size_min = helper_->SizeMin(task);
DCHECK_GE(end_min, start_min + size_min);
task_by_increasing_end_min_.push_back({task, end_min});
max_end_min = std::max(max_end_min, end_min);
window_end = std::max(window_end, start_min) + size_min;
for (const auto [task, presence_lit, start_min] : by_shifted_smin) {
if (!helper_->IsPresent(presence_lit)) {
ranks_[task] = -1;
continue;
}
// Process current window.
if (task_by_increasing_end_min_.size() > 1 &&
!PropagateSubwindow(min_start_min, max_end_min)) {
++stats_.num_conflicts;
return false;
const IntegerValue size_min = helper_->SizeMin(task);
if (start_min < window_end) {
ranks_[task] = rank;
window_end += size_min;
} else {
ranks_[task] = ++rank;
window_end = start_min + size_min;
}
// Start of the next window.
task_by_increasing_end_min_.clear();
task_by_increasing_end_min_.push_back({task, end_min});
min_start_min = start_min;
max_end_min = end_min;
window_end = end_min;
}
if (task_by_increasing_end_min_.size() > 1 &&
!PropagateSubwindow(min_start_min, max_end_min)) {
if (!PropagateWithRanks()) {
++stats_.num_conflicts;
return false;
}
stats_.EndWithoutConflicts();
return true;
}
@@ -996,35 +968,18 @@ bool DisjunctiveDetectablePrecedences::Push(IntegerValue task_set_end_min,
return true;
}
bool DisjunctiveDetectablePrecedences::PropagateSubwindow(
const IntegerValue min_start_min, const IntegerValue max_end_min) {
DCHECK(!task_by_increasing_end_min_.empty());
IntegerValue first_start_max = kMaxIntegerValue;
for (; !task_by_decreasing_start_max_.empty();
task_by_decreasing_start_max_.remove_suffix(1)) {
const auto [t, start_max] = task_by_decreasing_start_max_.back();
if (!helper_->IsPresent(t)) continue;
// Because of how the window was constructed, this task cannot push
// anything.
if (helper_->EndMin(t) <= min_start_min) continue;
first_start_max = start_max;
break;
}
// No work to do for this window.
if (first_start_max >= max_end_min) return true;
// The vector is already sorted by shifted_start_min, so there is likely a
// good correlation, hence the incremental sort.
IncrementalSort(task_by_increasing_end_min_.begin(),
task_by_increasing_end_min_.end());
DCHECK_EQ(max_end_min, task_by_increasing_end_min_.back().time);
bool DisjunctiveDetectablePrecedences::PropagateWithRanks() {
// We will "consume" tasks from here.
absl::Span<const TaskTime> task_by_increasing_end_min =
absl::MakeSpan(task_by_increasing_end_min_);
helper_->TaskByIncreasingEndMin();
absl::Span<const TaskTime> task_by_negated_start_max =
helper_->TaskByIncreasingNegatedStartMax();
// We will loop in an increasing way here and consume tasks from the beginning.
// We will stop using ranks as soon as we propagated something. This allows us
// to be sure this propagates as much as possible in a single pass and seems to
// help slightly.
int highest_rank = 0;
bool some_propagation = false;
// Invariant: need_update is false implies that task_set_end_min is equal to
// task_set_.ComputeEndMin().
@@ -1032,24 +987,30 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow(
// TODO(user): Maybe it is just faster to merge ComputeEndMin() with
// AddEntry().
task_set_.Clear();
bool need_update = false;
to_add_.clear();
IntegerValue task_set_end_min = kMinIntegerValue;
for (; !task_by_increasing_end_min.empty();) {
// Consider all task with a start_max < current_end_min.
int blocking_task = -1;
IntegerValue blocking_start_max;
IntegerValue current_end_min = task_by_increasing_end_min.front().time;
for (; true; task_by_decreasing_start_max_.remove_suffix(1)) {
if (task_by_decreasing_start_max_.empty()) {
for (; true; task_by_negated_start_max.remove_suffix(1)) {
if (task_by_negated_start_max.empty()) {
// Small optim: this allows us to process all remaining tasks rather than
// looping around and retesting this for all remaining tasks.
current_end_min = kMaxIntegerValue;
break;
}
const auto [t, start_max] = task_by_decreasing_start_max_.back();
const auto [t, negated_start_max] = task_by_negated_start_max.back();
const IntegerValue start_max = -negated_start_max;
if (current_end_min <= start_max) break;
if (!helper_->IsPresent(t)) continue;
// If the task is not present, its rank will be -1.
const int rank = ranks_[t];
if (rank < highest_rank) continue;
DCHECK(helper_->IsPresent(t));
// If t has a mandatory part, and extend further than current_end_min
// then we can push it first. All tasks for which their push is delayed
@@ -1073,20 +1034,29 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow(
helper_->AddReasonForBeingBefore(t, blocking_task);
return helper_->ReportConflict();
} else {
need_update = true;
task_set_.AddShiftedStartMinEntry(*helper_, t);
if (!some_propagation && rank > highest_rank) {
to_add_.clear();
task_set_.Clear();
highest_rank = rank;
}
to_add_.push_back(t);
}
}
// If we have a blocking task. We need to propagate it first.
if (blocking_task != -1) {
DCHECK(!helper_->IsAbsent(blocking_task));
if (need_update) {
need_update = false;
if (!to_add_.empty()) {
for (const int t : to_add_) {
task_set_.AddShiftedStartMinEntry(*helper_, t);
}
to_add_.clear();
task_set_end_min = task_set_.ComputeEndMin();
}
if (task_set_end_min > helper_->StartMin(blocking_task)) {
some_propagation = true;
if (!Push(task_set_end_min, blocking_task)) return false;
}
@@ -1099,8 +1069,12 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow(
// any of the subsequent tasks below. In particular, the reason will be
// valid even though task_set might contain tasks with a start_max
// greater or equal to the end_min of the task we push.
need_update = true;
task_set_.AddShiftedStartMinEntry(*helper_, blocking_task);
if (!some_propagation && ranks_[blocking_task] > highest_rank) {
to_add_.clear();
task_set_.Clear();
highest_rank = ranks_[blocking_task];
}
to_add_.push_back(blocking_task);
}
// Lets propagate all task with end_min <= current_end_min that are also
@@ -1111,8 +1085,11 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow(
if (end_min > current_end_min) break;
if (t == blocking_task) continue; // Already done.
if (need_update) {
need_update = false;
if (!to_add_.empty()) {
for (const int t : to_add_) {
task_set_.AddShiftedStartMinEntry(*helper_, t);
}
to_add_.clear();
task_set_end_min = task_set_.ComputeEndMin();
}
@@ -1120,6 +1097,8 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow(
if (task_set_end_min > helper_->StartMin(t)) {
// Corner case if a previous push caused a subsequent task to be absent.
if (helper_->IsAbsent(t)) continue;
some_propagation = true;
if (!Push(task_set_end_min, t)) return false;
}
}
@@ -1347,7 +1326,8 @@ bool DisjunctiveNotLast::Propagate() {
return false;
}
const auto task_by_decreasing_start_max = helper_->TaskByDecreasingStartMax();
const auto task_by_negated_start_max =
helper_->TaskByIncreasingNegatedStartMax();
const auto task_by_increasing_shifted_start_min =
helper_->TaskByIncreasingShiftedStartMin();
@@ -1362,7 +1342,7 @@ bool DisjunctiveNotLast::Propagate() {
// looking at the task in the first window. Tasks to the left do not cause
// issue for the task to be last, and tasks to the right will not lower the
// end-min of the task under consideration.
int queue_index = task_by_decreasing_start_max.size() - 1;
int queue_index = task_by_negated_start_max.size() - 1;
const int num_tasks = task_by_increasing_shifted_start_min.size();
for (int i = 0; i < num_tasks;) {
start_min_window_.clear();
@@ -1387,12 +1367,14 @@ bool DisjunctiveNotLast::Propagate() {
// fall into [window_start, window_end).
start_max_window_.clear();
for (; queue_index >= 0; queue_index--) {
const auto task_time = task_by_decreasing_start_max[queue_index];
const auto [t, negated_start_max] =
task_by_negated_start_max[queue_index];
const IntegerValue start_max = -negated_start_max;
// Note that we add task whose presence is still unknown here.
if (task_time.time >= window_end) break;
if (helper_->IsAbsent(task_time.task_index)) continue;
start_max_window_.push_back(task_time);
if (start_max >= window_end) break;
if (helper_->IsAbsent(t)) continue;
start_max_window_.push_back({t, start_max});
}
// If this is the case, we cannot propagate more than the detectable

View File

@@ -234,17 +234,18 @@ class DisjunctiveDetectablePrecedences : public PropagatorInterface {
helper_(helper),
task_set_(helper->NumTasks()),
stats_("DisjunctiveDetectablePrecedences", model) {
task_by_increasing_end_min_.ClearAndReserve(helper->NumTasks());
ranks_.resize(helper->NumTasks());
to_add_.ClearAndReserve(helper->NumTasks());
}
bool Propagate() final;
int RegisterWith(GenericLiteralWatcher* watcher);
private:
bool PropagateSubwindow(IntegerValue min_start_min, IntegerValue max_end_min);
bool PropagateWithRanks();
bool Push(IntegerValue task_set_end_min, int t);
FixedCapacityVector<TaskTime> task_by_increasing_end_min_;
absl::Span<const TaskTime> task_by_decreasing_start_max_;
FixedCapacityVector<int> to_add_;
std::vector<int> ranks_;
const bool time_direction_;
SchedulingConstraintHelper* helper_;

View File

@@ -17,7 +17,9 @@
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include <random>
#include <string>
#include <tuple>
#include <vector>
@@ -26,6 +28,7 @@
#include "absl/meta/type_traits.h"
#include "absl/random/distributions.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "ortools/base/logging.h"
#include "ortools/sat/clause.h"
@@ -169,7 +172,7 @@ IntegerLiteral SplitUsingBestSolutionValueInRepository(
std::function<BooleanOrIntegerLiteral()> FirstUnassignedVarAtItsMinHeuristic(
const std::vector<IntegerVariable>& vars, Model* model) {
auto* integer_trail = model->GetOrCreate<IntegerTrail>();
return [/*copy*/ vars, integer_trail]() {
return [/*copy*/ vars = vars, integer_trail]() {
for (const IntegerVariable var : vars) {
const IntegerLiteral decision = AtMinValue(var, integer_trail);
if (decision.IsValid()) return BooleanOrIntegerLiteral(decision);
@@ -254,7 +257,7 @@ std::function<BooleanOrIntegerLiteral()> LpPseudoCostHeuristic(Model* model) {
if (info.score > best_score) {
best_score = info.score;
// This direction works better than the inverse in the benchs. But
// This direction works better than the inverse in the benchmarks. But
// always branching up seems even better. TODO(user): investigate.
if (info.down_score > info.up_score) {
decision = BooleanOrIntegerLiteral(info.down_branch);
@@ -1022,8 +1025,8 @@ std::function<BooleanOrIntegerLiteral()> RandomizeOnRestartHeuristic(
// Special case: Don't change the decision value.
value_selection_weight.push_back(10);
// TODO(user): These distribution values are just guessed values. They
// need to be tuned.
// TODO(user): These distribution values are just guessed values.
// They need to be tuned.
std::discrete_distribution<int> val_dist(value_selection_weight.begin(),
value_selection_weight.end());
@@ -1114,9 +1117,9 @@ std::function<BooleanOrIntegerLiteral()> FollowHint(
auto* integer_trail = model->GetOrCreate<IntegerTrail>();
auto* rev_int_repo = model->GetOrCreate<RevIntRepository>();
// This is not ideal as we reserve an int for the full duration of the model
// even if we use this FollowHint() function just for a while. But it is
// an easy solution to not have reference to deleted memory in the
// This is not ideal as we reserve an int for the full duration of the
// model even if we use this FollowHint() function just for a while. But
// it is an easy solution to not have reference to deleted memory in the
// RevIntRepository(). Note that once we backtrack, these reference will
// disappear.
int* rev_start_index = model->TakeOwnership(new int);
@@ -1305,7 +1308,7 @@ void ConfigureSearchHeuristics(Model* model) {
}
std::vector<std::function<BooleanOrIntegerLiteral()>> CompleteHeuristics(
const std::vector<std::function<BooleanOrIntegerLiteral()>>&
absl::Span<const std::function<BooleanOrIntegerLiteral()>>
incomplete_heuristics,
const std::function<BooleanOrIntegerLiteral()>& completion_heuristic) {
std::vector<std::function<BooleanOrIntegerLiteral()>> complete_heuristics;
@@ -1456,15 +1459,15 @@ SatSolver::Status IntegerSearchHelper::SolveIntegerProblem() {
CHECK_EQ(num_policies, heuristics.restart_policies.size());
// Note that it is important to do the level-zero propagation if it wasn't
// already done because EnqueueDecisionAndBackjumpOnConflict() assumes that
// the solver is in a "propagated" state.
// already done because EnqueueDecisionAndBackjumpOnConflict() assumes
// that the solver is in a "propagated" state.
//
// TODO(user): We have the issue that at level zero. calling the propagation
// loop more than once can propagate more! This is because we call the LP
// again and again on each level zero propagation. This is causing some
// CHECKs() to fail in multithread (rarely) because when we associate new
// literals to integer ones, Propagate() is indirectly called. Not sure yet
// how to fix.
// TODO(user): We have the issue that, at level zero, calling the
// propagation loop more than once can propagate more! This is because we
// call the LP again and again on each level zero propagation. This is
// causing some CHECKs() to fail in multithread (rarely) because when we
// associate new literals to integer ones, Propagate() is indirectly
// called. Not sure yet how to fix.
if (!sat_solver_->FinishPropagation()) return sat_solver_->UnsatStatus();
// Main search loop.
@@ -1508,20 +1511,21 @@ SatSolver::Status IntegerSearchHelper::SolveIntegerProblem() {
// No decision means that we reached a leaf of the search tree and that
// we have a feasible solution.
//
// Tricky: If the time limit is reached during the final propagation when
// all variables are fixed, there is no guarantee that the propagation
// responsible for testing the validity of the solution was run to
// completion. So we cannot report a feasible solution.
// Tricky: If the time limit is reached during the final propagation
// when all variables are fixed, there is no guarantee that the
// propagation responsible for testing the validity of the solution was
// run to completion. So we cannot report a feasible solution.
if (time_limit_->LimitReached()) return SatSolver::LIMIT_REACHED;
if (decision == kNoLiteralIndex) {
// Save the current polarity of all Booleans in the solution. It will be
// followed for the next SAT decisions. This is known to be a good policy
// for optimization problem. Note that for decision problem we don't care
// since we are just done as soon as a solution is found.
// Save the current polarity of all Booleans in the solution. It will
// be followed for the next SAT decisions. This is known to be a good
// policy for optimization problems. Note that for decision problems we
// don't care since we are just done as soon as a solution is found.
//
// This idea is kind of "well known", see for instance the "LinSBPS"
// submission to the maxSAT 2018 competition by Emir Demirovic and Peter
// Stuckey where they show it is a good idea and provide more references.
// submission to the maxSAT 2018 competition by Emir Demirovic and
// Peter Stuckey where they show it is a good idea and provide more
// references.
if (parameters_.use_optimization_hints()) {
auto* sat_decision = model_->GetOrCreate<SatDecisionPolicy>();
const auto& trail = *model_->GetOrCreate<Trail>();
@@ -1536,9 +1540,9 @@ SatSolver::Status IntegerSearchHelper::SolveIntegerProblem() {
return sat_solver_->UnsatStatus();
}
// In multi-thread, we really only want to save the LP relaxation for thread
// with high linearization level to avoid to pollute the repository with
// sub-par lp solutions.
// In multi-thread, we really only want to save the LP relaxation for
// threads with a high linearization level to avoid polluting the
// repository with sub-par lp solutions.
//
// TODO(user): Experiment more around dynamically changing the
// threshold for storing LP solutions in the pool. Alternatively expose
@@ -1552,9 +1556,10 @@ SatSolver::Status IntegerSearchHelper::SolveIntegerProblem() {
parameters_.linearization_level() >= 2) {
num_decisions_since_last_lp_record_++;
if (num_decisions_since_last_lp_record_ >= 100) {
// NOTE: We can actually record LP solutions more frequently. However
// this process is time consuming and workers waste a lot of time doing
// this. To avoid this we don't record solutions after each decision.
// NOTE: We can actually record LP solutions more frequently.
// However this process is time consuming and workers waste a lot of
// time doing this. To avoid this we don't record solutions after
// each decision.
RecordLPRelaxationValues(model_);
num_decisions_since_last_lp_record_ = 0;
}

View File

@@ -261,7 +261,7 @@ std::function<bool()> SatSolverRestartPolicy(Model* model);
// Concatenates each input_heuristic with a default heuristic that instantiate
// all the problem's Boolean variables, into a new vector.
std::vector<std::function<BooleanOrIntegerLiteral()>> CompleteHeuristics(
const std::vector<std::function<BooleanOrIntegerLiteral()>>&
absl::Span<const std::function<BooleanOrIntegerLiteral()>>
incomplete_heuristics,
const std::function<BooleanOrIntegerLiteral()>& completion_heuristic);

View File

@@ -382,6 +382,11 @@ bool SchedulingConstraintHelper::UpdateCachedValues(int t) {
recompute_energy_profile_ = true;
}
// We might only want to do that if the value changed, but I am not sure it
// is worth the test.
recompute_by_start_max_ = true;
recompute_by_end_min_ = true;
cached_start_min_[t] = smin;
cached_end_min_[t] = emin;
cached_negated_start_max_[t] = -smax;
@@ -444,14 +449,14 @@ void SchedulingConstraintHelper::InitSortedVectors() {
task_by_increasing_start_min_.resize(num_tasks);
task_by_increasing_end_min_.resize(num_tasks);
task_by_decreasing_start_max_.resize(num_tasks);
task_by_increasing_negated_start_max_.resize(num_tasks);
task_by_decreasing_end_max_.resize(num_tasks);
task_by_increasing_shifted_start_min_.resize(num_tasks);
task_by_negated_shifted_end_max_.resize(num_tasks);
for (int t = 0; t < num_tasks; ++t) {
task_by_increasing_start_min_[t].task_index = t;
task_by_increasing_end_min_[t].task_index = t;
task_by_decreasing_start_max_[t].task_index = t;
task_by_increasing_negated_start_max_[t].task_index = t;
task_by_decreasing_end_max_[t].task_index = t;
task_by_increasing_shifted_start_min_[t].task_index = t;
@@ -461,6 +466,8 @@ void SchedulingConstraintHelper::InitSortedVectors() {
task_by_negated_shifted_end_max_[t].presence_lit = reason_for_presence_[t];
}
recompute_by_start_max_ = true;
recompute_by_end_min_ = true;
recompute_energy_profile_ = true;
recompute_shifted_start_min_ = true;
recompute_negated_shifted_end_max_ = true;
@@ -474,7 +481,9 @@ void SchedulingConstraintHelper::SetTimeDirection(bool is_forward) {
std::swap(ends_, minus_starts_);
std::swap(task_by_increasing_start_min_, task_by_decreasing_end_max_);
std::swap(task_by_increasing_end_min_, task_by_decreasing_start_max_);
std::swap(task_by_increasing_end_min_,
task_by_increasing_negated_start_max_);
std::swap(recompute_by_end_min_, recompute_by_start_max_);
std::swap(task_by_increasing_shifted_start_min_,
task_by_negated_shifted_end_max_);
@@ -575,23 +584,26 @@ SchedulingConstraintHelper::TaskByIncreasingStartMin() {
absl::Span<const TaskTime>
SchedulingConstraintHelper::TaskByIncreasingEndMin() {
if (!recompute_by_end_min_) return task_by_increasing_end_min_;
for (TaskTime& ref : task_by_increasing_end_min_) {
ref.time = EndMin(ref.task_index);
}
IncrementalSort(task_by_increasing_end_min_.begin(),
task_by_increasing_end_min_.end());
recompute_by_end_min_ = false;
return task_by_increasing_end_min_;
}
absl::Span<const TaskTime>
SchedulingConstraintHelper::TaskByDecreasingStartMax() {
for (TaskTime& ref : task_by_decreasing_start_max_) {
ref.time = StartMax(ref.task_index);
SchedulingConstraintHelper::TaskByIncreasingNegatedStartMax() {
if (!recompute_by_start_max_) return task_by_increasing_negated_start_max_;
for (TaskTime& ref : task_by_increasing_negated_start_max_) {
ref.time = cached_negated_start_max_[ref.task_index];
}
IncrementalSort(task_by_decreasing_start_max_.begin(),
task_by_decreasing_start_max_.end(),
std::greater<TaskTime>());
return task_by_decreasing_start_max_;
IncrementalSort(task_by_increasing_negated_start_max_.begin(),
task_by_increasing_negated_start_max_.end());
recompute_by_start_max_ = false;
return task_by_increasing_negated_start_max_;
}
absl::Span<const TaskTime>

View File

@@ -401,10 +401,11 @@ class SchedulingConstraintHelper : public PropagatorInterface,
// TODO(user): we could merge the first loop of IncrementalSort() with the
// loop that fill TaskTime.time at each call.
absl::Span<const TaskTime> TaskByIncreasingStartMin();
absl::Span<const TaskTime> TaskByIncreasingEndMin();
absl::Span<const TaskTime> TaskByDecreasingStartMax();
absl::Span<const TaskTime> TaskByDecreasingEndMax();
absl::Span<const TaskTime> TaskByIncreasingNegatedStartMax();
absl::Span<const TaskTime> TaskByIncreasingEndMin();
absl::Span<const CachedTaskBounds> TaskByIncreasingShiftedStartMin();
// Returns a sorted vector where each task appears twice, the first occurrence
@@ -584,10 +585,13 @@ class SchedulingConstraintHelper : public PropagatorInterface,
// Sorted vectors returned by the TasksBy*() functions.
std::vector<TaskTime> task_by_increasing_start_min_;
std::vector<TaskTime> task_by_increasing_end_min_;
std::vector<TaskTime> task_by_decreasing_start_max_;
std::vector<TaskTime> task_by_decreasing_end_max_;
bool recompute_by_start_max_ = true;
bool recompute_by_end_min_ = true;
std::vector<TaskTime> task_by_increasing_negated_start_max_;
std::vector<TaskTime> task_by_increasing_end_min_;
// Sorted vector returned by GetEnergyProfile().
bool recompute_energy_profile_ = true;
std::vector<ProfileEvent> energy_profile_;

View File

@@ -14,19 +14,15 @@
#ifndef OR_TOOLS_SAT_LINEAR_CONSTRAINT_MANAGER_H_
#define OR_TOOLS_SAT_LINEAR_CONSTRAINT_MANAGER_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "ortools/base/strong_vector.h"
#include "ortools/glop/revised_simplex.h"
#include "ortools/glop/variables_info.h"
#include "ortools/lp_data/lp_types.h"
#include "ortools/sat/integer.h"
@@ -183,9 +179,9 @@ class LinearConstraintManager {
return type_to_num_cuts_;
}
// If a debug solution has been loaded, this checks if the given constaint cut
// it or not. Returns true iff everything is fine and the cut does not violate
// the loaded solution.
// If a debug solution has been loaded, this checks if the given constraint
// cut it or not. Returns true if and only if everything is fine and the cut
// does not violate the loaded solution.
bool DebugCheckConstraint(const LinearConstraint& cut);
private:
@@ -193,7 +189,7 @@ class LinearConstraintManager {
// LP. Note that such constraints can be added back later by the heuristic
// responsible for adding new constraints from the pool.
//
// Returns true iff one or more constraints where removed.
// Returns true if and only if one or more constraints were removed.
//
// If the solutions_state is empty, then this function does nothing and
// returns false (this is used for tests). Otherwise, the solutions_state is

View File

@@ -201,8 +201,8 @@ LinearConstraint ScatteredIntegerVector::ConvertToLinearConstraint(
}
void ScatteredIntegerVector::ConvertToCutData(
absl::int128 rhs, const std::vector<IntegerVariable>& integer_variables,
const std::vector<double>& lp_solution, IntegerTrail* integer_trail,
absl::int128 rhs, absl::Span<const IntegerVariable> integer_variables,
absl::Span<const double> lp_solution, IntegerTrail* integer_trail,
CutData* result) {
result->terms.clear();
result->rhs = rhs;
@@ -981,7 +981,7 @@ bool LinearProgrammingConstraint::PreprocessCut(IntegerVariable first_slack,
bool LinearProgrammingConstraint::AddCutFromConstraints(
absl::string_view name,
const std::vector<std::pair<RowIndex, IntegerValue>>& integer_multipliers) {
absl::Span<const std::pair<RowIndex, IntegerValue>> integer_multipliers) {
// This is initialized to a valid linear constraint (by taking linear
// combination of the LP rows) and will be transformed into a cut if
// possible.
@@ -1881,7 +1881,7 @@ absl::int128 LinearProgrammingConstraint::GetImpliedLowerBound(
bool LinearProgrammingConstraint::ScalingCanOverflow(
int power, bool take_objective_into_account,
const std::vector<std::pair<glop::RowIndex, double>>& multipliers,
absl::Span<const std::pair<glop::RowIndex, double>> multipliers,
int64_t overflow_cap) const {
int64_t bound = 0;
const int64_t factor = int64_t{1} << power;

View File

@@ -88,8 +88,8 @@ class ScatteredIntegerVector {
std::nullopt);
void ConvertToCutData(absl::int128 rhs,
const std::vector<IntegerVariable>& integer_variables,
const std::vector<double>& lp_solution,
absl::Span<const IntegerVariable> integer_variables,
absl::Span<const double> lp_solution,
IntegerTrail* integer_trail, CutData* result);
// Similar to ConvertToLinearConstraint().
@@ -287,7 +287,7 @@ class LinearProgrammingConstraint : public PropagatorInterface,
// Return true if a new cut was added to the cut manager.
bool AddCutFromConstraints(
absl::string_view name,
const std::vector<std::pair<glop::RowIndex, IntegerValue>>&
absl::Span<const std::pair<glop::RowIndex, IntegerValue>>
integer_multipliers);
// Second half of AddCutFromConstraints().
@@ -337,7 +337,7 @@ class LinearProgrammingConstraint : public PropagatorInterface,
// std::round(std::ldexp(coeff, power)) ?
bool ScalingCanOverflow(
int power, bool take_objective_into_account,
const std::vector<std::pair<glop::RowIndex, double>>& multipliers,
absl::Span<const std::pair<glop::RowIndex, double>> multipliers,
int64_t overflow_cap) const;
// Computes from an integer linear combination of the integer rows of the LP a

View File

@@ -164,7 +164,7 @@ namespace {
//
// Precondition: var must be the only non-integer in the given constraint.
double GetIntegralityMultiplier(const MPModelProto& mp_model,
const std::vector<double>& var_scaling, int var,
absl::Span<const double> var_scaling, int var,
int ct_index, double tolerance) {
DCHECK(!mp_model.variable(var).is_integer());
const MPConstraintProto& ct = mp_model.constraint(ct_index);

View File

@@ -1039,7 +1039,8 @@ void SatSolver::Backtrack(int target_level) {
// that will cause some problems. Note that we could forbid a user to call
// Backtrack() with the current level, but that is annoying when you just
// want to reset the solver with Backtrack(0).
if (CurrentDecisionLevel() == target_level) return;
DCHECK(target_level == 0 || !Decisions().empty());
if (CurrentDecisionLevel() == target_level || Decisions().empty()) return;
DCHECK_GE(target_level, 0);
DCHECK_LE(target_level, CurrentDecisionLevel());

View File

@@ -416,7 +416,7 @@ bool TimeTablingPerTask::BuildProfile() {
const IntegerValue default_non_relevant_height =
has_demand_equal_to_capacity_ ? 1 : 0;
const auto& by_decreasing_start_max = helper_->TaskByDecreasingStartMax();
const auto& by_negated_start_max = helper_->TaskByIncreasingNegatedStartMax();
const auto& by_end_min = helper_->TaskByIncreasingEndMin();
// Next start/end of the compulsory parts to be processed. Note that only the
@@ -427,13 +427,12 @@ bool TimeTablingPerTask::BuildProfile() {
while (next_end < num_tasks) {
IntegerValue time = by_end_min[next_end].time;
if (next_start >= 0) {
time = std::min(time, by_decreasing_start_max[next_start].time);
time = std::min(time, -by_negated_start_max[next_start].time);
}
// Process the starting compulsory parts.
while (next_start >= 0 &&
by_decreasing_start_max[next_start].time == time) {
const int t = by_decreasing_start_max[next_start].task_index;
while (next_start >= 0 && -by_negated_start_max[next_start].time == time) {
const int t = by_negated_start_max[next_start].task_index;
current_height += demands_min[t];
--next_start;
}

View File

@@ -68,12 +68,12 @@ void TimeTableEdgeFinding::BuildTimeTable() {
ecp_.clear();
// Build start of compulsory part events.
const auto by_decreasing_start_max = helper_->TaskByDecreasingStartMax();
for (const auto task_time : ::gtl::reversed_view(by_decreasing_start_max)) {
const int t = task_time.task_index;
const auto by_negated_smax = helper_->TaskByIncreasingNegatedStartMax();
for (const auto [t, negated_smax] : ::gtl::reversed_view(by_negated_smax)) {
if (!helper_->IsPresent(t)) continue;
if (task_time.time < helper_->EndMin(t)) {
scp_.push_back(task_time);
const IntegerValue start_max = -negated_smax;
if (start_max < helper_->EndMin(t)) {
scp_.push_back({t, start_max});
}
}

View File

@@ -122,7 +122,7 @@ void ZeroHalfCutHelper::AddOneConstraint(const glop::RowIndex row,
}
}
void ZeroHalfCutHelper::SymmetricDifference(const std::vector<int>& a,
void ZeroHalfCutHelper::SymmetricDifference(absl::Span<const int> a,
std::vector<int>* b) {
for (const int v : *b) tmp_marked_[v] = true;
for (const int v : a) {

View File

@@ -92,7 +92,7 @@ class ZeroHalfCutHelper {
// Like std::set_symmetric_difference, but use a vector<bool> instead of sort.
// This assumes tmp_marked_ to be all false. We don't DCHECK it here for
// speed, but it DCHECKed on each EliminateVarUsingRow() call.
void SymmetricDifference(const std::vector<int>& a, std::vector<int>* b);
void SymmetricDifference(absl::Span<const int> a, std::vector<int>* b);
private:
// As we combine rows, when the activity of a combination get too far away