diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index ce8d2c0d87..b0f01e2ec8 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -1121,10 +1121,12 @@ cc_library( "//ortools/util:strong_integers", "//ortools/util:time_limit", "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/meta:type_traits", "@com_google_absl//absl/random:distributions", "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/types:span", ], ) diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index 10c77ba015..371f2fa482 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -24,7 +24,6 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" -#include "absl/flags/flag.h" #include "absl/log/check.h" #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" @@ -43,10 +42,6 @@ #include "ortools/sat/util.h" #include "ortools/util/strong_integers.h" -// TODO(user): remove this when the code is stable and does not use SCIP -// anymore. -ABSL_FLAG(bool, cp_model_use_max_hs, false, "Use max_hs in search portfolio."); - namespace operations_research { namespace sat { @@ -649,6 +644,14 @@ absl::flat_hash_map GetNamedParameters( new_params.set_use_dynamic_precedence_in_disjunctive(false); new_params.set_use_dynamic_precedence_in_cumulative(false); strategies["fixed"] = new_params; + + new_params.set_linearization_level(0); + strategies["fixed_no_lp"] = new_params; + + new_params.set_linearization_level(2); + new_params.set_add_lp_constraints_lazily(false); + new_params.set_root_lp_iterations(100'000); + strategies["fixed_max_lp"] = new_params; } // Quick restart. 
@@ -803,10 +806,6 @@ std::vector GetFullWorkerParameters( names.push_back("probing_no_lp"); names.push_back("objective_lb_search_no_lp"); names.push_back("objective_lb_search_max_lp"); - -#if !defined(__PORTABLE_PLATFORM__) && defined(USE_SCIP) - if (absl::GetFlag(FLAGS_cp_model_use_max_hs)) names.push_back("max_hs"); -#endif // !defined(__PORTABLE_PLATFORM__) && defined(USE_SCIP) } else { for (const std::string& name : base_params.subsolvers()) { // Hack for flatzinc. At the time of parameter setting, the objective is diff --git a/ortools/sat/cp_model_symmetries.cc b/ortools/sat/cp_model_symmetries.cc index 1977fef19c..1c53e17e9e 100644 --- a/ortools/sat/cp_model_symmetries.cc +++ b/ortools/sat/cp_model_symmetries.cc @@ -850,7 +850,7 @@ void OrbitAndPropagation(absl::Span orbits, int var, } std::vector BuildInequalityCoeffsForOrbitope( - const std::vector& maximum_values, int64_t max_linear_size, + absl::Span maximum_values, int64_t max_linear_size, bool* is_approximated) { std::vector out(maximum_values.size()); int64_t range_product = 1; @@ -872,8 +872,8 @@ std::vector BuildInequalityCoeffsForOrbitope( *is_approximated = true; const auto compute_approximate_coeffs = - [max_linear_size, &maximum_values](double scaling_factor, - std::vector* coeffs) -> bool { + [max_linear_size, maximum_values](double scaling_factor, + std::vector* coeffs) -> bool { int64_t max_size = 0; double cumulative_product_double = 1.0; for (int i = 0; i < maximum_values.size(); ++i) { diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index e4248c08d4..7faf54896a 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -704,7 +704,7 @@ bool NonOverlappingRectanglesDisjunctivePropagator:: // Compute relevant boxes, the one with a mandatory part of y. Because we will // need to sort it this way, we consider them by increasing start max. 
indexed_boxes_.clear(); - const auto temp = y->TaskByDecreasingStartMax(); + const auto temp = y->TaskByIncreasingNegatedStartMax(); for (int i = temp.size(); --i >= 0;) { const int box = temp[i].task_index; // Ignore absent boxes. @@ -718,7 +718,7 @@ bool NonOverlappingRectanglesDisjunctivePropagator:: continue; } - const IntegerValue start_max = temp[i].time; + const IntegerValue start_max = -temp[i].time; const IntegerValue end_min = y->EndMin(box); if (start_max < end_min) { indexed_boxes_.push_back({box, start_max, end_min}); diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index fa9703992b..b28c5ddbc8 100644 --- a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -343,8 +343,8 @@ template bool CombinedDisjunctive::Propagate() { if (!helper_->SynchronizeAndSetTimeDirection(time_direction)) return false; const auto& task_by_increasing_end_min = helper_->TaskByIncreasingEndMin(); - const auto& task_by_decreasing_start_max = - helper_->TaskByDecreasingStartMax(); + const auto& task_by_negated_start_max = + helper_->TaskByIncreasingNegatedStartMax(); for (auto& task_set : task_sets_) task_set.Clear(); end_mins_.assign(end_mins_.size(), kMinIntegerValue); @@ -360,9 +360,9 @@ bool CombinedDisjunctive::Propagate() { // Update all task sets. while (queue_index >= 0) { - const auto to_insert = task_by_decreasing_start_max[queue_index]; + const auto to_insert = task_by_negated_start_max[queue_index]; const int task_index = to_insert.task_index; - const IntegerValue start_max = to_insert.time; + const IntegerValue start_max = -to_insert.time; if (end_min <= start_max) break; if (helper_->IsPresent(task_index)) { task_is_added_[task_index] = true; @@ -749,8 +749,8 @@ bool DisjunctiveSimplePrecedences::Push(TaskTime before, int t) { bool DisjunctiveSimplePrecedences::PropagateOneDirection() { // We will loop in a decreasing way here. // And add tasks that are present to the task_set_. 
- absl::Span task_by_decreasing_start_max = - helper_->TaskByDecreasingStartMax(); + absl::Span task_by_negated_start_max = + helper_->TaskByIncreasingNegatedStartMax(); // We just keep amongst all the task before current_end_min, the one with the // highesh end-min. @@ -771,15 +771,16 @@ bool DisjunctiveSimplePrecedences::PropagateOneDirection() { int blocking_task = -1; IntegerValue blocking_start_max; IntegerValue current_end_min = task_by_increasing_end_min.front().time; - for (; true; task_by_decreasing_start_max.remove_suffix(1)) { - if (task_by_decreasing_start_max.empty()) { + for (; true; task_by_negated_start_max.remove_suffix(1)) { + if (task_by_negated_start_max.empty()) { // Small optim: this allows to process all remaining task rather than // looping around are retesting this for all remaining tasks. current_end_min = kMaxIntegerValue; break; } - const auto [t, start_max] = task_by_decreasing_start_max.back(); + const auto [t, negated_start_max] = task_by_negated_start_max.back(); + const IntegerValue start_max = -negated_start_max; if (current_end_min <= start_max) break; if (!helper_->IsPresent(t)) continue; @@ -850,59 +851,30 @@ bool DisjunctiveDetectablePrecedences::Propagate() { return false; } - // TODO(user): cache this more. - // We will "consume" tasks from here across PropagateSubwindow() calls. - task_by_decreasing_start_max_ = helper_->TaskByDecreasingStartMax(); - - // Split problem into independent part. - // - // The "independent" window can be processed separately because for each of - // them, a task [start-min, end-min] is in the window [window_start, - // window_end]. So any task to the left of the window cannot push such - // task start_min, and any task to the right of the window will have a - // start_max >= end_min, so wouldn't be in detectable precedence. + // Compute the "rank" of each task. 
+ const auto by_shifted_smin = helper_->TaskByIncreasingShiftedStartMin(); + int rank = -1; IntegerValue window_end = kMinIntegerValue; - IntegerValue min_start_min = kMinIntegerValue; - IntegerValue max_end_min = kMinIntegerValue; - for (const TaskTime task_time : helper_->TaskByIncreasingStartMin()) { - const int task = task_time.task_index; - if (helper_->IsAbsent(task)) continue; - - // Note that the helper returns value assuming the task is present. - const IntegerValue start_min = task_time.time; - const IntegerValue end_min = helper_->EndMin(task); - - if (start_min < window_end) { - const IntegerValue size_min = helper_->SizeMin(task); - DCHECK_GE(end_min, start_min + size_min); - - task_by_increasing_end_min_.push_back({task, end_min}); - max_end_min = std::max(max_end_min, end_min); - window_end = std::max(window_end, start_min) + size_min; + for (const auto [task, presence_lit, start_min] : by_shifted_smin) { + if (!helper_->IsPresent(presence_lit)) { + ranks_[task] = -1; continue; } - // Process current window. - if (task_by_increasing_end_min_.size() > 1 && - !PropagateSubwindow(min_start_min, max_end_min)) { - ++stats_.num_conflicts; - return false; + const IntegerValue size_min = helper_->SizeMin(task); + if (start_min < window_end) { + ranks_[task] = rank; + window_end += size_min; + } else { + ranks_[task] = ++rank; + window_end = start_min + size_min; } - - // Start of the next window. 
- task_by_increasing_end_min_.clear(); - task_by_increasing_end_min_.push_back({task, end_min}); - min_start_min = start_min; - max_end_min = end_min; - window_end = end_min; } - if (task_by_increasing_end_min_.size() > 1 && - !PropagateSubwindow(min_start_min, max_end_min)) { + if (!PropagateWithRanks()) { ++stats_.num_conflicts; return false; } - stats_.EndWithoutConflicts(); return true; } @@ -996,35 +968,18 @@ bool DisjunctiveDetectablePrecedences::Push(IntegerValue task_set_end_min, return true; } -bool DisjunctiveDetectablePrecedences::PropagateSubwindow( - const IntegerValue min_start_min, const IntegerValue max_end_min) { - DCHECK(!task_by_increasing_end_min_.empty()); - - IntegerValue first_start_max = kMaxIntegerValue; - for (; !task_by_decreasing_start_max_.empty(); - task_by_decreasing_start_max_.remove_suffix(1)) { - const auto [t, start_max] = task_by_decreasing_start_max_.back(); - if (!helper_->IsPresent(t)) continue; - - // Because of how the window was constructed, this task cannot push - // anything. - if (helper_->EndMin(t) <= min_start_min) continue; - first_start_max = start_max; - break; - } - - // No work to do for this window. - if (first_start_max >= max_end_min) return true; - - // The vector is already sorted by shifted_start_min, so there is likely a - // good correlation, hence the incremental sort. - IncrementalSort(task_by_increasing_end_min_.begin(), - task_by_increasing_end_min_.end()); - DCHECK_EQ(max_end_min, task_by_increasing_end_min_.back().time); +bool DisjunctiveDetectablePrecedences::PropagateWithRanks() { + // We will "consume" tasks from here. absl::Span task_by_increasing_end_min = - absl::MakeSpan(task_by_increasing_end_min_); + helper_->TaskByIncreasingEndMin(); + absl::Span task_by_negated_start_max = + helper_->TaskByIncreasingNegatedStartMax(); - // We will loop in an increasing way here and consume task from beginning. + // We will stop using ranks as soon as we propagated something. 
This allow to + // be sure this propagate as much as possible in a single pass and seems to + // help slightly. + int highest_rank = 0; + bool some_propagation = false; // Invariant: need_update is false implies that task_set_end_min is equal to // task_set_.ComputeEndMin(). @@ -1032,24 +987,30 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow( // TODO(user): Maybe it is just faster to merge ComputeEndMin() with // AddEntry(). task_set_.Clear(); - bool need_update = false; + to_add_.clear(); IntegerValue task_set_end_min = kMinIntegerValue; for (; !task_by_increasing_end_min.empty();) { // Consider all task with a start_max < current_end_min. int blocking_task = -1; IntegerValue blocking_start_max; IntegerValue current_end_min = task_by_increasing_end_min.front().time; - for (; true; task_by_decreasing_start_max_.remove_suffix(1)) { - if (task_by_decreasing_start_max_.empty()) { + + for (; true; task_by_negated_start_max.remove_suffix(1)) { + if (task_by_negated_start_max.empty()) { // Small optim: this allows to process all remaining task rather than // looping around are retesting this for all remaining tasks. current_end_min = kMaxIntegerValue; break; } - const auto [t, start_max] = task_by_decreasing_start_max_.back(); + const auto [t, negated_start_max] = task_by_negated_start_max.back(); + const IntegerValue start_max = -negated_start_max; if (current_end_min <= start_max) break; - if (!helper_->IsPresent(t)) continue; + + // If the task is not present, its rank will be -1. + const int rank = ranks_[t]; + if (rank < highest_rank) continue; + DCHECK(helper_->IsPresent(t)); // If t has a mandatory part, and extend further than current_end_min // then we can push it first. 
All tasks for which their push is delayed @@ -1073,20 +1034,29 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow( helper_->AddReasonForBeingBefore(t, blocking_task); return helper_->ReportConflict(); } else { - need_update = true; - task_set_.AddShiftedStartMinEntry(*helper_, t); + if (!some_propagation && rank > highest_rank) { + to_add_.clear(); + task_set_.Clear(); + highest_rank = rank; + } + to_add_.push_back(t); } } // If we have a blocking task. We need to propagate it first. if (blocking_task != -1) { DCHECK(!helper_->IsAbsent(blocking_task)); - if (need_update) { - need_update = false; + + if (!to_add_.empty()) { + for (const int t : to_add_) { + task_set_.AddShiftedStartMinEntry(*helper_, t); + } + to_add_.clear(); task_set_end_min = task_set_.ComputeEndMin(); } if (task_set_end_min > helper_->StartMin(blocking_task)) { + some_propagation = true; if (!Push(task_set_end_min, blocking_task)) return false; } @@ -1099,8 +1069,12 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow( // any of the subsequent tasks below. In particular, the reason will be // valid even though task_set might contains tasks with a start_max // greater or equal to the end_min of the task we push. - need_update = true; - task_set_.AddShiftedStartMinEntry(*helper_, blocking_task); + if (!some_propagation && ranks_[blocking_task] > highest_rank) { + to_add_.clear(); + task_set_.Clear(); + highest_rank = ranks_[blocking_task]; + } + to_add_.push_back(blocking_task); } // Lets propagate all task with end_min <= current_end_min that are also @@ -1111,8 +1085,11 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow( if (end_min > current_end_min) break; if (t == blocking_task) continue; // Already done. 
- if (need_update) { - need_update = false; + if (!to_add_.empty()) { + for (const int t : to_add_) { + task_set_.AddShiftedStartMinEntry(*helper_, t); + } + to_add_.clear(); task_set_end_min = task_set_.ComputeEndMin(); } @@ -1120,6 +1097,8 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow( if (task_set_end_min > helper_->StartMin(t)) { // Corner case if a previous push caused a subsequent task to be absent. if (helper_->IsAbsent(t)) continue; + + some_propagation = true; if (!Push(task_set_end_min, t)) return false; } } @@ -1347,7 +1326,8 @@ bool DisjunctiveNotLast::Propagate() { return false; } - const auto task_by_decreasing_start_max = helper_->TaskByDecreasingStartMax(); + const auto task_by_negated_start_max = + helper_->TaskByIncreasingNegatedStartMax(); const auto task_by_increasing_shifted_start_min = helper_->TaskByIncreasingShiftedStartMin(); @@ -1362,7 +1342,7 @@ bool DisjunctiveNotLast::Propagate() { // looking at the task in the first window. Tasks to the left do not cause // issue for the task to be last, and tasks to the right will not lower the // end-min of the task under consideration. - int queue_index = task_by_decreasing_start_max.size() - 1; + int queue_index = task_by_negated_start_max.size() - 1; const int num_tasks = task_by_increasing_shifted_start_min.size(); for (int i = 0; i < num_tasks;) { start_min_window_.clear(); @@ -1387,12 +1367,14 @@ bool DisjunctiveNotLast::Propagate() { // fall into [window_start, window_end). start_max_window_.clear(); for (; queue_index >= 0; queue_index--) { - const auto task_time = task_by_decreasing_start_max[queue_index]; + const auto [t, negated_start_max] = + task_by_negated_start_max[queue_index]; + const IntegerValue start_max = -negated_start_max; // Note that we add task whose presence is still unknown here. 
- if (task_time.time >= window_end) break; - if (helper_->IsAbsent(task_time.task_index)) continue; - start_max_window_.push_back(task_time); + if (start_max >= window_end) break; + if (helper_->IsAbsent(t)) continue; + start_max_window_.push_back({t, start_max}); } // If this is the case, we cannot propagate more than the detectable diff --git a/ortools/sat/disjunctive.h b/ortools/sat/disjunctive.h index c5b7ec5582..bf3e0a7bfa 100644 --- a/ortools/sat/disjunctive.h +++ b/ortools/sat/disjunctive.h @@ -234,17 +234,18 @@ class DisjunctiveDetectablePrecedences : public PropagatorInterface { helper_(helper), task_set_(helper->NumTasks()), stats_("DisjunctiveDetectablePrecedences", model) { - task_by_increasing_end_min_.ClearAndReserve(helper->NumTasks()); + ranks_.resize(helper->NumTasks()); + to_add_.ClearAndReserve(helper->NumTasks()); } bool Propagate() final; int RegisterWith(GenericLiteralWatcher* watcher); private: - bool PropagateSubwindow(IntegerValue min_start_min, IntegerValue max_end_min); + bool PropagateWithRanks(); bool Push(IntegerValue task_set_end_min, int t); - FixedCapacityVector task_by_increasing_end_min_; - absl::Span task_by_decreasing_start_max_; + FixedCapacityVector to_add_; + std::vector ranks_; const bool time_direction_; SchedulingConstraintHelper* helper_; diff --git a/ortools/sat/integer_search.cc b/ortools/sat/integer_search.cc index e924dda7bd..92dadbd73b 100644 --- a/ortools/sat/integer_search.cc +++ b/ortools/sat/integer_search.cc @@ -17,7 +17,9 @@ #include #include #include +#include #include +#include #include #include @@ -26,6 +28,7 @@ #include "absl/meta/type_traits.h" #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" +#include "absl/strings/str_format.h" #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/sat/clause.h" @@ -169,7 +172,7 @@ IntegerLiteral SplitUsingBestSolutionValueInRepository( std::function FirstUnassignedVarAtItsMinHeuristic( const std::vector& vars, Model* 
model) { auto* integer_trail = model->GetOrCreate(); - return [/*copy*/ vars, integer_trail]() { + return [/*copy*/ vars = vars, integer_trail]() { for (const IntegerVariable var : vars) { const IntegerLiteral decision = AtMinValue(var, integer_trail); if (decision.IsValid()) return BooleanOrIntegerLiteral(decision); @@ -254,7 +257,7 @@ std::function LpPseudoCostHeuristic(Model* model) { if (info.score > best_score) { best_score = info.score; - // This direction works better than the inverse in the benchs. But + // This direction works better than the inverse in the benchmarks. But // always branching up seems even better. TODO(user): investigate. if (info.down_score > info.up_score) { decision = BooleanOrIntegerLiteral(info.down_branch); @@ -1022,8 +1025,8 @@ std::function RandomizeOnRestartHeuristic( // Special case: Don't change the decision value. value_selection_weight.push_back(10); - // TODO(user): These distribution values are just guessed values. They - // need to be tuned. + // TODO(user): These distribution values are just guessed values. + // They need to be tuned. std::discrete_distribution val_dist(value_selection_weight.begin(), value_selection_weight.end()); @@ -1114,9 +1117,9 @@ std::function FollowHint( auto* integer_trail = model->GetOrCreate(); auto* rev_int_repo = model->GetOrCreate(); - // This is not ideal as we reserve an int for the full duration of the model - // even if we use this FollowHint() function just for a while. But it is - // an easy solution to not have reference to deleted memory in the + // This is not ideal as we reserve an int for the full duration of the + // model even if we use this FollowHint() function just for a while. But + // it is an easy solution to not have reference to deleted memory in the // RevIntRepository(). Note that once we backtrack, these reference will // disappear. 
int* rev_start_index = model->TakeOwnership(new int); @@ -1305,7 +1308,7 @@ void ConfigureSearchHeuristics(Model* model) { } std::vector> CompleteHeuristics( - const std::vector>& + absl::Span> incomplete_heuristics, const std::function& completion_heuristic) { std::vector> complete_heuristics; @@ -1456,15 +1459,15 @@ SatSolver::Status IntegerSearchHelper::SolveIntegerProblem() { CHECK_EQ(num_policies, heuristics.restart_policies.size()); // Note that it is important to do the level-zero propagation if it wasn't - // already done because EnqueueDecisionAndBackjumpOnConflict() assumes that - // the solver is in a "propagated" state. + // already done because EnqueueDecisionAndBackjumpOnConflict() assumes + // that the solver is in a "propagated" state. // - // TODO(user): We have the issue that at level zero. calling the propagation - // loop more than once can propagate more! This is because we call the LP - // again and again on each level zero propagation. This is causing some - // CHECKs() to fail in multithread (rarely) because when we associate new - // literals to integer ones, Propagate() is indirectly called. Not sure yet - // how to fix. + // TODO(user): We have the issue that at level zero. calling the + // propagation loop more than once can propagate more! This is because we + // call the LP again and again on each level zero propagation. This is + // causing some CHECKs() to fail in multithread (rarely) because when we + // associate new literals to integer ones, Propagate() is indirectly + // called. Not sure yet how to fix. if (!sat_solver_->FinishPropagation()) return sat_solver_->UnsatStatus(); // Main search loop. @@ -1508,20 +1511,21 @@ SatSolver::Status IntegerSearchHelper::SolveIntegerProblem() { // No decision means that we reached a leave of the search tree and that // we have a feasible solution. 
// - // Tricky: If the time limit is reached during the final propagation when - // all variables are fixed, there is no guarantee that the propagation - // responsible for testing the validity of the solution was run to - // completion. So we cannot report a feasible solution. + // Tricky: If the time limit is reached during the final propagation + // when all variables are fixed, there is no guarantee that the + // propagation responsible for testing the validity of the solution was + // run to completion. So we cannot report a feasible solution. if (time_limit_->LimitReached()) return SatSolver::LIMIT_REACHED; if (decision == kNoLiteralIndex) { - // Save the current polarity of all Booleans in the solution. It will be - // followed for the next SAT decisions. This is known to be a good policy - // for optimization problem. Note that for decision problem we don't care - // since we are just done as soon as a solution is found. + // Save the current polarity of all Booleans in the solution. It will + // be followed for the next SAT decisions. This is known to be a good + // policy for optimization problem. Note that for decision problem we + // don't care since we are just done as soon as a solution is found. // // This idea is kind of "well known", see for instance the "LinSBPS" - // submission to the maxSAT 2018 competition by Emir Demirovic and Peter - // Stuckey where they show it is a good idea and provide more references. + // submission to the maxSAT 2018 competition by Emir Demirovic and + // Peter Stuckey where they show it is a good idea and provide more + // references. 
if (parameters_.use_optimization_hints()) { auto* sat_decision = model_->GetOrCreate(); const auto& trail = *model_->GetOrCreate(); @@ -1536,9 +1540,9 @@ SatSolver::Status IntegerSearchHelper::SolveIntegerProblem() { return sat_solver_->UnsatStatus(); } - // In multi-thread, we really only want to save the LP relaxation for thread - // with high linearization level to avoid to pollute the repository with - // sub-par lp solutions. + // In multi-thread, we really only want to save the LP relaxation for + // thread with high linearization level to avoid to pollute the + // repository with sub-par lp solutions. // // TODO(user): Experiment more around dynamically changing the // threshold for storing LP solutions in the pool. Alternatively expose @@ -1552,9 +1556,10 @@ SatSolver::Status IntegerSearchHelper::SolveIntegerProblem() { parameters_.linearization_level() >= 2) { num_decisions_since_last_lp_record_++; if (num_decisions_since_last_lp_record_ >= 100) { - // NOTE: We can actually record LP solutions more frequently. However - // this process is time consuming and workers waste a lot of time doing - // this. To avoid this we don't record solutions after each decision. + // NOTE: We can actually record LP solutions more frequently. + // However this process is time consuming and workers waste a lot of + // time doing this. To avoid this we don't record solutions after + // each decision. RecordLPRelaxationValues(model_); num_decisions_since_last_lp_record_ = 0; } diff --git a/ortools/sat/integer_search.h b/ortools/sat/integer_search.h index d4722d8e59..51e608ef1a 100644 --- a/ortools/sat/integer_search.h +++ b/ortools/sat/integer_search.h @@ -261,7 +261,7 @@ std::function SatSolverRestartPolicy(Model* model); // Concatenates each input_heuristic with a default heuristic that instantiate // all the problem's Boolean variables, into a new vector. 
std::vector> CompleteHeuristics( - const std::vector>& + absl::Span> incomplete_heuristics, const std::function& completion_heuristic); diff --git a/ortools/sat/intervals.cc b/ortools/sat/intervals.cc index 08ead7a3c0..c61610b727 100644 --- a/ortools/sat/intervals.cc +++ b/ortools/sat/intervals.cc @@ -382,6 +382,11 @@ bool SchedulingConstraintHelper::UpdateCachedValues(int t) { recompute_energy_profile_ = true; } + // We might only want to do that if the value changed, but I am not sure it + // is worth the test. + recompute_by_start_max_ = true; + recompute_by_end_min_ = true; + cached_start_min_[t] = smin; cached_end_min_[t] = emin; cached_negated_start_max_[t] = -smax; @@ -444,14 +449,14 @@ void SchedulingConstraintHelper::InitSortedVectors() { task_by_increasing_start_min_.resize(num_tasks); task_by_increasing_end_min_.resize(num_tasks); - task_by_decreasing_start_max_.resize(num_tasks); + task_by_increasing_negated_start_max_.resize(num_tasks); task_by_decreasing_end_max_.resize(num_tasks); task_by_increasing_shifted_start_min_.resize(num_tasks); task_by_negated_shifted_end_max_.resize(num_tasks); for (int t = 0; t < num_tasks; ++t) { task_by_increasing_start_min_[t].task_index = t; task_by_increasing_end_min_[t].task_index = t; - task_by_decreasing_start_max_[t].task_index = t; + task_by_increasing_negated_start_max_[t].task_index = t; task_by_decreasing_end_max_[t].task_index = t; task_by_increasing_shifted_start_min_[t].task_index = t; @@ -461,6 +466,8 @@ void SchedulingConstraintHelper::InitSortedVectors() { task_by_negated_shifted_end_max_[t].presence_lit = reason_for_presence_[t]; } + recompute_by_start_max_ = true; + recompute_by_end_min_ = true; recompute_energy_profile_ = true; recompute_shifted_start_min_ = true; recompute_negated_shifted_end_max_ = true; @@ -474,7 +481,9 @@ void SchedulingConstraintHelper::SetTimeDirection(bool is_forward) { std::swap(ends_, minus_starts_); std::swap(task_by_increasing_start_min_, task_by_decreasing_end_max_); - 
std::swap(task_by_increasing_end_min_, task_by_decreasing_start_max_); + std::swap(task_by_increasing_end_min_, + task_by_increasing_negated_start_max_); + std::swap(recompute_by_end_min_, recompute_by_start_max_); std::swap(task_by_increasing_shifted_start_min_, task_by_negated_shifted_end_max_); @@ -575,23 +584,26 @@ SchedulingConstraintHelper::TaskByIncreasingStartMin() { absl::Span SchedulingConstraintHelper::TaskByIncreasingEndMin() { + if (!recompute_by_end_min_) return task_by_increasing_end_min_; for (TaskTime& ref : task_by_increasing_end_min_) { ref.time = EndMin(ref.task_index); } IncrementalSort(task_by_increasing_end_min_.begin(), task_by_increasing_end_min_.end()); + recompute_by_end_min_ = false; return task_by_increasing_end_min_; } absl::Span -SchedulingConstraintHelper::TaskByDecreasingStartMax() { - for (TaskTime& ref : task_by_decreasing_start_max_) { - ref.time = StartMax(ref.task_index); +SchedulingConstraintHelper::TaskByIncreasingNegatedStartMax() { + if (!recompute_by_start_max_) return task_by_increasing_negated_start_max_; + for (TaskTime& ref : task_by_increasing_negated_start_max_) { + ref.time = cached_negated_start_max_[ref.task_index]; } - IncrementalSort(task_by_decreasing_start_max_.begin(), - task_by_decreasing_start_max_.end(), - std::greater()); - return task_by_decreasing_start_max_; + IncrementalSort(task_by_increasing_negated_start_max_.begin(), + task_by_increasing_negated_start_max_.end()); + recompute_by_start_max_ = false; + return task_by_increasing_negated_start_max_; } absl::Span diff --git a/ortools/sat/intervals.h b/ortools/sat/intervals.h index 294ca8254f..31869bbe8c 100644 --- a/ortools/sat/intervals.h +++ b/ortools/sat/intervals.h @@ -401,10 +401,11 @@ class SchedulingConstraintHelper : public PropagatorInterface, // TODO(user): we could merge the first loop of IncrementalSort() with the // loop that fill TaskTime.time at each call. 
absl::Span TaskByIncreasingStartMin(); - absl::Span TaskByIncreasingEndMin(); - absl::Span TaskByDecreasingStartMax(); absl::Span TaskByDecreasingEndMax(); + absl::Span TaskByIncreasingNegatedStartMax(); + absl::Span TaskByIncreasingEndMin(); + absl::Span TaskByIncreasingShiftedStartMin(); // Returns a sorted vector where each task appear twice, the first occurrence @@ -584,10 +585,13 @@ class SchedulingConstraintHelper : public PropagatorInterface, // Sorted vectors returned by the TasksBy*() functions. std::vector task_by_increasing_start_min_; - std::vector task_by_increasing_end_min_; - std::vector task_by_decreasing_start_max_; std::vector task_by_decreasing_end_max_; + bool recompute_by_start_max_ = true; + bool recompute_by_end_min_ = true; + std::vector task_by_increasing_negated_start_max_; + std::vector task_by_increasing_end_min_; + // Sorted vector returned by GetEnergyProfile(). bool recompute_energy_profile_ = true; std::vector energy_profile_; diff --git a/ortools/sat/linear_constraint_manager.h b/ortools/sat/linear_constraint_manager.h index c823f8f210..eb39b52c3b 100644 --- a/ortools/sat/linear_constraint_manager.h +++ b/ortools/sat/linear_constraint_manager.h @@ -14,19 +14,15 @@ #ifndef OR_TOOLS_SAT_LINEAR_CONSTRAINT_MANAGER_H_ #define OR_TOOLS_SAT_LINEAR_CONSTRAINT_MANAGER_H_ -#include #include #include #include -#include #include #include "absl/container/btree_map.h" #include "absl/container/flat_hash_map.h" -#include "absl/container/flat_hash_set.h" #include "absl/strings/string_view.h" #include "ortools/base/strong_vector.h" -#include "ortools/glop/revised_simplex.h" #include "ortools/glop/variables_info.h" #include "ortools/lp_data/lp_types.h" #include "ortools/sat/integer.h" @@ -183,9 +179,9 @@ class LinearConstraintManager { return type_to_num_cuts_; } - // If a debug solution has been loaded, this checks if the given constaint cut - // it or not. Returns true iff everything is fine and the cut does not violate - // the loaded solution. 
+ // If a debug solution has been loaded, this checks if the given constraint + cut it or not. Returns true if and only if everything is fine and the cut + does not violate the loaded solution. bool DebugCheckConstraint(const LinearConstraint& cut); private: @@ -193,7 +189,7 @@ class LinearConstraintManager { // LP. Note that such constraints can be added back later by the heuristic // responsible for adding new constraints from the pool. // - // Returns true iff one or more constraints where removed. + // Returns true if and only if one or more constraints were removed. // // If the solutions_state is empty, then this function does nothing and // returns false (this is used for tests). Otherwise, the solutions_state is diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index 64ed3d7f90..2d0ffd9d2e 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -201,8 +201,8 @@ LinearConstraint ScatteredIntegerVector::ConvertToLinearConstraint( } void ScatteredIntegerVector::ConvertToCutData( - absl::int128 rhs, const std::vector& integer_variables, - const std::vector& lp_solution, IntegerTrail* integer_trail, + absl::int128 rhs, absl::Span integer_variables, + absl::Span lp_solution, IntegerTrail* integer_trail, CutData* result) { result->terms.clear(); result->rhs = rhs; @@ -981,7 +981,7 @@ bool LinearProgrammingConstraint::PreprocessCut(IntegerVariable first_slack, bool LinearProgrammingConstraint::AddCutFromConstraints( absl::string_view name, - const std::vector>& integer_multipliers) { // This is initialized to a valid linear constraint (by taking linear // combination of the LP rows) and will be transformed into a cut if // possible. 
@@ -1881,7 +1881,7 @@ absl::int128 LinearProgrammingConstraint::GetImpliedLowerBound( bool LinearProgrammingConstraint::ScalingCanOverflow( int power, bool take_objective_into_account, - const std::vector>& multipliers, + absl::Span> multipliers, int64_t overflow_cap) const { int64_t bound = 0; const int64_t factor = int64_t{1} << power; diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index 6f4866ec22..cc6e5c2cc5 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -88,8 +88,8 @@ class ScatteredIntegerVector { std::nullopt); void ConvertToCutData(absl::int128 rhs, - const std::vector& integer_variables, - const std::vector& lp_solution, + absl::Span integer_variables, + absl::Span lp_solution, IntegerTrail* integer_trail, CutData* result); // Similar to ConvertToLinearConstraint(). @@ -287,7 +287,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, // Return true if a new cut was added to the cut manager. bool AddCutFromConstraints( absl::string_view name, - const std::vector>& + absl::Span> integer_multipliers); // Second half of AddCutFromConstraints(). @@ -337,7 +337,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, // std::round(std::ldexp(coeff, power)) ? bool ScalingCanOverflow( int power, bool take_objective_into_account, - const std::vector>& multipliers, + absl::Span> multipliers, int64_t overflow_cap) const; // Computes from an integer linear combination of the integer rows of the LP a diff --git a/ortools/sat/lp_utils.cc b/ortools/sat/lp_utils.cc index 9c55cbab03..0595ec66dc 100644 --- a/ortools/sat/lp_utils.cc +++ b/ortools/sat/lp_utils.cc @@ -164,7 +164,7 @@ namespace { // // Precondition: var must be the only non-integer in the given constraint. 
double GetIntegralityMultiplier(const MPModelProto& mp_model, - const std::vector& var_scaling, int var, + absl::Span var_scaling, int var, int ct_index, double tolerance) { DCHECK(!mp_model.variable(var).is_integer()); const MPConstraintProto& ct = mp_model.constraint(ct_index); diff --git a/ortools/sat/sat_solver.cc b/ortools/sat/sat_solver.cc index 08dd9a5ebe..b8eea11198 100644 --- a/ortools/sat/sat_solver.cc +++ b/ortools/sat/sat_solver.cc @@ -1039,7 +1039,8 @@ void SatSolver::Backtrack(int target_level) { // that will cause some problems. Note that we could forbid a user to call // Backtrack() with the current level, but that is annoying when you just // want to reset the solver with Backtrack(0). - if (CurrentDecisionLevel() == target_level) return; + DCHECK(target_level == 0 || !Decisions().empty()); + if (CurrentDecisionLevel() == target_level || Decisions().empty()) return; DCHECK_GE(target_level, 0); DCHECK_LE(target_level, CurrentDecisionLevel()); diff --git a/ortools/sat/timetable.cc b/ortools/sat/timetable.cc index 8dbcbf283f..66e7c6e491 100644 --- a/ortools/sat/timetable.cc +++ b/ortools/sat/timetable.cc @@ -416,7 +416,7 @@ bool TimeTablingPerTask::BuildProfile() { const IntegerValue default_non_relevant_height = has_demand_equal_to_capacity_ ? 1 : 0; - const auto& by_decreasing_start_max = helper_->TaskByDecreasingStartMax(); + const auto& by_negated_start_max = helper_->TaskByIncreasingNegatedStartMax(); const auto& by_end_min = helper_->TaskByIncreasingEndMin(); // Next start/end of the compulsory parts to be processed. Note that only the @@ -427,13 +427,12 @@ bool TimeTablingPerTask::BuildProfile() { while (next_end < num_tasks) { IntegerValue time = by_end_min[next_end].time; if (next_start >= 0) { - time = std::min(time, by_decreasing_start_max[next_start].time); + time = std::min(time, -by_negated_start_max[next_start].time); } // Process the starting compulsory parts. 
- while (next_start >= 0 && - by_decreasing_start_max[next_start].time == time) { - const int t = by_decreasing_start_max[next_start].task_index; + while (next_start >= 0 && -by_negated_start_max[next_start].time == time) { + const int t = by_negated_start_max[next_start].task_index; current_height += demands_min[t]; --next_start; } diff --git a/ortools/sat/timetable_edgefinding.cc b/ortools/sat/timetable_edgefinding.cc index ea07c08484..04d63255fe 100644 --- a/ortools/sat/timetable_edgefinding.cc +++ b/ortools/sat/timetable_edgefinding.cc @@ -68,12 +68,12 @@ void TimeTableEdgeFinding::BuildTimeTable() { ecp_.clear(); // Build start of compulsory part events. - const auto by_decreasing_start_max = helper_->TaskByDecreasingStartMax(); - for (const auto task_time : ::gtl::reversed_view(by_decreasing_start_max)) { - const int t = task_time.task_index; + const auto by_negated_smax = helper_->TaskByIncreasingNegatedStartMax(); + for (const auto [t, negated_smax] : ::gtl::reversed_view(by_negated_smax)) { if (!helper_->IsPresent(t)) continue; - if (task_time.time < helper_->EndMin(t)) { - scp_.push_back(task_time); + const IntegerValue start_max = -negated_smax; + if (start_max < helper_->EndMin(t)) { + scp_.push_back({t, start_max}); } } diff --git a/ortools/sat/zero_half_cuts.cc b/ortools/sat/zero_half_cuts.cc index 0dca968f89..ffe51981bd 100644 --- a/ortools/sat/zero_half_cuts.cc +++ b/ortools/sat/zero_half_cuts.cc @@ -122,7 +122,7 @@ void ZeroHalfCutHelper::AddOneConstraint(const glop::RowIndex row, } } -void ZeroHalfCutHelper::SymmetricDifference(const std::vector& a, +void ZeroHalfCutHelper::SymmetricDifference(absl::Span a, std::vector* b) { for (const int v : *b) tmp_marked_[v] = true; for (const int v : a) { diff --git a/ortools/sat/zero_half_cuts.h b/ortools/sat/zero_half_cuts.h index e48e9eb90d..0d307a845e 100644 --- a/ortools/sat/zero_half_cuts.h +++ b/ortools/sat/zero_half_cuts.h @@ -92,7 +92,7 @@ class ZeroHalfCutHelper { // Like 
std::set_symmetric_difference, but use a vector instead of sort. // This assumes tmp_marked_ to be all false. We don't DCHECK it here for // speed, but it DCHECKed on each EliminateVarUsingRow() call. - void SymmetricDifference(const std::vector& a, std::vector* b); + void SymmetricDifference(absl::Span a, std::vector* b); private: // As we combine rows, when the activity of a combination get too far away