diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 5adb81282a..c9639e9584 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -126,6 +126,11 @@ ABSL_FLAG(bool, cp_model_dump_models, false, "format to 'FLAGS_cp_model_dump_prefix'{model|presolved_model|" "mapping_model}.pb.txt."); +ABSL_FLAG(bool, cp_model_export_model, false, + "DEBUG ONLY. When set to true, SolveCpModel() will dump the model " + "proto of the original model format to " + "'FLAGS_cp_model_dump_prefix'{name}.pb.txt."); + ABSL_FLAG(bool, cp_model_dump_text_proto, true, "DEBUG ONLY, dump models in text proto instead of binary proto."); @@ -1000,19 +1005,17 @@ IntegerVariable AddLPConstraints(bool objective_need_to_be_tight, // Dispatch every constraint to its LinearProgrammingConstraint. std::vector lp_constraints(num_components, nullptr); - std::vector> component_to_constraints( - num_components); for (int i = 0; i < num_lp_constraints; i++) { const int c = index_to_component[get_constraint_index(i)]; if (component_sizes[c] <= 1) continue; - component_to_constraints[c].push_back(relaxation.linear_constraints[i]); if (lp_constraints[c] == nullptr) { lp_constraints[c] = new LinearProgrammingConstraint(m, component_to_var[c]); m->TakeOwnership(lp_constraints[c]); } // Load the constraint. - lp_constraints[c]->AddLinearConstraint(relaxation.linear_constraints[i]); + lp_constraints[c]->AddLinearConstraint( + std::move(relaxation.linear_constraints[i])); } // Dispatch every cut generator to its LinearProgrammingConstraint. @@ -3978,6 +3981,10 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { if (absl::GetFlag(FLAGS_cp_model_dump_models)) { DumpModelProto(model_proto, "model"); } + if (absl::GetFlag(FLAGS_cp_model_export_model)) { + DumpModelProto(model_proto, + model_proto.name().empty() ? 
"model" : model_proto.name()); + } #endif // __PORTABLE_PLATFORM__ #if !defined(__PORTABLE_PLATFORM__) diff --git a/ortools/sat/inclusion.h b/ortools/sat/inclusion.h index 8783e0514c..dba904185b 100644 --- a/ortools/sat/inclusion.h +++ b/ortools/sat/inclusion.h @@ -26,66 +26,11 @@ #include #include "absl/log/check.h" -#include "absl/types/span.h" #include "ortools/base/logging.h" namespace operations_research { namespace sat { -// Small utility class to store a vector> where one can only append new -// vector and never change previously added ones. -// -// Note that we implement a really small subset of the vector> API. -template -class CompactVectorVector { - public: - // Same as push_back(). - // Returns the previous size() as this is convenient for how we use it. - int Add(absl::Span data) { - const int index = size(); - starts_.push_back(buffer_.size()); - sizes_.push_back(data.size()); - buffer_.insert(buffer_.end(), data.begin(), data.end()); - return index; - } - - // Same as Add() but for sat::Literal or any type from which we can get - // indices. - template - int AddLiterals(const std::vector& data) { - const int index = size(); - starts_.push_back(buffer_.size()); - sizes_.push_back(data.size()); - for (const L literal : data) { - buffer_.push_back(literal.Index().value()); - } - return index; - } - - // Warning: this is only valid until the next clear() or Add() call. - absl::Span operator[](int index) const { - DCHECK_GE(index, 0); - DCHECK_LT(index, starts_.size()); - DCHECK_LT(index, sizes_.size()); - const size_t size = static_cast(sizes_[index]); - if (size == 0) return {}; - return {&buffer_[starts_[index]], size}; - } - - void clear() { - starts_.clear(); - sizes_.clear(); - buffer_.clear(); - } - - size_t size() const { return starts_.size(); } - - private: - std::vector starts_; - std::vector sizes_; - std::vector buffer_; -}; - // An helper class to process many sets of integer in [0, n] and detects all the // set included in each others. 
This is a common operations in presolve, and // while it can be slow the algorithm used here is pretty efficient in practice. @@ -99,10 +44,12 @@ class CompactVectorVector { // The number n will be detected automatically but we allocate various vector // of size n, so avoid having large integer values in your sets. // -// All set contents will be accessed via storage_[index]. And of course Storage -// can be the CompactVectorVector defined above. But it can also be something -// that return a class that support .size() and integer range iteration over the -// element in the set on the fly. +// All set contents will be accessed via storage_[index]. +// This can be used with a vector> or our CompactVectorVector that we +// use in a few places. But it can also be anything that supports: +// - storage_.size() +// - range iteration over storage_[index] +// - storage_[index].size() template class InclusionDetector { public: diff --git a/ortools/sat/linear_constraint.cc b/ortools/sat/linear_constraint.cc index e63ab4c5e0..8e7cfff742 100644 --- a/ortools/sat/linear_constraint.cc +++ b/ortools/sat/linear_constraint.cc @@ -484,5 +484,27 @@ IntegerValue GetCoefficientOfPositiveVar(const IntegerVariable var, return IntegerValue(0); } +bool PossibleOverflow(const IntegerTrail& integer_trail, + const LinearConstraint& constraint) { + IntegerValue min_activity(0); + IntegerValue max_activity(0); + const int size = constraint.vars.size(); + for (int i = 0; i < size; ++i) { + const IntegerVariable var = constraint.vars[i]; + const IntegerValue coeff = constraint.coeffs[i]; + CHECK_NE(coeff, 0); + const IntegerValue lb = integer_trail.LevelZeroLowerBound(var); + const IntegerValue ub = integer_trail.LevelZeroUpperBound(var); + if (coeff > 0) { + if (!AddProductTo(lb, coeff, &min_activity)) return true; + if (!AddProductTo(ub, coeff, &max_activity)) return true; + } else { + if (!AddProductTo(ub, coeff, &min_activity)) return true; + if (!AddProductTo(lb, coeff, &max_activity)) return 
true; + } + } + return AtMinOrMaxInt64(CapSub(max_activity.value(), min_activity.value())); +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/linear_constraint.h b/ortools/sat/linear_constraint.h index 2254cf940b..af79f10347 100644 --- a/ortools/sat/linear_constraint.h +++ b/ortools/sat/linear_constraint.h @@ -259,6 +259,15 @@ double ComputeActivity( const LinearConstraint& constraint, const absl::StrongVector& values); +// Tests for possible overflow in the given linear constraint used for the +// linear relaxation. This is a bit relaxed compared to what we require for +// generic linear constraints that are used in our CP propagators. +// +// If this check passes, our constraint should be safe to use in our +// simplification code, our cut computation, etc... +bool PossibleOverflow(const IntegerTrail& integer_trail, + const LinearConstraint& constraint); + // Returns sqrt(sum square(coeff)). double ComputeL2Norm(const LinearConstraint& constraint); diff --git a/ortools/sat/linear_constraint_manager.cc b/ortools/sat/linear_constraint_manager.cc index 7f8bce7408..e57acab37c 100644 --- a/ortools/sat/linear_constraint_manager.cc +++ b/ortools/sat/linear_constraint_manager.cc @@ -46,28 +46,6 @@ namespace operations_research { namespace sat { -bool PossibleOverflow(const IntegerTrail& integer_trail, - const LinearConstraint& constraint) { - IntegerValue min_activity(0); - IntegerValue max_activity(0); - const int size = constraint.vars.size(); - for (int i = 0; i < size; ++i) { - const IntegerVariable var = constraint.vars[i]; - const IntegerValue coeff = constraint.coeffs[i]; - CHECK_NE(coeff, 0); - const IntegerValue lb = integer_trail.LevelZeroLowerBound(var); - const IntegerValue ub = integer_trail.LevelZeroUpperBound(var); - if (coeff > 0) { - if (!AddProductTo(lb, coeff, &min_activity)) return true; - if (!AddProductTo(ub, coeff, &max_activity)) return true; - } else { - if (!AddProductTo(ub, coeff, &min_activity)) return true; - if 
(!AddProductTo(lb, coeff, &max_activity)) return true; - } - } - return AtMinOrMaxInt64(CapSub(max_activity.value(), min_activity.value())); -} - namespace { const LinearConstraintManager::ConstraintIndex kInvalidConstraintIndex(-1); @@ -190,7 +168,8 @@ bool LinearConstraintManager::MaybeRemoveSomeInactiveConstraints( // we regenerate identical cuts for some reason. LinearConstraintManager::ConstraintIndex LinearConstraintManager::Add( LinearConstraint ct, bool* added) { - CHECK(!ct.vars.empty()); + DCHECK(!ct.vars.empty()); + DCHECK(!PossibleOverflow(integer_trail_, ct)) << ct.DebugString(); DCHECK(NoDuplicateVariable(ct)); SimplifyConstraint(&ct); DivideByGCD(&ct); diff --git a/ortools/sat/linear_constraint_manager.h b/ortools/sat/linear_constraint_manager.h index 819a04c528..d50260731c 100644 --- a/ortools/sat/linear_constraint_manager.h +++ b/ortools/sat/linear_constraint_manager.h @@ -41,11 +41,6 @@ namespace operations_research { namespace sat { -// Tests for possible overflow in the simplification of the given linear -// constraint. -bool PossibleOverflow(const IntegerTrail& integer_trail, - const LinearConstraint& constraint); - // Stores for each IntegerVariable its temporary LP solution. 
// // This is shared between all LinearProgrammingConstraint because in the corner diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index d3a439e4f7..08aadd60e4 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -276,10 +276,9 @@ LinearProgrammingConstraint::LinearProgrammingConstraint( } } -void LinearProgrammingConstraint::AddLinearConstraint( - const LinearConstraint& ct) { +void LinearProgrammingConstraint::AddLinearConstraint(LinearConstraint ct) { DCHECK(!lp_constraint_is_registered_); - constraint_manager_.Add(ct); + constraint_manager_.Add(std::move(ct)); } glop::ColIndex LinearProgrammingConstraint::GetMirrorVariable( diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index 906865349d..9aaa57aee9 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -146,7 +146,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, absl::Span vars); // Add a new linear constraint to this LP. - void AddLinearConstraint(const LinearConstraint& ct); + void AddLinearConstraint(LinearConstraint ct); // Set the coefficient of the variable in the objective. Calling it twice will // overwrite the previous value. 
diff --git a/ortools/sat/linear_relaxation.cc b/ortools/sat/linear_relaxation.cc index bf194e90ed..5deae946a7 100644 --- a/ortools/sat/linear_relaxation.cc +++ b/ortools/sat/linear_relaxation.cc @@ -43,6 +43,7 @@ #include "ortools/sat/intervals.h" #include "ortools/sat/linear_constraint.h" #include "ortools/sat/model.h" +#include "ortools/sat/precedences.h" #include "ortools/sat/presolve_util.h" #include "ortools/sat/routing_cuts.h" #include "ortools/sat/sat_base.h" @@ -204,10 +205,17 @@ void AppendRelaxationForEqualityEncoding(IntegerVariable var, } } - relaxation->linear_constraints.push_back(at_least_one.Build()); - relaxation->linear_constraints.push_back(encoding_ct.Build()); - relaxation->at_most_ones.push_back(at_most_one_ct); - ++*num_tight; + // It is possible that the linear1 encoding respect our overflow + // precondition but not the Var = sum bool * value one. In this case, we + // just don't encode it this way. Hopefully, most normal model will not run + // into this. + const LinearConstraint lc = encoding_ct.Build(); + if (!PossibleOverflow(*integer_trail, lc)) { + relaxation->linear_constraints.push_back(at_least_one.Build()); + relaxation->linear_constraints.push_back(std::move(lc)); + relaxation->at_most_ones.push_back(at_most_one_ct); + ++*num_tight; + } return; } @@ -221,13 +229,18 @@ void AppendRelaxationForEqualityEncoding(IntegerVariable var, CHECK(encoding_ct.AddLiteralTerm(value_literal.literal, rhs - value_literal.value)); } - relaxation->at_most_ones.push_back(at_most_one_ct); - relaxation->linear_constraints.push_back(encoding_ct.Build()); - ++*num_tight; + + const LinearConstraint lc = encoding_ct.Build(); + if (!PossibleOverflow(*integer_trail, lc)) { + relaxation->at_most_ones.push_back(at_most_one_ct); + relaxation->linear_constraints.push_back(std::move(lc)); + ++*num_tight; + } return; } // min + sum l_i * (value_i - min) <= var. + // Note that this might overflow in corner cases, so we need to prevent that. 
const IntegerValue d_min = min_not_encoded; LinearConstraintBuilder lower_bound_ct(&model, d_min, kMaxIntegerValue); lower_bound_ct.AddTerm(var, IntegerValue(1)); @@ -235,8 +248,14 @@ void AppendRelaxationForEqualityEncoding(IntegerVariable var, CHECK(lower_bound_ct.AddLiteralTerm(value_literal.literal, d_min - value_literal.value)); } + const LinearConstraint built_lb_ct = lower_bound_ct.Build(); + if (!PossibleOverflow(*integer_trail, built_lb_ct)) { + relaxation->linear_constraints.push_back(std::move(built_lb_ct)); + ++*num_loose; + } // var <= max + sum l_i * (value_i - max). + // Note that this might overflow in corner cases, so we need to prevent that. const IntegerValue d_max = max_not_encoded; LinearConstraintBuilder upper_bound_ct(&model, kMinIntegerValue, d_max); upper_bound_ct.AddTerm(var, IntegerValue(1)); @@ -244,12 +263,14 @@ void AppendRelaxationForEqualityEncoding(IntegerVariable var, CHECK(upper_bound_ct.AddLiteralTerm(value_literal.literal, d_max - value_literal.value)); } + const LinearConstraint built_ub_ct = upper_bound_ct.Build(); + if (!PossibleOverflow(*integer_trail, built_ub_ct)) { + relaxation->linear_constraints.push_back(std::move(built_ub_ct)); + ++*num_loose; + } // Note that empty/trivial constraints will be filtered later. relaxation->at_most_ones.push_back(at_most_one_ct); - relaxation->linear_constraints.push_back(lower_bound_ct.Build()); - relaxation->linear_constraints.push_back(upper_bound_ct.Build()); - ++*num_loose; } void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var, @@ -266,9 +287,8 @@ void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var, // And also add the implications between used literals. 
{ IntegerValue prev_used_bound = integer_trail->LowerBound(var); - LinearConstraintBuilder lb_constraint(&model, prev_used_bound, - kMaxIntegerValue); - lb_constraint.AddTerm(var, IntegerValue(1)); + LinearConstraintBuilder builder(&model, prev_used_bound, kMaxIntegerValue); + builder.AddTerm(var, IntegerValue(1)); LiteralIndex prev_literal_index = kNoLiteralIndex; for (const auto entry : greater_than_encoding) { if (entry.value <= prev_used_bound) continue; @@ -277,7 +297,7 @@ void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var, const IntegerValue diff = prev_used_bound - entry.value; // Skip the entry if the literal doesn't have a view. - if (!lb_constraint.AddLiteralTerm(entry.literal, diff)) continue; + if (!builder.AddLiteralTerm(entry.literal, diff)) continue; if (prev_literal_index != kNoLiteralIndex) { // Add var <= prev_var, which is the same as var + not(prev_var) <= 1 relaxation->at_most_ones.push_back( @@ -286,26 +306,29 @@ void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var, prev_used_bound = entry.value; prev_literal_index = literal_index; } - relaxation->linear_constraints.push_back(lb_constraint.Build()); + + // Note that by construction, this shouldn't be able to overflow. + relaxation->linear_constraints.push_back(builder.Build()); } // Do the same for the var <= side by using NegationOfVar(). // Note that we do not need to add the implications between literals again. 
{ IntegerValue prev_used_bound = integer_trail->LowerBound(NegationOf(var)); - LinearConstraintBuilder lb_constraint(&model, prev_used_bound, - kMaxIntegerValue); - lb_constraint.AddTerm(var, IntegerValue(-1)); + LinearConstraintBuilder builder(&model, prev_used_bound, kMaxIntegerValue); + builder.AddTerm(var, IntegerValue(-1)); for (const auto entry : encoder->PartialGreaterThanEncoding(NegationOf(var))) { if (entry.value <= prev_used_bound) continue; const IntegerValue diff = prev_used_bound - entry.value; // Skip the entry if the literal doesn't have a view. - if (!lb_constraint.AddLiteralTerm(entry.literal, diff)) continue; + if (!builder.AddLiteralTerm(entry.literal, diff)) continue; prev_used_bound = entry.value; } - relaxation->linear_constraints.push_back(lb_constraint.Build()); + + // Note that by construction, this shouldn't be able to overflow. + relaxation->linear_constraints.push_back(builder.Build()); } } @@ -1210,6 +1233,7 @@ void TryToLinearizeConstraint(const CpModelProto& /*model_proto*/, CHECK_EQ(model->GetOrCreate()->CurrentDecisionLevel(), 0); DCHECK_GT(linearization_level, 0); + const int old_index = relaxation->linear_constraints.size(); switch (ct.constraint_case()) { case ConstraintProto::ConstraintCase::kBoolOr: { if (linearization_level > 1) { @@ -1310,6 +1334,18 @@ void TryToLinearizeConstraint(const CpModelProto& /*model_proto*/, default: { } } + + if (DEBUG_MODE) { + const auto& integer_trail = *model->GetOrCreate(); + for (int i = old_index; i < relaxation->linear_constraints.size(); ++i) { + const bool issue = + PossibleOverflow(integer_trail, relaxation->linear_constraints[i]); + if (issue) { + LOG(INFO) << "Possible overflow in linearization of: " + << ct.ShortDebugString(); + } + } + } } // Cut generators. @@ -1657,10 +1693,10 @@ void AddLinMaxCutGenerator(const ConstraintProto& ct, Model* m, // it is harder to detect that if all literal are false then none of the implied // value can be taken. 
void AppendElementEncodingRelaxation(Model* m, LinearRelaxation* relaxation) { + auto* integer_trail = m->GetOrCreate(); auto* element_encodings = m->GetOrCreate(); int num_exactly_one_elements = 0; - for (const IntegerVariable var : element_encodings->GetElementEncodedVariables()) { for (const auto& [index, literal_value_list] : @@ -1670,19 +1706,22 @@ void AppendElementEncodingRelaxation(Model* m, LinearRelaxation* relaxation) { min_value = std::min(min_value, literal_value.value); } - LinearConstraintBuilder linear_encoding(m, -min_value, -min_value); - linear_encoding.AddTerm(var, IntegerValue(-1)); + LinearConstraintBuilder builder(m, -min_value, -min_value); + builder.AddTerm(var, IntegerValue(-1)); for (const auto& [value, literal] : literal_value_list) { const IntegerValue delta_min = value - min_value; if (delta_min != 0) { // If the term has no view, we abort. - if (!linear_encoding.AddLiteralTerm(literal, delta_min)) { + if (!builder.AddLiteralTerm(literal, delta_min)) { return; } } } ++num_exactly_one_elements; - relaxation->linear_constraints.push_back(linear_encoding.Build()); + const LinearConstraint lc = builder.Build(); + if (!PossibleOverflow(*integer_trail, lc)) { + relaxation->linear_constraints.push_back(std::move(lc)); + } } } @@ -1728,7 +1767,7 @@ LinearRelaxation ComputeLinearRelaxation(const CpModelProto& model_proto, var, *m, &relaxation, &num_tight_equality_encoding_relaxations, &num_loose_equality_encoding_relaxations); - // The we try to linearize the inequality encoding. Note that on some + // Then we try to linearize the inequality encoding. Note that on some // problem like pizza27i.mps.gz, adding both equality and inequality // encoding is a must. 
// diff --git a/ortools/sat/presolve_util.cc b/ortools/sat/presolve_util.cc index b1c1067678..061cf48575 100644 --- a/ortools/sat/presolve_util.cc +++ b/ortools/sat/presolve_util.cc @@ -452,28 +452,9 @@ ActivityBoundHelper::PartitionLiteralsIntoAmo(absl::Span literals) { } // We have the partition, lets construct the spans now. - part_starts_.assign(num_parts, 0); - part_sizes_.assign(num_parts, 0); - part_ends_.assign(num_parts, 0); - for (int i = 0; i < num_literals; ++i) { - DCHECK_GE(partition_[i], 0); - DCHECK_LT(partition_[i], num_parts); - part_sizes_[partition_[i]]++; - } - for (int p = 1; p < num_parts; ++p) { - part_starts_[p] = part_ends_[p] = part_sizes_[p - 1] + part_starts_[p - 1]; - } - reordered_literals_.resize(num_literals); - for (int i = 0; i < num_literals; ++i) { - const int p = partition_[i]; - reordered_literals_[part_ends_[p]++] = literals[i]; - } - std::vector> result; - for (int p = 0; p < num_parts; ++p) { - result.push_back( - absl::MakeSpan(&reordered_literals_[part_starts_[p]], part_sizes_[p])); - } - return result; + part_to_literals_.ResetFromFlatMapping(partition_, literals); + DCHECK_EQ(part_to_literals_.size(), num_parts); + return part_to_literals_.AsVectorOfSpan(); } bool ActivityBoundHelper::IsAmo(absl::Span literals) { diff --git a/ortools/sat/presolve_util.h b/ortools/sat/presolve_util.h index 4c4180c6ba..6a0e80f3d0 100644 --- a/ortools/sat/presolve_util.h +++ b/ortools/sat/presolve_util.h @@ -31,6 +31,7 @@ #include "ortools/base/types.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_utils.h" +#include "ortools/sat/util.h" #include "ortools/util/bitset.h" #include "ortools/util/logging.h" #include "ortools/util/sorted_interval_list.h" @@ -211,7 +212,7 @@ class ActivityBoundHelper { bool PresolveEnforcement(absl::Span refs, ConstraintProto* ct, absl::flat_hash_set* literals_at_true); - // partition the list of literals into disjoint at most ones. 
The returned + // Partition the list of literals into disjoint at most ones. The returned // spans are only valid until another function from this class is used. std::vector> PartitionLiteralsIntoAmo( absl::Span literals); @@ -253,10 +254,7 @@ class ActivityBoundHelper { std::vector second_max_by_partition_; // Used by PartitionLiteralsIntoAmo(). - std::vector part_starts_; - std::vector part_ends_; - std::vector part_sizes_; - std::vector reordered_literals_; + CompactVectorVector part_to_literals_; absl::flat_hash_set triggered_amo_; }; diff --git a/ortools/sat/samples/BUILD.bazel b/ortools/sat/samples/BUILD.bazel index fd9a0fd5e5..bb50f97991 100644 --- a/ortools/sat/samples/BUILD.bazel +++ b/ortools/sat/samples/BUILD.bazel @@ -63,6 +63,8 @@ code_sample_py(name = "overlapping_intervals_sample_sat") code_sample_cc_py(name = "rabbits_and_pheasants_sat") +code_sample_py(name = "ranking_circuit_sample_sat") + code_sample_cc_py(name = "ranking_sample_sat") code_sample_cc_py(name = "reified_sample_sat") diff --git a/ortools/sat/samples/ranking_circuit_sample_sat.py b/ortools/sat/samples/ranking_circuit_sample_sat.py new file mode 100644 index 0000000000..6ed5ad9c9e --- /dev/null +++ b/ortools/sat/samples/ranking_circuit_sample_sat.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python3 +# Copyright 2010-2022 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Code sample to demonstrate how to rank intervals using a circuit.""" + +from typing import List, Sequence + + +from ortools.sat.python import cp_model + + +def rank_tasks_with_circuit( + model: cp_model.CpModel, + starts: Sequence[cp_model.IntVar], + durations: Sequence[int], + presences: Sequence[cp_model.IntVar], + ranks: Sequence[cp_model.IntVar], +): + """This method uses a circuit constraint to rank tasks. + + This method assumes that all starts are disjoint, meaning that all tasks have + a strictly positive duration, and they appear in the same NoOverlap + constraint. + + To implement this ranking, we will create a dense graph with num_tasks + 1 + nodes. + The extra node (with id 0) will be used to decide which task is first with + its only outgoing arc, and which task is last with its only incoming arc. + Each task i will be associated with id i + 1, and an arc between i + 1 and j + + 1 indicates that j is the immediate successor of i. + + The circuit constraint ensures there is at most 1 hamiltonian path of + length > 1. If no such path exists, then no tasks are active. + + The multiple enforced linear constraints are meant to ensure the compatibility + between the order of starts and the order of ranks. + + Args: + model: The CpModel to add the constraints to. + starts: The array of starts variables of all tasks. + durations: the durations of all tasks. + presences: The array of presence variables of all tasks. + ranks: The array of rank variables of all tasks. + """ + + num_tasks = len(starts) + all_tasks = range(num_tasks) + + arcs: List[cp_model.ArcT] = [] + for i in all_tasks: + # if node i is first. + start_lit = model.NewBoolVar(f"start_{i}") + arcs.append((0, i + 1, start_lit)) + model.Add(ranks[i] == 0).OnlyEnforceIf(start_lit) + + # As there are no other constraints on the problem, we can add this + # redundant constraint. + model.Add(starts[i] == 0).OnlyEnforceIf(start_lit) + + # if node i is last. 
+ end_lit = model.NewBoolVar(f"end_{i}") + arcs.append((i + 1, 0, end_lit)) + + for j in all_tasks: + if i == j: + arcs.append((i + 1, i + 1, presences[i].Not())) + model.Add(ranks[i] == -1).OnlyEnforceIf(presences[i].Not()) + else: + literal = model.NewBoolVar(f"arc_{i}_to_{j}") + arcs.append((i + 1, j + 1, literal)) + model.Add(ranks[j] == ranks[i] + 1).OnlyEnforceIf(literal) + + # To perform the transitive reduction from precedences to successors, + # we need to tie the starts of the tasks with 'literal'. + # In a pure problem, the following inequality could be an equality. + # It is not true in general. + # + # Note that we could use this literal to penalize the transition, add an + # extra delay to the precedence. + model.Add(starts[j] >= starts[i] + durations[i]).OnlyEnforceIf(literal) + + # Manage the empty circuit + empty = model.NewBoolVar("empty") + arcs.append((0, 0, empty)) + + for i in all_tasks: + model.AddImplication(empty, presences[i].Not()) + + # Add the circuit constraint. + model.AddCircuit(arcs) + + +def ranking_sample_sat(): + """Ranks tasks in a NoOverlap constraint.""" + + model = cp_model.CpModel() + horizon = 100 + num_tasks = 4 + all_tasks = range(num_tasks) + + starts = [] + durations = [] + intervals = [] + presences = [] + ranks = [] + + # Creates intervals, half of them are optional. + for t in all_tasks: + start = model.NewIntVar(0, horizon, f"start[{t}]") + duration = t + 1 + presence = model.NewBoolVar(f"presence[{t}]") + interval = model.NewOptionalFixedSizeIntervalVar( + start, duration, presence, f"opt_interval[{t}]" + ) + if t < num_tasks // 2: + model.Add(presence == 1) + + starts.append(start) + durations.append(duration) + intervals.append(interval) + presences.append(presence) + + # Ranks = -1 if and only if the tasks is not performed. + ranks.append(model.NewIntVar(-1, num_tasks - 1, f"rank[{t}]")) + + # Adds NoOverlap constraint. + model.AddNoOverlap(intervals) + + # Adds ranking constraint. 
+ rank_tasks_with_circuit(model, starts, durations, presences, ranks) + + # Adds a constraint on ranks. + model.Add(ranks[0] < ranks[1]) + + # Creates makespan variable. + makespan = model.NewIntVar(0, horizon, "makespan") + for t in all_tasks: + model.Add(starts[t] + durations[t] <= makespan).OnlyEnforceIf(presences[t]) + + # Minimizes makespan - fixed gain per task performed. + # As the fixed cost is less than the duration of the last interval, + # the solver will not perform the last interval. + model.Minimize(2 * makespan - 7 * sum(presences[t] for t in all_tasks)) + + # Solves the model. + solver = cp_model.CpSolver() + status = solver.Solve(model) + + if status == cp_model.OPTIMAL: + # Prints out the makespan and the start times and ranks of all tasks. + print(f"Optimal cost: {solver.ObjectiveValue()}") + print(f"Makespan: {solver.Value(makespan)}") + for t in all_tasks: + if solver.Value(presences[t]): + print( + f"Task {t} starts at {solver.Value(starts[t])} " + f"with rank {solver.Value(ranks[t])}" + ) + else: + print( + f"Task {t} is not performed " + f"and ranked at {solver.Value(ranks[t])}" + ) + else: + print(f"Solver exited with nonoptimal status: {status}") + + +ranking_sample_sat() diff --git a/ortools/sat/samples/ranking_sample_sat.cc b/ortools/sat/samples/ranking_sample_sat.cc index e37b444326..6611969148 100644 --- a/ortools/sat/samples/ranking_sample_sat.cc +++ b/ortools/sat/samples/ranking_sample_sat.cc @@ -31,9 +31,9 @@ void RankingSampleSat() { const int kHorizon = 100; const int kNumTasks = 4; - auto add_task_ranking = [&cp_model](const std::vector& starts, - const std::vector& presences, - const std::vector& ranks) { + auto add_task_ranking = [&cp_model](absl::Span starts, + absl::Span presences, + absl::Span ranks) { const int num_tasks = starts.size(); // Creates precedence variables between pairs of intervals. 
diff --git a/ortools/sat/util.h b/ortools/sat/util.h index 4cebb8b115..d45dfd4330 100644 --- a/ortools/sat/util.h +++ b/ortools/sat/util.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -44,6 +45,67 @@ namespace operations_research { namespace sat { +// A simple class with always IdentityMap[t] == t. +// This is to avoid allocating vector with std::iota() in some Apis. +template +class IdentityMap { + public: + T operator[](T t) const { return t; } +}; + +// Small utility class to store a vector> where one can only append new +// vector and never change previously added ones. This allows to store a static +// key -> value(s) mapping. +// +// This is a lot more compact memorywise and thus faster than vector>. +// Note that we implement a really small subset of the vector> API. +// +// We support int and StrongType for key K and any copyable type for value V. +template +class CompactVectorVector { + public: + // Size of the "key" space, always in [0, size()). + size_t size() const; + + // Getters, either via [] or via a wrapping to be compatible with older api. + // + // Warning: Spans are only valid until the next modification! + absl::Span operator[](K key) const; + std::vector> AsVectorOfSpan() const; + + // Restore to empty vector>. + void clear(); + + // Given a flat mapping (keys[i] -> values[i]) with two parallel vectors, not + // necessarily sorted by key, regroup the same key so that + // CompactVectorVector[key] list all values in the order in which they appear. + // + // We only check keys.size(), so this can be used with IdentityMap() as + // second argument. + template + void ResetFromFlatMapping(Keys keys, Values values); + + // Append a new entry. + // Returns the previous size() as this is convenient for how we use it. + int Add(absl::Span values); + + // Hacky: same as Add() but for sat::Literal or any type from which we can get + // a value type V via L.Index().value(). 
+ template + int AddLiterals(const std::vector& wrapped_values); + + private: + // Convert int and StrongInt to normal int. + static int InternalKey(K key); + + // TODO(user): with a sentinel, we can infer + // size_[i] = starts_[i + 1] - starts_[i]. This should be slightly faster. + // Benchmark and change. + std::vector starts_; + std::vector sizes_; + std::vector buffer_; +}; + // Prints a positive number with separators for easier reading (ex: 1'348'065). std::string FormatCounter(int64_t num); @@ -581,6 +643,108 @@ IntType FloorOfRatio(IntType numerator, IntType denominator) { return CeilOrFloorOfRatio(numerator, denominator); } +template +inline int CompactVectorVector::Add(absl::Span values) { + const int index = size(); + starts_.push_back(buffer_.size()); + sizes_.push_back(values.size()); + buffer_.insert(buffer_.end(), values.begin(), values.end()); + return index; +} + +template +template +inline int CompactVectorVector::AddLiterals( + const std::vector& wrapped_values) { + const int index = size(); + starts_.push_back(buffer_.size()); + sizes_.push_back(wrapped_values.size()); + for (const L wrapped_value : wrapped_values) { + buffer_.push_back(wrapped_value.Index().value()); + } + return index; +} + +// We need to support both StrongType and normal int. 
+template +inline int CompactVectorVector::InternalKey(K key) { + if constexpr (std::is_same_v) { + return key; + } else { + return key.value(); + } +} + +template +inline absl::Span CompactVectorVector::operator[](K key) const { + DCHECK_GE(key, 0); + DCHECK_LT(key, starts_.size()); + DCHECK_LT(key, sizes_.size()); + const int k = InternalKey(key); + const size_t size = static_cast(sizes_[k]); + if (size == 0) return {}; + return {&buffer_[starts_[k]], size}; +} + +template +inline std::vector> +CompactVectorVector::AsVectorOfSpan() const { + std::vector> result(starts_.size()); + for (int k = 0; k < starts_.size(); ++k) { + result[k] = (*this)[k]; + } + return result; +} + +template +inline void CompactVectorVector::clear() { + starts_.clear(); + sizes_.clear(); + buffer_.clear(); +} + +template +inline size_t CompactVectorVector::size() const { + return starts_.size(); +} + +template +template +inline void CompactVectorVector::ResetFromFlatMapping(Keys keys, + Values values) { + if (keys.empty()) return clear(); + + // Compute maximum index. + int max_key = 0; + for (const K key : keys) { + max_key = std::max(max_key, InternalKey(key) + 1); + } + + // Compute sizes_; + sizes_.assign(max_key, 0); + for (const K key : keys) { + sizes_[InternalKey(key)]++; + } + + // Compute starts_; + starts_.assign(max_key, 0); + for (int k = 1; k < max_key; ++k) { + starts_[k] = starts_[k - 1] + sizes_[k - 1]; + } + + // Copy data and uses starts as temporary indices. + buffer_.resize(keys.size()); + for (int i = 0; i < keys.size(); ++i) { + buffer_[starts_[InternalKey(keys[i])]++] = values[i]; + } + + // Restore starts_. + for (int k = max_key - 1; k > 0; --k) { + starts_[k] = starts_[k - 1]; + } + starts_[0] = 0; +} + } // namespace sat } // namespace operations_research