diff --git a/ortools/base/file.cc b/ortools/base/file.cc index 0c6b45b708..8765ffc589 100644 --- a/ortools/base/file.cc +++ b/ortools/base/file.cc @@ -242,7 +242,11 @@ absl::Status WriteString(File* file, absl::string_view contents, absl::Status SetContents(absl::string_view filename, absl::string_view contents, Options options) { File* file; +#if defined(_MSC_VER) + auto status = file::Open(filename, "wb", &file, options); +#else auto status = file::Open(filename, "w", &file, options); +#endif if (!status.ok()) return status; status = file::WriteString(file, contents, options); status.Update(file->Close(options)); // Even if WriteString() fails! diff --git a/ortools/bop/BUILD.bazel b/ortools/bop/BUILD.bazel index 8e85efbf51..b52b9393e5 100644 --- a/ortools/bop/BUILD.bazel +++ b/ortools/bop/BUILD.bazel @@ -13,8 +13,8 @@ load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") +load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") -load("@rules_python//python:proto.bzl", "py_proto_library") package(default_visibility = ["//visibility:public"]) diff --git a/ortools/bop/bop_base.h b/ortools/bop/bop_base.h index 179f494e4b..05b6426999 100644 --- a/ortools/bop/bop_base.h +++ b/ortools/bop/bop_base.h @@ -23,7 +23,6 @@ #include "absl/base/thread_annotations.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" -#include "ortools/base/macros.h" #include "ortools/base/strong_vector.h" #include "ortools/bop/bop_parameters.pb.h" #include "ortools/bop/bop_solution.h" diff --git a/ortools/bop/bop_ls.h b/ortools/bop/bop_ls.h index 57170c7b35..2154a29f83 100644 --- a/ortools/bop/bop_ls.h +++ b/ortools/bop/bop_ls.h @@ -41,7 +41,6 @@ #include "absl/random/random.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" -#include "ortools/base/macros.h" #include 
"ortools/base/strong_vector.h" #include "ortools/bop/bop_base.h" #include "ortools/bop/bop_parameters.pb.h" @@ -99,7 +98,7 @@ class SatWrapper { // Backtracks the last decision if any. void BacktrackOneLevel(); - // Bactracks all the decisions. + // Backtracks all the decisions. void BacktrackAll(); // Extracts any new information learned during the search. @@ -639,7 +638,7 @@ class LocalSearchAssignmentIterator { // For each set of explored decisions, we store it in this table so that we // don't explore decisions (a, b) and later (b, a) for instance. The decisions - // are converted to int32_t, sorted and padded with 0 before beeing inserted + // are converted to int32_t, sorted and padded with 0 before being inserted // here. // // TODO(user): We may still miss some equivalent states because it is possible diff --git a/ortools/bop/integral_solver.h b/ortools/bop/integral_solver.h index 90026889d1..2a0a9712bd 100644 --- a/ortools/bop/integral_solver.h +++ b/ortools/bop/integral_solver.h @@ -15,8 +15,6 @@ #define OR_TOOLS_BOP_INTEGRAL_SOLVER_H_ #include "absl/base/attributes.h" -#include "absl/base/port.h" -#include "ortools/base/macros.h" #include "ortools/bop/bop_parameters.pb.h" #include "ortools/bop/bop_types.h" #include "ortools/lp_data/lp_data.h" diff --git a/ortools/glop/BUILD.bazel b/ortools/glop/BUILD.bazel index da97e5d2a8..bf75866933 100644 --- a/ortools/glop/BUILD.bazel +++ b/ortools/glop/BUILD.bazel @@ -13,8 +13,8 @@ load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") +load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") -load("@rules_python//python:proto.bzl", "py_proto_library") package( default_visibility = ["//visibility:public"], diff --git a/ortools/glop/lu_factorization.cc b/ortools/glop/lu_factorization.cc index e34fa6c5d5..89d1706077 100644 --- a/ortools/glop/lu_factorization.cc 
+++ b/ortools/glop/lu_factorization.cc @@ -565,20 +565,6 @@ Fractional LuFactorization::ComputeInverseInfinityNormUpperBound() const { upper_.ComputeInverseInfinityNormUpperBound(); } -namespace { -// Returns the density of the sparse column 'b' w.r.t. the given permutation. -double ComputeDensity(const SparseColumn& b, const RowPermutation& row_perm) { - double density = 0.0; - for (const SparseColumn::Entry e : b) { - if (row_perm[e.row()] != kNonPivotal && e.coefficient() != 0.0) { - ++density; - } - } - const RowIndex num_rows = row_perm.size(); - return density / num_rows.value(); -} -} // anonymous namespace - void LuFactorization::ComputeTransposeUpper() { SCOPED_TIME_STAT(&stats_); transpose_upper_.PopulateFromTranspose(upper_); diff --git a/ortools/glop/preprocessor.cc b/ortools/glop/preprocessor.cc index e70f0eb5f6..41e00bbcfa 100644 --- a/ortools/glop/preprocessor.cc +++ b/ortools/glop/preprocessor.cc @@ -44,11 +44,6 @@ namespace glop { using ::util::Reverse; namespace { -// Returns an interval as an human readable string for debugging. -std::string IntervalString(Fractional lb, Fractional ub) { - return absl::StrFormat("[%g, %g]", lb, ub); -} - #if defined(_MSC_VER) double trunc(double d) { return d > 0 ? floor(d) : ceil(d); } #endif diff --git a/ortools/glop/primal_edge_norms.h b/ortools/glop/primal_edge_norms.h index 4f5c6ecf9b..cfcc5c9e66 100644 --- a/ortools/glop/primal_edge_norms.h +++ b/ortools/glop/primal_edge_norms.h @@ -203,7 +203,6 @@ class PrimalEdgeNorms { Stats stats_; // Booleans to control what happens on the next ChooseEnteringColumn() call. 
- bool must_refactorize_basis_; bool recompute_edge_squared_norms_; bool reset_devex_weights_; diff --git a/ortools/glop/revised_simplex.cc b/ortools/glop/revised_simplex.cc index 0ed83b1e36..8cc50ec06a 100644 --- a/ortools/glop/revised_simplex.cc +++ b/ortools/glop/revised_simplex.cc @@ -2861,8 +2861,7 @@ Status RevisedSimplex::PrimalMinimize(TimeLimit* time_limit) { // TODO(user): we may loop a bit more than the actual number of iteration. // fix. - IF_STATS_ENABLED( - ScopedTimeDistributionUpdater timer(&iteration_stats_.total)); + ScopedTimeDistributionUpdater timer(&iteration_stats_.total); // Trigger a refactorization if one of the class we use request it. if (!refactorize && reduced_costs_.NeedsBasisRefactorization()) { @@ -3164,8 +3163,7 @@ Status RevisedSimplex::DualMinimize(bool feasibility_phase, // TODO(user): we may loop a bit more than the actual number of iteration. // fix. - IF_STATS_ENABLED( - ScopedTimeDistributionUpdater timer(&iteration_stats_.total)); + ScopedTimeDistributionUpdater timer(&iteration_stats_.total); // Trigger a refactorization if one of the class we use request it. // @@ -3488,8 +3486,7 @@ Status RevisedSimplex::PrimalPush(TimeLimit* time_limit) { AdvanceDeterministicTime(time_limit); if (time_limit->LimitReached()) break; - IF_STATS_ENABLED( - ScopedTimeDistributionUpdater timer(&iteration_stats_.total)); + ScopedTimeDistributionUpdater timer(&iteration_stats_.total); GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize)); if (basis_factorization_.IsRefactorized()) { CorrectErrorsOnVariableValues(); diff --git a/ortools/glop/update_row.h b/ortools/glop/update_row.h index a8ef34844a..8f9822a1d8 100644 --- a/ortools/glop/update_row.h +++ b/ortools/glop/update_row.h @@ -145,7 +145,6 @@ class UpdateRow { DenseRow coefficient_; // Boolean used to avoid recomputing many times the same thing. 
- bool compute_update_row_; RowIndex left_inverse_computed_for_ = kInvalidRow; RowIndex update_row_computed_for_ = kInvalidRow; diff --git a/ortools/graph/eulerian_path_test.cc b/ortools/graph/eulerian_path_test.cc index 1da9607324..233d3a9357 100644 --- a/ortools/graph/eulerian_path_test.cc +++ b/ortools/graph/eulerian_path_test.cc @@ -15,11 +15,10 @@ #include +#include "absl/base/macros.h" #include "benchmark/benchmark.h" #include "gtest/gtest.h" #include "ortools/base/gmock.h" -#include "ortools/base/logging.h" -#include "ortools/base/macros.h" #include "ortools/graph/graph.h" namespace operations_research { diff --git a/ortools/graph/generic_max_flow_test.cc b/ortools/graph/generic_max_flow_test.cc index 2b57a9357a..fd9f307886 100644 --- a/ortools/graph/generic_max_flow_test.cc +++ b/ortools/graph/generic_max_flow_test.cc @@ -32,7 +32,6 @@ #include "gtest/gtest.h" #include "ortools/base/gmock.h" #include "ortools/base/logging.h" -#include "ortools/graph/ebert_graph.h" #include "ortools/graph/flow_graph.h" #include "ortools/graph/graph.h" #include "ortools/linear_solver/linear_solver.h" @@ -43,6 +42,8 @@ namespace { using ::testing::ContainerEq; using ::testing::WhenSorted; +using FlowQuantity = int64_t; + template typename GenericMaxFlow::Status MaxFlowTester( const typename Graph::NodeIndex num_nodes, diff --git a/ortools/graph/graph.h b/ortools/graph/graph.h index bc71f4f920..dc25dfc933 100644 --- a/ortools/graph/graph.h +++ b/ortools/graph/graph.h @@ -18,6 +18,9 @@ // time. Its design is based on the experience acquired by the Operations // Research team in their various graph algorithm implementations. // +// Also see README.md#basegraph for a more graphical documentation of the +// concepts presented here. +// // The main ideas are: // - Graph nodes and arcs are represented by integers. // - Node or arc annotations (weight, cost, ...) 
are not part of the graph @@ -162,18 +165,15 @@ #include #include #include -#include #include #include -#include "absl/base/port.h" +#include "absl/base/attributes.h" #include "absl/debugging/leak_check.h" #include "absl/log/check.h" #include "absl/types/span.h" #include "ortools/base/constant_divisor.h" #include "ortools/base/logging.h" -#include "ortools/base/macros.h" -#include "ortools/base/types.h" #include "ortools/graph/iterators.h" namespace util { @@ -366,7 +366,8 @@ class ListGraph : public BaseGraph { BeginEndWrapper OutgoingArcs(NodeIndexType node) const; // Advanced usage. Same as OutgoingArcs(), but allows to restart the iteration - // from an already known outgoing arc of the given node. + // from an already known outgoing arc of the given node. If `from` is + // `kNilArc`, an empty range is returned. BeginEndWrapper OutgoingArcsStartingFrom( NodeIndexType node, ArcIndexType from) const; @@ -434,9 +435,16 @@ class StaticGraph : public BaseGraph { NodeIndexType Head(ArcIndexType arc) const; NodeIndexType Tail(ArcIndexType arc) const; ArcIndexType OutDegree(NodeIndexType node) const; // Work in O(1). - BeginEndWrapper OutgoingArcs(NodeIndexType node) const; - BeginEndWrapper OutgoingArcsStartingFrom( - NodeIndexType node, ArcIndexType from) const; + IntegerRange OutgoingArcs(NodeIndexType node) const { + return IntegerRange(start_[node], DirectArcLimit(node)); + } + IntegerRange OutgoingArcsStartingFrom(NodeIndexType node, + ArcIndexType from) const { + DCHECK_GE(from, start_[node]); + const ArcIndexType limit = DirectArcLimit(node); + return IntegerRange(from == Base::kNilArc ? limit : from, + limit); + } // This loops over the heads of the OutgoingArcs(node). It is just a more // convenient way to achieve this. Moreover this interface is used by some @@ -455,12 +463,14 @@ class StaticGraph : public BaseGraph { ArcIndexType DirectArcLimit(NodeIndexType node) const { DCHECK(is_built_); DCHECK(Base::IsNodeValid(node)); - return node + 1 < num_nodes_ ? 
start_[node + 1] : num_arcs_; + return start_[node + 1]; } bool is_built_; bool arc_in_order_; NodeIndexType last_tail_seen_; + // First outgoing arc for each node. If `num_nodes_ > 0`, the "past-the-end" + // value is a sentinel (`start_[num_nodes_] == num_arcs_`). std::vector start_; std::vector head_; std::vector tail_; @@ -599,14 +609,21 @@ class ReverseArcStaticGraph ArcIndexType OutDegree(NodeIndexType node) const; ArcIndexType InDegree(NodeIndexType node) const; - BeginEndWrapper OutgoingArcs(NodeIndexType node) const; + IntegerRange OutgoingArcs(NodeIndexType node) const { + return IntegerRange(start_[node], DirectArcLimit(node)); + } + IntegerRange OutgoingArcsStartingFrom(NodeIndexType node, + ArcIndexType from) const { + DCHECK_GE(from, start_[node]); + const ArcIndexType limit = DirectArcLimit(node); + return IntegerRange(from == Base::kNilArc ? limit : from, + limit); + } BeginEndWrapper IncomingArcs(NodeIndexType node) const; BeginEndWrapper OutgoingOrOppositeIncomingArcs(NodeIndexType node) const; BeginEndWrapper OppositeIncomingArcs( NodeIndexType node) const; - BeginEndWrapper OutgoingArcsStartingFrom( - NodeIndexType node, ArcIndexType from) const; BeginEndWrapper IncomingArcsStartingFrom( NodeIndexType node, ArcIndexType from) const; BeginEndWrapper @@ -636,16 +653,20 @@ class ReverseArcStaticGraph ArcIndexType DirectArcLimit(NodeIndexType node) const { DCHECK(is_built_); DCHECK(Base::IsNodeValid(node)); - return node + 1 < num_nodes_ ? start_[node + 1] : num_arcs_; + return start_[node + 1]; } ArcIndexType ReverseArcLimit(NodeIndexType node) const { DCHECK(is_built_); DCHECK(Base::IsNodeValid(node)); - return node + 1 < num_nodes_ ? reverse_start_[node + 1] : 0; + return reverse_start_[node + 1]; } bool is_built_; + // First outgoing arc for each node. If `num_nodes_ > 0`, the "past-the-end" + // value is a sentinel (`start_[num_nodes_] == num_arcs_`). std::vector start_; + // First reverse outgoing arc for each node. 
If `num_nodes_ > 0`, + // the "past-the-end" value is a sentinel (`reverse_start_[num_nodes_] == 0`). std::vector reverse_start_; SVector head_; SVector opposite_; @@ -948,13 +969,15 @@ template void BaseGraph:: ComputeCumulativeSum(std::vector* v) { + DCHECK_EQ(v->size(), num_nodes_ + 1); ArcIndexType sum = 0; - for (int i = 0; i < num_nodes_; ++i) { + for (NodeIndexType i = 0; i < num_nodes_; ++i) { ArcIndexType temp = (*v)[i]; (*v)[i] = sum; sum += temp; } DCHECK(sum == num_arcs_); + (*v)[num_nodes_] = sum; // Sentinel. } // Given the tail of arc #i in (*head)[i] and the head of arc #i in (*head)[~i] @@ -971,10 +994,10 @@ void BaseGraph:: // Computes the outgoing degree of each nodes and check if we need to permute // something or not. Note that the tails are currently stored in the positive // range of the SVector head. - start->assign(num_nodes_, 0); + start->assign(num_nodes_ + 1, 0); int last_tail_seen = 0; bool permutation_needed = false; - for (int i = 0; i < num_arcs_; ++i) { + for (ArcIndexType i = 0; i < num_arcs_; ++i) { NodeIndexType tail = (*head)[i]; if (!permutation_needed) { permutation_needed = tail < last_tail_seen; @@ -987,7 +1010,7 @@ void BaseGraph:: // Abort early if we do not need the permutation: we only need to put the // heads in the positive range. if (!permutation_needed) { - for (int i = 0; i < num_arcs_; ++i) { + for (ArcIndexType i = 0; i < num_arcs_; ++i) { (*head)[i] = (*head)[~i]; } if (permutation != nullptr) { @@ -999,19 +1022,20 @@ void BaseGraph:: // Computes the forward arc permutation. // Note that this temporarily alters the start vector. std::vector perm(num_arcs_); - for (int i = 0; i < num_arcs_; ++i) { + for (ArcIndexType i = 0; i < num_arcs_; ++i) { perm[i] = (*start)[(*head)[i]]++; } // Restore in (*start)[i] the index of the first arc with tail >= i. 
- for (int i = num_nodes_ - 1; i > 0; --i) { + DCHECK_GE(num_nodes_, 1); + for (NodeIndexType i = num_nodes_ - 1; i > 0; --i) { (*start)[i] = (*start)[i - 1]; } (*start)[0] = 0; // Permutes the head into their final position in head. // We do not need the tails anymore at this point. - for (int i = 0; i < num_arcs_; ++i) { + for (ArcIndexType i = 0; i < num_arcs_; ++i) { (*head)[perm[i]] = (*head)[~i]; } if (permutation != nullptr) { @@ -1222,8 +1246,6 @@ StaticGraph::FromArcs(NodeIndexType num_nodes, return g; } -DEFINE_RANGE_BASED_ARC_ITERATION(StaticGraph, Outgoing); - template absl::Span StaticGraph::operator[](NodeIndexType node) const { @@ -1242,7 +1264,7 @@ void StaticGraph::ReserveNodes( NodeIndexType bound) { Base::ReserveNodes(bound); if (bound <= num_nodes_) return; - start_.reserve(bound); + start_.reserve(bound + 1); } template @@ -1258,7 +1280,7 @@ void StaticGraph::AddNode(NodeIndexType node) { if (node < num_nodes_) return; DCHECK(!const_capacities_ || node < node_capacity_) << node; num_nodes_ = node + 1; - start_.resize(num_nodes_, 0); + start_.resize(num_nodes_ + 1, 0); } template @@ -1317,6 +1339,9 @@ void StaticGraph::Build( node_capacity_ = num_nodes_; arc_capacity_ = num_arcs_; this->FreezeCapacities(); + if (num_nodes_ == 0) { + return; + } // If Arc are in order, start_ already contains the degree distribution. if (arc_in_order_) { @@ -1329,8 +1354,8 @@ void StaticGraph::Build( // Computes outgoing degree of each nodes. We have to clear start_, since // at least the first arc was processed with arc_in_order_ == true. - start_.assign(num_nodes_, 0); - for (int i = 0; i < num_arcs_; ++i) { + start_.assign(num_nodes_ + 1, 0); + for (ArcIndexType i = 0; i < num_arcs_; ++i) { start_[tail_[i]]++; } this->ComputeCumulativeSum(&start_); @@ -1338,14 +1363,14 @@ void StaticGraph::Build( // Computes the forward arc permutation. // Note that this temporarily alters the start_ vector. 
std::vector perm(num_arcs_); - for (int i = 0; i < num_arcs_; ++i) { + for (ArcIndexType i = 0; i < num_arcs_; ++i) { perm[i] = start_[tail_[i]]++; } // We use "tail_" (which now contains rubbish) to permute "head_" faster. CHECK_EQ(tail_.size(), static_cast(num_arcs_)); tail_.swap(head_); - for (int i = 0; i < num_arcs_; ++i) { + for (ArcIndexType i = 0; i < num_arcs_; ++i) { head_[perm[i]] = tail_[i]; } @@ -1354,7 +1379,8 @@ void StaticGraph::Build( } // Restore in start_[i] the index of the first arc with tail >= i. - for (int i = num_nodes_ - 1; i > 0; --i) { + DCHECK_GE(num_nodes_, 1); + for (ArcIndexType i = num_nodes_ - 1; i > 0; --i) { start_[i] = start_[i - 1]; } start_[0] = 0; @@ -1367,6 +1393,7 @@ void StaticGraph::Build( } } +// TODO(b/385094969): Remove this class. template class StaticGraph::OutgoingArcIterator { public: @@ -1388,15 +1415,6 @@ class StaticGraph::OutgoingArcIterator { index_++; } - // Note(user): we lose a bit by returning a BeginEndWrapper<> on top of - // this iterator rather than a simple IntegerRange<> on the arc indices. - // On my computer: around 420M arcs/sec instead of 440M arcs/sec. - // - // However, it is slightly more consistent to do it this way, and we don't - // have two different codes depending on the way a client iterates on the - // arcs. 
- DEFINE_STL_ITERATOR_FUNCTIONS(OutgoingArcIterator); - private: ArcIndexType index_; ArcIndexType limit_; @@ -1659,7 +1677,6 @@ class ReverseArcListGraph::OutgoingHeadIterator { // ReverseArcStaticGraph implementation ---------------------------------------- -DEFINE_RANGE_BASED_ARC_ITERATION(ReverseArcStaticGraph, Outgoing); DEFINE_RANGE_BASED_ARC_ITERATION(ReverseArcStaticGraph, Incoming); DEFINE_RANGE_BASED_ARC_ITERATION(ReverseArcStaticGraph, OutgoingOrOppositeIncoming); @@ -1747,11 +1764,14 @@ void ReverseArcStaticGraph::Build( node_capacity_ = num_nodes_; arc_capacity_ = num_arcs_; this->FreezeCapacities(); + if (num_nodes_ == 0) { + return; + } this->BuildStartAndForwardHead(&head_, &start_, permutation); // Computes incoming degree of each nodes. - reverse_start_.assign(num_nodes_, 0); - for (int i = 0; i < num_arcs_; ++i) { + reverse_start_.assign(num_nodes_ + 1, 0); + for (ArcIndexType i = 0; i < num_arcs_; ++i) { reverse_start_[head_[i]]++; } this->ComputeCumulativeSum(&reverse_start_); @@ -1759,13 +1779,15 @@ void ReverseArcStaticGraph::Build( // Computes the reverse arcs of the forward arcs. // Note that this sort the reverse arcs with the same tail by head. opposite_.reserve(num_arcs_); - for (int i = 0; i < num_arcs_; ++i) { + for (ArcIndexType i = 0; i < num_arcs_; ++i) { // TODO(user): the 0 is wasted here, but minor optimisation. opposite_.grow(0, reverse_start_[head_[i]]++ - num_arcs_); } // Computes in reverse_start_ the start index of the reverse arcs. - for (int i = num_nodes_ - 1; i > 0; --i) { + DCHECK_GE(num_nodes_, 1); + reverse_start_[num_nodes_] = 0; // Sentinel. + for (NodeIndexType i = num_nodes_ - 1; i > 0; --i) { reverse_start_[i] = reverse_start_[i - 1] - num_arcs_; } if (num_nodes_ != 0) { @@ -1773,7 +1795,7 @@ void ReverseArcStaticGraph::Build( } // Fill reverse arc information. 
- for (int i = 0; i < num_arcs_; ++i) { + for (ArcIndexType i = 0; i < num_arcs_; ++i) { opposite_[opposite_[i]] = i; } for (const NodeIndexType node : Base::AllNodes()) { @@ -1783,6 +1805,7 @@ } } +// TODO(b/385094969): Remove this class. template class ReverseArcStaticGraph::OutgoingArcIterator { public: @@ -1802,10 +1825,6 @@ index_++; } - // TODO(user): we lose a bit by returning a BeginEndWrapper<> on top of this - // iterator rather than a simple IntegerRange on the arc indices. - DEFINE_STL_ITERATOR_FUNCTIONS(OutgoingArcIterator); - private: ArcIndexType index_; const ArcIndexType limit_; diff --git a/ortools/graph/hamiltonian_path.h b/ortools/graph/hamiltonian_path.h index 6a59c8cfec..dc4b754da9 100644 --- a/ortools/graph/hamiltonian_path.h +++ b/ortools/graph/hamiltonian_path.h @@ -21,7 +21,7 @@ // // The Shortest Hamiltonian Path Problem (SHPP) is similar to the Traveling // Salesperson Problem (TSP). -// You have to visit all the cities, starting from a given one and you +// You have to visit all the cities, starting from a given one and you // do not need to return to your starting point. With the TSP, you can start // anywhere, but you have to return to your start location.
// @@ -46,7 +46,7 @@ // f(S, j) = min (i in S \ {j}, f(S \ {j}, i) + cost(i, j)) // (j is an element of S) // Note that this formulation, from the original Held-Karp paper is a bit -// different, but equivalent to the one used in Caseau and Laburthe, Solving +// different, but equivalent to the one used in Caseau and Laburthe, Solving // Small TSPs with Constraints, 1997, ICLP // f(S, j) = min (i in S, f(S \ {i}, i) + cost(i, j)) // (j is not an element of S) @@ -88,8 +88,8 @@ #include #include +#include "absl/log/check.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" #include "ortools/util/bitset.h" #include "ortools/util/saturated_arithmetic.h" #include "ortools/util/vector_or_function.h" @@ -129,8 +129,8 @@ class Set { typedef Integer IntegerType; // Useful constants. - static constexpr Integer One = static_cast(1); - static constexpr Integer Zero = static_cast(0); + static constexpr Integer kOne = static_cast(1); + static constexpr Integer kZero = static_cast(0); static const int MaxCardinality = 8 * sizeof(Integer); // NOLINT // Construct a set from an Integer. @@ -143,22 +143,22 @@ Set { Integer value() const { return value_; } static Set FullSet(Integer card) { - return card == 0 ? Set(0) : Set(~Zero >> (MaxCardinality - card)); + return card == 0 ? Set(0) : Set(~kZero >> (MaxCardinality - card)); } // Returns the singleton set with 'n' as its only element. - static Set Singleton(Integer n) { return Set(One << n); } + static Set Singleton(Integer n) { return Set(kOne << n); } // Returns a set equal to the calling object, with element n added. // If n is already in the set, no operation occurs. - Set AddElement(int n) const { return Set(value_ | (One << n)); } + Set AddElement(int n) const { return Set(value_ | (kOne << n)); } // Returns a set equal to the calling object, with element n removed. // If n is not in the set, no operation occurs.
- Set RemoveElement(int n) const { return Set(value_ & ~(One << n)); } + Set RemoveElement(int n) const { return Set(value_ & ~(kOne << n)); } // Returns true if the calling set contains element n. - bool Contains(int n) const { return ((One << n) & value_) != 0; } + bool Contains(int n) const { return ((kOne << n) & value_) != 0; } // Returns true if 'other' is included in the calling set. bool Includes(Set other) const { @@ -178,7 +178,7 @@ class Set { Set RemoveSmallestElement() const { return Set(value_ & (value_ - 1)); } // Returns the rank of an element in a set. For the set 11100, ElementRank(4) - would return 2. (Ranks start at zero). + would return 2. (Ranks start at zero). int ElementRank(int n) const { DCHECK(Contains(n)) << "n = " << n << ", value_ = " << value_; return SingletonRank(Singleton(n)); @@ -418,7 +418,7 @@ uint64_t LatticeMemoryManager::BaseOffset(int card, DCHECK_EQ(card, node_rank); // Note(user): It is possible to get rid of base_offset_[card] by using a 2-D // array. It would also make it possible to free all the memory but the layer - // being constructed and the preceding one, if another lattice of paths is + // being constructed and the preceding one, if another lattice of paths is // constructed. // TODO(user): Evaluate the interest of the above. // There are 'card' f(set, j) to store. That is why we need to multiply @@ -465,7 +465,7 @@ class HamiltonianPathSolver { // stored public: // In 2010, 26 was the maximum solvable with 24 Gigs of RAM, and it took - // several minutes. With this 2014 version of the code, one may go a little + // several minutes. With this 2014 version of the code, one may go a little // higher, but considering the complexity of the algorithm (n*2^n), and that // there are very good ways to solve TSP with more than 32 cities, // we limit ourselves to 32 cites.
@@ -710,7 +710,7 @@ void HamiltonianPathSolver::Solve() { hamiltonian_costs_.resize(num_nodes_); // Compute the cost of the Hamiltonian paths starting from node 0, going // through all the other nodes, and ending at end_node. Compute the minimum - // one along the way. + // one along the way. CostType min_hamiltonian_cost = std::numeric_limits::max(); const NodeSet hamiltonian_set = full_set.RemoveElement(0); for (int end_node : hamiltonian_set) { @@ -744,7 +744,7 @@ std::vector HamiltonianPathSolver::ComputePath( const CostType partial_cost = mem_.Value(subset, src); const CostType incumbent_cost = Saturated::Add(partial_cost, Cost(src, dest)); - // Take precision into account when CosttType is float or double. + // Take precision into account when CostType is float or double. // There is no visible penalty in the case CostType is an integer type. if (std::abs(Saturated::Sub(current_cost, incumbent_cost)) <= std::numeric_limits::epsilon() * current_cost) { @@ -885,7 +885,7 @@ class PruningHamiltonianSolver { // guaranteed to be smaller than or equal to the cost of Hamiltonian path, // because Hamiltonian path is a spanning tree itself. - // TODO(user): Use generic map-based cache instead of lattice-based one. + // TODO(user): Use generic map-based cache instead of lattice-based one. // TODO(user): Use SaturatedArithmetic for better precision. public: diff --git a/ortools/graph/iterators.h b/ortools/graph/iterators.h index ea99f9f5e0..90b549e9d4 100644 --- a/ortools/graph/iterators.h +++ b/ortools/graph/iterators.h @@ -227,28 +227,6 @@ class IntegerRange : public BeginEndWrapper> { } }; -// Allow iterating over a vector as a mutable vector.
-template -struct MutableVectorIteration { - explicit MutableVectorIteration(std::vector* v) : v_(v) {} - struct Iterator { - explicit Iterator(typename std::vector::iterator it) : it_(it) {} - T* operator*() { return &*it_; } - Iterator& operator++() { - it_++; - return *this; - } - bool operator!=(const Iterator& other) const { return other.it_ != it_; } - - private: - typename std::vector::iterator it_; - }; - Iterator begin() { return Iterator(v_->begin()); } - Iterator end() { return Iterator(v_->end()); } - - private: - std::vector* const v_; -}; } // namespace util #endif // UTIL_GRAPH_ITERATORS_H_ diff --git a/ortools/graph/line_graph_test.cc b/ortools/graph/line_graph_test.cc index c86700b0b1..5ffeb913ef 100644 --- a/ortools/graph/line_graph_test.cc +++ b/ortools/graph/line_graph_test.cc @@ -90,10 +90,7 @@ TYPED_TEST(LineGraphTest, LineGraph) { const typename TypeParam::NodeIndex expected_tail = kExpectedLineArcs[i][0]; const typename TypeParam::NodeIndex expected_head = kExpectedLineArcs[i][1]; bool found = false; - for (typename TypeParam::OutgoingArcIterator out_iterator(line_graph, - expected_tail); - out_iterator.Ok(); out_iterator.Next()) { - const typename TypeParam::ArcIndex arc = out_iterator.Index(); + for (const auto arc : line_graph.OutgoingArcs(expected_tail)) { if (line_graph.Head(arc) == expected_head) { found = true; break; diff --git a/ortools/graph/linear_assignment.h b/ortools/graph/linear_assignment.h index 4b53dda3b4..188a46ddab 100644 --- a/ortools/graph/linear_assignment.h +++ b/ortools/graph/linear_assignment.h @@ -207,8 +207,8 @@ #include "absl/flags/declare.h" #include "absl/flags/flag.h" #include "absl/strings/str_format.h" +#include "ortools/base/base_export.h" #include "ortools/base/logging.h" -#include "ortools/graph/ebert_graph.h" #include "ortools/graph/iterators.h" #include "ortools/util/permutation.h" #include "ortools/util/zvector.h" @@ -359,29 +359,29 @@ class LinearSumAssignment { private: struct Stats { - Stats() : 
pushes_(0), double_pushes_(0), relabelings_(0), refinements_(0) {} + Stats() : pushes(0), double_pushes(0), relabelings(0), refinements(0) {} void Clear() { - pushes_ = 0; - double_pushes_ = 0; - relabelings_ = 0; - refinements_ = 0; + pushes = 0; + double_pushes = 0; + relabelings = 0; + refinements = 0; } void Add(const Stats& that) { - pushes_ += that.pushes_; - double_pushes_ += that.double_pushes_; - relabelings_ += that.relabelings_; - refinements_ += that.refinements_; + pushes += that.pushes; + double_pushes += that.double_pushes; + relabelings += that.relabelings; + refinements += that.refinements; } std::string StatsString() const { return absl::StrFormat( "%d refinements; %d relabelings; " "%d double pushes; %d pushes", - refinements_, relabelings_, double_pushes_, pushes_); + refinements, relabelings, double_pushes, pushes); } - int64_t pushes_; - int64_t double_pushes_; - int64_t relabelings_; - int64_t refinements_; + int64_t pushes; + int64_t double_pushes; + int64_t relabelings; + int64_t refinements; }; #ifndef SWIG @@ -1163,17 +1163,17 @@ bool LinearSumAssignment::DoublePush(NodeIndex source) { matched_arc_[to_unmatch] = GraphType::kNilArc; active_nodes_->Add(to_unmatch); // This counts as a double push. - iteration_stats_.double_pushes_ += 1; + iteration_stats_.double_pushes += 1; } else { // We are about to increase the cardinality of the matching. total_excess_ -= 1; // This counts as a single push. - iteration_stats_.pushes_ += 1; + iteration_stats_.pushes += 1; } matched_arc_[source] = best_arc; matched_node_[new_mate] = source; // Finally, relabel new_mate. 
- iteration_stats_.relabelings_ += 1; + iteration_stats_.relabelings += 1; const CostValue new_price = price_[new_mate] - gap - epsilon_; price_[new_mate] = new_price; return new_price >= price_lower_bound_; @@ -1195,14 +1195,14 @@ bool LinearSumAssignment::Refine() { // we know we're returning a wrong answer so we we leave a // message in the logs to increase our hope of chasing down the // problem. - LOG_IF(DFATAL, total_stats_.refinements_ > 0) + LOG_IF(DFATAL, total_stats_.refinements > 0) << "Infeasibility detection triggered after first iteration found " << "a feasible assignment!"; return false; } } DCHECK(active_nodes_->Empty()); - iteration_stats_.refinements_ += 1; + iteration_stats_.refinements += 1; return true; } @@ -1227,8 +1227,10 @@ LinearSumAssignment::BestArcAndGap( DCHECK(IsActive(left_node)) << "Node " << left_node << " must be active (unmatched)!"; DCHECK_GT(epsilon_, 0); - typename GraphType::OutgoingArcIterator arc_it(*graph_, left_node); - ArcIndex best_arc = arc_it.Index(); + const auto arcs = graph_->OutgoingArcs(left_node); + auto arc_it = arcs.begin(); + DCHECK(!arcs.empty()); + ArcIndex best_arc = *arc_it; CostValue min_partial_reduced_cost = PartialReducedCost(best_arc); // We choose second_min_partial_reduced_cost so that in the case of // the largest possible gap (which results from a left-side node @@ -1238,8 +1240,8 @@ LinearSumAssignment::BestArcAndGap( const CostValue max_gap = slack_relabeling_price_ - epsilon_; CostValue second_min_partial_reduced_cost = min_partial_reduced_cost + max_gap; - for (arc_it.Next(); arc_it.Ok(); arc_it.Next()) { - const ArcIndex arc = arc_it.Index(); + for (++arc_it; arc_it != arcs.end(); ++arc_it) { + const ArcIndex arc = *arc_it; const CostValue partial_reduced_cost = PartialReducedCost(arc); if (partial_reduced_cost < second_min_partial_reduced_cost) { if (partial_reduced_cost < min_partial_reduced_cost) { @@ -1266,26 +1268,27 @@ inline CostValue LinearSumAssignment::ImplicitPrice( NodeIndex 
left_node) const { DCHECK_GT(num_left_nodes_, left_node); DCHECK_GT(epsilon_, 0); - typename GraphType::OutgoingArcIterator arc_it(*graph_, left_node); + const auto arcs = graph_->OutgoingArcs(left_node); // We must not execute this method if left_node has no incident arc. - DCHECK(arc_it.Ok()); - ArcIndex best_arc = arc_it.Index(); + DCHECK(!arcs.empty()); + auto arc_it = arcs.begin(); + ArcIndex best_arc = *arc_it; if (best_arc == matched_arc_[left_node]) { - arc_it.Next(); - if (arc_it.Ok()) { - best_arc = arc_it.Index(); + ++arc_it; + if (arc_it != arcs.end()) { + best_arc = *arc_it; } } CostValue min_partial_reduced_cost = PartialReducedCost(best_arc); - if (!arc_it.Ok()) { + if (arc_it == arcs.end()) { // Only one arc is incident to left_node, and the node is // currently matched along that arc, which must be the case in any // feasible solution. Therefore we implicitly price this node so // low that we will never consider unmatching it. return -(min_partial_reduced_cost + slack_relabeling_price_); } - for (arc_it.Next(); arc_it.Ok(); arc_it.Next()) { - const ArcIndex arc = arc_it.Index(); + for (++arc_it; arc_it != arcs.end(); ++arc_it) { + const ArcIndex arc = *arc_it; if (arc != matched_arc_[left_node]) { const CostValue partial_reduced_cost = PartialReducedCost(arc); if (partial_reduced_cost < min_partial_reduced_cost) { @@ -1314,9 +1317,7 @@ bool LinearSumAssignment::EpsilonOptimal() const { // Get the implicit price of left_node and make sure the reduced // costs of left_node's incident arcs are in bounds. 
CostValue left_node_price = ImplicitPrice(left_node); - for (typename GraphType::OutgoingArcIterator arc_it(*graph_, left_node); - arc_it.Ok(); arc_it.Next()) { - const ArcIndex arc = arc_it.Index(); + for (const ArcIndex arc : graph_->OutgoingArcs(left_node)) { const CostValue reduced_cost = left_node_price + PartialReducedCost(arc); // Note the asymmetric definition of epsilon-optimality that we // use because it means we can saturate all admissible arcs in @@ -1354,8 +1355,7 @@ bool LinearSumAssignment::FinalizeSetup() { // precondition. for (NodeIndex node = 0; node < num_left_nodes_; ++node) { matched_arc_[node] = GraphType::kNilArc; - typename GraphType::OutgoingArcIterator arc_it(*graph_, node); - if (!arc_it.Ok()) { + if (graph_->OutgoingArcs(node).empty()) { incidence_precondition_satisfied_ = false; } } diff --git a/ortools/graph/linear_assignment_test.cc b/ortools/graph/linear_assignment_test.cc index 20814bf791..cbb750256a 100644 --- a/ortools/graph/linear_assignment_test.cc +++ b/ortools/graph/linear_assignment_test.cc @@ -397,6 +397,7 @@ TEST(LinearSumAssignmentFriendTest, EpsilonOptimal) { // without bogging down the normal set of fastbuild tests people need to run. 
#if LARGE TEST(LinearSumAssignmentPrecisionTest, PrecisionWarning) { + using NodeIndex = typename util::ListGraph<>::NodeIndex; const NodeIndex kNumLeftNodes = 10000000; util::ListGraph<> g(2 * kNumLeftNodes, 2 * kNumLeftNodes); LinearSumAssignment> a(g, kNumLeftNodes); diff --git a/ortools/graph/min_cost_flow.cc b/ortools/graph/min_cost_flow.cc index c956e0f287..2d9fc4088c 100644 --- a/ortools/graph/min_cost_flow.cc +++ b/ortools/graph/min_cost_flow.cc @@ -188,8 +188,7 @@ bool GenericMinCostFlowOutgoingArcs(node)) { residual_arc_capacity_[arc] = std::min(residual_arc_capacity_[arc], upper_bound); min_node_excess[node] = @@ -204,8 +203,7 @@ bool GenericMinCostFlowIncomingArcs(node)) { residual_arc_capacity_[arc] = std::min(residual_arc_capacity_[arc], upper_bound); max_node_excess[node] = diff --git a/ortools/graph/min_cost_flow.h b/ortools/graph/min_cost_flow.h index 2d13d1e6a4..e70c00e483 100644 --- a/ortools/graph/min_cost_flow.h +++ b/ortools/graph/min_cost_flow.h @@ -408,8 +408,6 @@ class GenericMinCostFlow : public MinCostFlowBase { typedef typename Graph::ArcIndex ArcIndex; typedef int64_t CostValue; typedef int64_t FlowQuantity; - typedef typename Graph::IncomingArcIterator IncomingArcIterator; - typedef typename Graph::OutgoingArcIterator OutgoingArcIterator; typedef typename Graph::OutgoingOrOppositeIncomingArcIterator OutgoingOrOppositeIncomingArcIterator; typedef ZVector ArcIndexArray; diff --git a/ortools/graph/min_cost_flow_test.cc b/ortools/graph/min_cost_flow_test.cc index 98106f81a4..3440faabb8 100644 --- a/ortools/graph/min_cost_flow_test.cc +++ b/ortools/graph/min_cost_flow_test.cc @@ -646,8 +646,8 @@ bool CheckAssignmentFeasibility(const Graph& graph, absl::Span supply) { for (typename Graph::NodeIndex node = 0; node < graph.num_nodes(); ++node) { if (supply[node] != 0) { - typename Graph::OutgoingOrOppositeIncomingArcIterator it(graph, node); - EXPECT_TRUE(it.Ok()) << node << " has no incident arc"; + 
EXPECT_FALSE(graph.OutgoingOrOppositeIncomingArcs(node).empty()) + << node << " has no incident arc"; } } return true; diff --git a/ortools/graph/shortest_paths.h b/ortools/graph/shortest_paths.h index c117e99d16..68071e3a3b 100644 --- a/ortools/graph/shortest_paths.h +++ b/ortools/graph/shortest_paths.h @@ -216,8 +216,6 @@ void ComputeOneToAllShortestPaths( } // Computes shortest paths from the node `source` to nodes in `destinations`. -// TODO(b/385094969): Remove second template parameter when all clients are -// migrated. template void ComputeOneToManyShortestPaths( const GraphType& graph, const std::vector& arc_lengths, diff --git a/ortools/graph/shortest_paths_test.cc b/ortools/graph/shortest_paths_test.cc index 4490d9fd84..552f575292 100644 --- a/ortools/graph/shortest_paths_test.cc +++ b/ortools/graph/shortest_paths_test.cc @@ -67,9 +67,7 @@ void CheckPathDataRow(const GraphType& graph, const PathDistance expected_distances[], typename GraphType::NodeIndex tail) { int index = tail * graph.num_nodes(); - for (typename GraphType::NodeIterator iterator(graph); iterator.Ok(); - iterator.Next()) { - const typename GraphType::NodeIndex head(iterator.Index()); + for (const typename GraphType::NodeIndex head : graph.AllNodes()) { CheckPathDataPair(container, distance_container, expected_distances[index], expected_paths[index], tail, head); ++index; @@ -97,9 +95,7 @@ void CheckPathData(const GraphType& graph, const GenericPathContainer& distance_container, const typename GraphType::NodeIndex expected_paths[], const PathDistance expected_distances[]) { - for (typename GraphType::NodeIterator iterator(graph); iterator.Ok(); - iterator.Next()) { - const typename GraphType::NodeIndex tail(iterator.Index()); + for (const typename GraphType::NodeIndex tail : graph.AllNodes()) { CheckPathDataRow(graph, container, distance_container, expected_paths, expected_distances, tail); } diff --git a/ortools/gscip/BUILD.bazel b/ortools/gscip/BUILD.bazel index 78dab27f01..284517e6eb 
100644 --- a/ortools/gscip/BUILD.bazel +++ b/ortools/gscip/BUILD.bazel @@ -13,8 +13,8 @@ load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") +load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") -load("@rules_python//python:proto.bzl", "py_proto_library") package( default_visibility = ["//visibility:public"], diff --git a/ortools/gscip/gscip.cc b/ortools/gscip/gscip.cc index 1bebb64b93..bba9b3fc8a 100644 --- a/ortools/gscip/gscip.cc +++ b/ortools/gscip/gscip.cc @@ -322,15 +322,15 @@ absl::Status GScip::SetParams(const GScipParameters& params, } for (const auto& bool_param : params.bool_params()) { RETURN_IF_SCIP_ERROR( - (SCIPsetBoolParam(scip_, bool_param.first.c_str(), bool_param.second))); + SCIPsetBoolParam(scip_, bool_param.first.c_str(), bool_param.second)); } for (const auto& int_param : params.int_params()) { RETURN_IF_SCIP_ERROR( - (SCIPsetIntParam(scip_, int_param.first.c_str(), int_param.second))); + SCIPsetIntParam(scip_, int_param.first.c_str(), int_param.second)); } for (const auto& long_param : params.long_params()) { - RETURN_IF_SCIP_ERROR((SCIPsetLongintParam(scip_, long_param.first.c_str(), - long_param.second))); + RETURN_IF_SCIP_ERROR(SCIPsetLongintParam(scip_, long_param.first.c_str(), + long_param.second)); } for (const auto& char_param : params.char_params()) { if (char_param.second.size() != 1) { @@ -339,16 +339,16 @@ absl::Status GScip::SetParams(const GScipParameters& params, "but parameter: ", char_param.first, " was: ", char_param.second)); } - RETURN_IF_SCIP_ERROR((SCIPsetCharParam(scip_, char_param.first.c_str(), - char_param.second[0]))); + RETURN_IF_SCIP_ERROR(SCIPsetCharParam(scip_, char_param.first.c_str(), + char_param.second[0])); } for (const auto& string_param : params.string_params()) { - RETURN_IF_SCIP_ERROR((SCIPsetStringParam(scip_, string_param.first.c_str(), - 
string_param.second.c_str()))); + RETURN_IF_SCIP_ERROR(SCIPsetStringParam(scip_, string_param.first.c_str(), + string_param.second.c_str())); } for (const auto& real_param : params.real_params()) { RETURN_IF_SCIP_ERROR( - (SCIPsetRealParam(scip_, real_param.first.c_str(), real_param.second))); + SCIPsetRealParam(scip_, real_param.first.c_str(), real_param.second)); } if (!legacy_params.empty()) { RETURN_IF_ERROR( diff --git a/ortools/gurobi/environment.cc b/ortools/gurobi/environment.cc index 28aef1cfe8..db38c0af73 100644 --- a/ortools/gurobi/environment.cc +++ b/ortools/gurobi/environment.cc @@ -46,9 +46,6 @@ bool GurobiIsCorrectlyInstalled() { // See the comment at the top of the script. // This is the 'define' section. -std::function - GRBisqp = nullptr; std::function GRBisattravailable = nullptr; std::function @@ -239,7 +236,6 @@ void LoadGurobiFunctions(DynamicLibrary* gurobi_dynamic_library) { // See the comment at the top of the script. // This is the 'assign' section. - gurobi_dynamic_library->GetFunction(&GRBisqp, "GRBisqp"); gurobi_dynamic_library->GetFunction(&GRBisattravailable, "GRBisattravailable"); gurobi_dynamic_library->GetFunction(&GRBgetintattr, "GRBgetintattr"); diff --git a/ortools/gurobi/environment.h b/ortools/gurobi/environment.h index 0de7107c06..3e723f3567 100644 --- a/ortools/gurobi/environment.h +++ b/ortools/gurobi/environment.h @@ -119,7 +119,6 @@ absl::Status LoadGurobiDynamicLibrary(std::vector potential_paths); #define GRB_MAX_CONCURRENT 64 #define CB_ARGS GRBmodel *model, void *cbdata, int where, void *usrdata #define LOGCB_ARGS char *msg, void *logdata -extern std::function GRBisqp; extern std::function GRBisattravailable; extern std::function GRBgetintattr; extern std::function GRBsetintattr; diff --git a/ortools/java/com/google/ortools/sat/CpModel.java b/ortools/java/com/google/ortools/sat/CpModel.java index b7025f82d0..a44b96fa35 100644 --- a/ortools/java/com/google/ortools/sat/CpModel.java +++ 
b/ortools/java/com/google/ortools/sat/CpModel.java @@ -341,9 +341,9 @@ public final class CpModel { /** * Adds {@code AllDifferent(expressions)}. * - *

This constraint forces all affine expressions to have different values. + *

This constraint forces all expressions to have different values. * - * @param expressions a list of affine integer expressions + * @param expressions a list of 1-var affine integer expressions * @return an instance of the Constraint class */ public Constraint addAllDifferent(LinearArgument[] expressions) { @@ -450,11 +450,11 @@ public final class CpModel { /** * Adds {@code AllowedAssignments(expressions)}. * - *

An AllowedAssignments constraint is a constraint on an array of affine expressions that - * forces, when all expressions are fixed to a single value, that the corresponding list of values - * is equal to one of the tuples of the tupleList. + *

An AllowedAssignments constraint is a constraint on an array of 1-var affine expressions + * that forces, when all expressions are fixed to a single value, that the corresponding list of + * values is equal to one of the tuples of the tupleList. * - * @param expressions a list of affine expressions (a * var + b) + * @param expressions a list of 1-var affine expressions (a * var + b) * @return an instance of the TableConstraint class without any tuples. Tuples can be added * directly to the table constraint. */ @@ -480,10 +480,10 @@ public final class CpModel { /** * Adds {@code ForbiddenAssignments(expressions)}. * - *

A ForbiddenAssignments constraint is a constraint on an array of affine expressions where - * the list of impossible combinations is provided in the tuples list. + *

A ForbiddenAssignments constraint is a constraint on an array of 1-var affine expressions + * where the list of impossible combinations is provided in the tuples list. * - * @param expressions a list of affine expressions (a * var + b) + * @param expressions a list of 1-var affine expressions (a * var + b) * @return an instance of the TableConstraint class without any tuples. Tuples can be added * directly to the table constraint. */ @@ -509,11 +509,12 @@ public final class CpModel { /** * Adds an automaton constraint. * - *

An automaton constraint takes a list of affine expressions (of size n), an initial state, a - * set of final states, and a set of transitions that will be added incrementally directly on the - * returned AutomatonConstraint instance. A transition is a triplet ('tail', 'transition', - * 'head'), where 'tail' and 'head' are states, and 'transition' is the label of an arc from - * 'head' to 'tail', corresponding to the value of one expression in the list of expressions. + *

An automaton constraint takes a list of 1-var affine expressions (of size n), an initial + * state, a set of final states, and a set of transitions that will be added incrementally + * directly on the returned AutomatonConstraint instance. A transition is a triplet ('tail', + * 'transition', 'head'), where 'tail' and 'head' are states, and 'transition' is the label of an + * arc from 'head' to 'tail', corresponding to the value of one expression in the list of + * expressions. * *

This automaton will be unrolled into a flow with n + 1 phases. Each phase contains the * possible states of the automaton. The first state contains the initial state. The last phase @@ -529,8 +530,8 @@ public final class CpModel { * from the initial state in phase 0, there is a path labeled by the values of the expressions * that ends in one of the final states in the final phase. * - * @param transitionExpressions a non empty list of affine expressions (a * var + b) whose values - * correspond to the labels of the arcs traversed by the automaton + * @param transitionExpressions a non empty list of 1-var affine expressions (a * var + b) whose + * values correspond to the labels of the arcs traversed by the automaton * @param startingState the initial state of the automaton * @param finalStates a non empty list of admissible final states * @return an instance of the Constraint class @@ -757,16 +758,16 @@ public final class CpModel { // Scheduling support. /** - * Creates an interval variable from three affine expressions start, size, and end. + * Creates an interval variable from three 1-var affine expressions start, size, and end. * *

An interval variable is a constraint, that is itself used in other constraints like * NoOverlap. * *

Internally, it ensures that {@code start + size == end}. * - * @param start the start of the interval. It needs to be an affine or constant expression. - * @param size the size of the interval. It needs to be an affine or constant expression. - * @param end the end of the interval. It needs to be an affine or constant expression. + * @param start the start of the interval. It needs to be a 1-var affine or constant expression. + * @param size the size of the interval. It needs to be a 1-var affine or constant expression. + * @param end the end of the interval. It needs to be a 1-var affine or constant expression. * @param name the name of the interval variable * @return An IntervalVar object */ @@ -779,12 +780,12 @@ } /** - * Creates an interval variable from an affine expression start, and a fixed size. + * Creates an interval variable from a 1-var affine expression start, and a fixed size. * *

An interval variable is a constraint, that is itself used in other constraints like * NoOverlap. * - * @param start the start of the interval. It needs to be an affine or constant expression. + * @param start the start of the interval. It needs to be a 1-var affine or constant expression. * @param size the fixed size of the interval. * @param name the name of the interval variable. * @return An IntervalVar object @@ -806,7 +807,7 @@ } /** - * Creates an optional interval variable from three affine expressions start, size, end, and + * Creates an optional interval variable from three 1-var affine expressions start, size, end, and * isPresent. * *

An optional interval variable is a constraint, that is itself used in other constraints like @@ -815,9 +816,9 @@ public final class CpModel { * *

Internally, it ensures that {@code isPresent => start + size == end}. * - * @param start the start of the interval. It needs to be an affine or constant expression. - * @param size the size of the interval. It needs to be an affine or constant expression. - * @param end the end of the interval. It needs to be an affine or constant expression. + * @param start the start of the interval. It needs to be a 1-var affine or constant expression. + * @param size the size of the interval. It needs to be a 1-var affine or constant expression. + * @param end the end of the interval. It needs to be a 1-var affine or constant expression. * @param isPresent a literal that indicates if the interval is active or not. A inactive interval * is simply ignored by all constraints. * @param name The name of the interval variable @@ -833,12 +834,12 @@ } /** - * Creates an optional interval variable from an affine expression start, and a fixed size. + * Creates an optional interval variable from a 1-var affine expression start, and a fixed size. * *

An interval variable is a constraint, that is itself used in other constraints like * NoOverlap. * - * @param start the start of the interval. It needs to be an affine or constant expression. + * @param start the start of the interval. It needs to be a 1-var affine or constant expression. * @param size the fixed size of the interval. * @param isPresent a literal that indicates if the interval is active or not. A inactive interval * is simply ignored by all constraints. @@ -913,8 +914,8 @@ public final class CpModel { *

{@code forall t: sum(demands[i] if (start(intervals[t]) <= t < end(intervals[t])) and (t is * present)) <= capacity}. * - * @param capacity the maximum capacity of the cumulative constraint. It must be a positive affine - * expression. + * @param capacity the maximum capacity of the cumulative constraint. It must be a positive 1-var + * affine expression. * @return an instance of the CumulativeConstraint class. this class allows adding (interval, * demand) pairs incrementally. */ diff --git a/ortools/julia/ORTools.jl/src/c_wrapper/c_wrapper.jl b/ortools/julia/ORTools.jl/src/c_wrapper/c_wrapper.jl index 4d1c11260f..9cac6a0ac6 100644 --- a/ortools/julia/ORTools.jl/src/c_wrapper/c_wrapper.jl +++ b/ortools/julia/ORTools.jl/src/c_wrapper/c_wrapper.jl @@ -1,10 +1,14 @@ libortools = ORTools_jll.libortools +# Keep this file in sync with math_opt/core/c_api/solver.h. + +# There is no need to explicitly have a `mutable struct MathOptInterrupter` +# on the Julia side, as it's only an opaque pointer for this API. 
+ function MathOptNewInterrupter() return ccall((:MathOptNewInterrupter, libortools), Ptr{Cvoid}, - (Cvoid,), - ptr) + ()) end function MathOptFreeInterrupter(ptr) @@ -53,4 +57,3 @@ function MathOptSolve(model, model_size, solver_type, interrupter, solve_result, solve_result_size, status_msg) end - diff --git a/ortools/linear_solver/BUILD.bazel b/ortools/linear_solver/BUILD.bazel index 59946919e8..d4af6f1bf7 100644 --- a/ortools/linear_solver/BUILD.bazel +++ b/ortools/linear_solver/BUILD.bazel @@ -15,8 +15,8 @@ load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") load("@bazel_skylib//rules:copy_file.bzl", "copy_file") load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") +load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") -load("@rules_python//python:proto.bzl", "py_proto_library") package(default_visibility = ["//visibility:public"]) diff --git a/ortools/linear_solver/highs_interface.cc b/ortools/linear_solver/highs_interface.cc index 92e0dea821..3dc5a8e499 100644 --- a/ortools/linear_solver/highs_interface.cc +++ b/ortools/linear_solver/highs_interface.cc @@ -15,7 +15,6 @@ #include #include -#include #include #include #include @@ -25,14 +24,11 @@ #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" -#include "absl/types/optional.h" -#include "google/protobuf/text_format.h" #include "ortools/base/logging.h" #include "ortools/linear_solver/linear_solver.h" #include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/linear_solver/proto_solver/highs_proto_solver.h" #include "ortools/linear_solver/proto_solver/proto_utils.h" -#include "ortools/port/proto_utils.h" #include "ortools/util/lazy_mutable_copy.h" namespace operations_research { diff --git a/ortools/linear_solver/linear_solver.cc b/ortools/linear_solver/linear_solver.cc index 
74e4aca027..7058e72038 100644 --- a/ortools/linear_solver/linear_solver.cc +++ b/ortools/linear_solver/linear_solver.cc @@ -35,6 +35,7 @@ #include "absl/container/flat_hash_set.h" #include "absl/flags/flag.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/ascii.h" @@ -49,7 +50,6 @@ #include "absl/time/time.h" #include "google/protobuf/text_format.h" #include "ortools/base/accurate_sum.h" -#include "ortools/base/logging.h" #include "ortools/base/map_util.h" #include "ortools/base/stl_util.h" #include "ortools/base/threadpool.h" diff --git a/ortools/linear_solver/linear_solver.h b/ortools/linear_solver/linear_solver.h index 884067c3ba..e3962f5c01 100644 --- a/ortools/linear_solver/linear_solver.h +++ b/ortools/linear_solver/linear_solver.h @@ -158,6 +158,7 @@ #include "absl/time/clock.h" #include "absl/time/time.h" #include "absl/types/optional.h" +#include "ortools/base/base_export.h" #include "ortools/base/logging.h" #include "ortools/linear_solver/linear_expr.h" #include "ortools/linear_solver/linear_solver.pb.h" diff --git a/ortools/linear_solver/proto_solver/scip_proto_solver.cc b/ortools/linear_solver/proto_solver/scip_proto_solver.cc index c5c71fc0b6..3829b73204 100644 --- a/ortools/linear_solver/proto_solver/scip_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/scip_proto_solver.cc @@ -320,7 +320,16 @@ absl::Status AddAbsConstraint(const MPGeneralConstraintProto& gen_cst, std::vector vars; std::vector vals; + // Constraints created by add_abs_constraint. std::vector cons; + // Make sure the constraints don't leak when we exit this scope. 
+ absl::Cleanup cons_cleanup = [&]() { + for (SCIP_CONS* c : cons) { + const absl::Status status = SCIP_TO_STATUS(SCIPreleaseCons(scip, &c)); + LOG_IF(ERROR, !status.ok()) << status; + } + cons.clear(); + }; auto add_abs_constraint = [&](absl::string_view name_prefix) -> absl::Status { SCIP_CONS* scip_cons = nullptr; CHECK(vars.size() == vals.size()); diff --git a/ortools/linear_solver/wrappers/model_builder_helper.cc b/ortools/linear_solver/wrappers/model_builder_helper.cc index 41431acbf8..592a48c1a4 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.cc +++ b/ortools/linear_solver/wrappers/model_builder_helper.cc @@ -24,11 +24,11 @@ #include #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "ortools/base/helpers.h" -#include "ortools/base/logging.h" #include "ortools/base/options.h" #include "ortools/gurobi/environment.h" #include "ortools/linear_solver/linear_solver.h" diff --git a/ortools/linear_solver/xpress_interface_test.cc b/ortools/linear_solver/xpress_interface_test.cc index 18c39c66eb..36e020cb07 100644 --- a/ortools/linear_solver/xpress_interface_test.cc +++ b/ortools/linear_solver/xpress_interface_test.cc @@ -11,9 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include +#include #include #include -#include +#include +#include +#include #include "gtest/gtest.h" #include "ortools/base/init_google.h" @@ -1336,7 +1340,7 @@ TEST_F(XpressFixtureMIP, SetHint) { // back using the API // In this test we send the (near) optimal solution as a hint (with // obj=56774). Usually XPRESS finds it in ~3000 seconds but in this case it - // should be able to retain it in juste a few seconds using the hint. Note + // should be able to retain it in just a few seconds using the hint. Note // that the logs should mention "User solution (USER_HINT) stored." 
buildLargeMipWithCallback(solver, 60, 2); @@ -1414,7 +1418,7 @@ TEST_F(XpressFixtureMIP, CallbackThrowsException) { } // namespace operations_research int main(int argc, char** argv) { - absl::SetFlag(&FLAGS_stderrthreshold, 0); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); testing::InitGoogleTest(&argc, argv); auto solver = operations_research::MPSolver::CreateSolver("XPRESS_LP"); if (solver == nullptr) { diff --git a/ortools/math_opt/BUILD.bazel b/ortools/math_opt/BUILD.bazel index f74d398cbb..5ee4a2102d 100644 --- a/ortools/math_opt/BUILD.bazel +++ b/ortools/math_opt/BUILD.bazel @@ -13,7 +13,7 @@ load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@rules_python//python:proto.bzl", "py_proto_library") +load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") package(default_visibility = ["//visibility:public"]) diff --git a/ortools/math_opt/solvers/BUILD.bazel b/ortools/math_opt/solvers/BUILD.bazel index 5c1af9879a..547b352a02 100644 --- a/ortools/math_opt/solvers/BUILD.bazel +++ b/ortools/math_opt/solvers/BUILD.bazel @@ -13,7 +13,7 @@ load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@rules_python//python:proto.bzl", "py_proto_library") +load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") package(default_visibility = ["//ortools/math_opt:__subpackages__"]) diff --git a/ortools/pdlp/BUILD.bazel b/ortools/pdlp/BUILD.bazel index 28166d60d9..59ac2414cc 100644 --- a/ortools/pdlp/BUILD.bazel +++ b/ortools/pdlp/BUILD.bazel @@ -13,8 +13,8 @@ load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") +load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") 
-load("@rules_python//python:proto.bzl", "py_proto_library") package(default_visibility = ["//visibility:public"]) @@ -26,6 +26,7 @@ cc_library( ":solvers_cc_proto", "//ortools/base:threadpool", "@com_google_absl//absl/functional:any_invocable", + "@com_google_absl//absl/log", "@eigen", ], ) @@ -321,6 +322,7 @@ cc_test( ":gtest_main", ":scheduler", ":sharder", + ":solvers_cc_proto", "//ortools/base", "//ortools/base:mathutil", "@com_google_absl//absl/random:distributions", diff --git a/ortools/pdlp/scheduler.h b/ortools/pdlp/scheduler.h index dd86dd7d2d..bd908f2e93 100644 --- a/ortools/pdlp/scheduler.h +++ b/ortools/pdlp/scheduler.h @@ -19,8 +19,12 @@ #define EIGEN_USE_CUSTOM_THREAD_POOL #endif +#include + +#include #include #include +#include #include "absl/functional/any_invocable.h" #include "absl/log/log.h" diff --git a/ortools/pdlp/sharder_test.cc b/ortools/pdlp/sharder_test.cc index 2db1f34cfa..4bce8725d7 100644 --- a/ortools/pdlp/sharder_test.cc +++ b/ortools/pdlp/sharder_test.cc @@ -16,8 +16,10 @@ #include #include #include +#include #include #include +#include #include #include "Eigen/Core" @@ -28,6 +30,7 @@ #include "ortools/base/gmock.h" #include "ortools/base/mathutil.h" #include "ortools/pdlp/scheduler.h" +#include "ortools/pdlp/solvers.pb.h" namespace operations_research::pdlp { namespace { @@ -426,35 +429,42 @@ TEST(ScaledColL2Norm, SmallExample) { EXPECT_THAT(answer, ElementsAre(std::sqrt(54), 1.0, 6.0, std::sqrt(41))); } -class VariousSizesTest : public testing::TestWithParam {}; +class VariousSizesAndSchedulerTest + : public testing::TestWithParam< + std::tuple> {}; -TEST_P(VariousSizesTest, LargeMatVec) { - const int64_t size = GetParam(); +TEST_P(VariousSizesAndSchedulerTest, LargeMatVec) { + const auto [size, scheduler_type] = GetParam(); Eigen::SparseMatrix mat = LargeSparseMatrix(size); const int num_threads = 5; const int shards_per_thread = 3; - GoogleThreadPoolScheduler scheduler(num_threads); - Sharder sharder(mat, shards_per_thread * 
num_threads, &scheduler); + std::unique_ptr scheduler = + MakeScheduler(scheduler_type, num_threads); + Sharder sharder(mat, shards_per_thread * num_threads, scheduler.get()); VectorXd rhs = VectorXd::Random(size); VectorXd direct = mat.transpose() * rhs; VectorXd threaded = TransposedMatrixVectorProduct(mat, rhs, sharder); EXPECT_LE((direct - threaded).norm(), 1.0e-8); } -TEST_P(VariousSizesTest, LargeVectors) { - const int64_t size = GetParam(); +TEST_P(VariousSizesAndSchedulerTest, LargeVectors) { + const auto [size, scheduler_type] = GetParam(); const int num_threads = 5; - GoogleThreadPoolScheduler scheduler(num_threads); - Sharder sharder(size, num_threads, &scheduler); + std::unique_ptr scheduler = + MakeScheduler(scheduler_type, num_threads); + Sharder sharder(size, num_threads, scheduler.get()); VectorXd vec = VectorXd::Random(size); const double direct = vec.squaredNorm(); const double threaded = SquaredNorm(vec, sharder); EXPECT_THAT(threaded, DoubleNear(direct, size * 1.0e-14)); } -INSTANTIATE_TEST_SUITE_P(VariousSizesTestInstantiation, VariousSizesTest, - testing::Values(10, 1000, 100 * 1000)); +INSTANTIATE_TEST_SUITE_P( + VariousSizesAndSchedulerTestInstantiation, VariousSizesAndSchedulerTest, + testing::Combine(testing::Values(10, 1000, 100 * 1000), + testing::Values(SCHEDULER_TYPE_GOOGLE_THREADPOOL, + SCHEDULER_TYPE_EIGEN_THREADPOOL))); } // namespace } // namespace operations_research::pdlp diff --git a/ortools/sat/2d_mandatory_overlap_propagator.cc b/ortools/sat/2d_mandatory_overlap_propagator.cc index c89eae4d64..4ff0e56cec 100644 --- a/ortools/sat/2d_mandatory_overlap_propagator.cc +++ b/ortools/sat/2d_mandatory_overlap_propagator.cc @@ -19,8 +19,8 @@ #include #include +#include "absl/log/log.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" #include "ortools/sat/model.h" diff --git a/ortools/sat/2d_orthogonal_packing.cc b/ortools/sat/2d_orthogonal_packing.cc 
index adbaeec63b..716108bc3b 100644 --- a/ortools/sat/2d_orthogonal_packing.cc +++ b/ortools/sat/2d_orthogonal_packing.cc @@ -23,11 +23,11 @@ #include #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/numeric/bits.h" #include "absl/random/distributions.h" #include "absl/types/span.h" #include "ortools/base/constant_divisor.h" -#include "ortools/base/logging.h" #include "ortools/sat/2d_packing_brute_force.h" #include "ortools/sat/integer_base.h" #include "ortools/sat/util.h" diff --git a/ortools/sat/2d_packing_brute_force.cc b/ortools/sat/2d_packing_brute_force.cc index 124238ab2b..d4f2ae5037 100644 --- a/ortools/sat/2d_packing_brute_force.cc +++ b/ortools/sat/2d_packing_brute_force.cc @@ -20,9 +20,9 @@ #include #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer_base.h" #include "ortools/sat/util.h" diff --git a/ortools/sat/2d_rectangle_presolve.cc b/ortools/sat/2d_rectangle_presolve.cc index b755bcd58c..edad837bbc 100644 --- a/ortools/sat/2d_rectangle_presolve.cc +++ b/ortools/sat/2d_rectangle_presolve.cc @@ -28,9 +28,9 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" #include "ortools/base/stl_util.h" #include "ortools/graph/minimum_vertex_cover.h" #include "ortools/graph/strongly_connected_components.h" diff --git a/ortools/sat/2d_rectangle_presolve_test.cc b/ortools/sat/2d_rectangle_presolve_test.cc index 54a08aa8fe..f02a3338ce 100644 --- a/ortools/sat/2d_rectangle_presolve_test.cc +++ b/ortools/sat/2d_rectangle_presolve_test.cc @@ -27,13 +27,13 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" 
#include "absl/random/bit_gen_ref.h" #include "absl/random/random.h" #include "absl/strings/str_split.h" #include "absl/types/span.h" #include "gtest/gtest.h" #include "ortools/base/gmock.h" -#include "ortools/base/logging.h" #include "ortools/sat/2d_orthogonal_packing_testing.h" #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer_base.h" diff --git a/ortools/sat/2d_try_edge_propagator.cc b/ortools/sat/2d_try_edge_propagator.cc index f625a011c8..3e1484d9ea 100644 --- a/ortools/sat/2d_try_edge_propagator.cc +++ b/ortools/sat/2d_try_edge_propagator.cc @@ -21,7 +21,7 @@ #include "absl/algorithm/container.h" #include "absl/log/check.h" -#include "ortools/base/logging.h" +#include "absl/log/log.h" #include "ortools/base/stl_util.h" #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index f32be59fc4..351ade114e 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -16,9 +16,9 @@ load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@com_google_protobuf//bazel:java_proto_library.bzl", "java_proto_library") load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") +load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") -load("@rules_python//python:proto.bzl", "py_proto_library") package(default_visibility = ["//visibility:public"]) @@ -287,9 +287,11 @@ cc_library( "//ortools/base:mathutil", "//ortools/base:stl_util", "//ortools/graph:strongly_connected_components", + "//ortools/util:bitset", "//ortools/util:dense_set", "//ortools/util:saturated_arithmetic", "//ortools/util:sorted_interval_list", + "//ortools/util:time_limit", "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", @@ -312,6 +314,7 @@ 
cc_test( "//ortools/base:gmock_main", "//ortools/base:parse_test_proto", "//ortools/util:sorted_interval_list", + "//ortools/util:time_limit", "@com_google_absl//absl/types:span", ], ) @@ -337,6 +340,7 @@ cc_library( "//ortools/algorithms:binary_search", "//ortools/util:sorted_interval_list", "//ortools/util:strong_integers", + "//ortools/util:time_limit", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/functional:any_invocable", "@com_google_absl//absl/functional:bind_front", @@ -621,6 +625,7 @@ cc_library( ":presolve_context", ":probing", ":rins", + ":routing_cuts", ":sat_base", ":sat_inprocessing", ":sat_parameters_cc_proto", @@ -730,6 +735,7 @@ cc_library( "//ortools/util:saturated_arithmetic", "//ortools/util:sorted_interval_list", "//ortools/util:strong_integers", + "//ortools/util:time_limit", "@com_google_absl//absl/base:core_headers", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/container:flat_hash_map", @@ -1888,6 +1894,7 @@ cc_test( ":sat_solver", "//ortools/base:gmock_main", "//ortools/util:sorted_interval_list", + "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/types:span", ], ) @@ -2335,6 +2342,7 @@ cc_library( "@com_google_absl//absl/status", "@com_google_absl//absl/strings", "@com_google_absl//absl/strings:str_format", + "@com_google_absl//absl/types:span", ], ) @@ -2654,6 +2662,7 @@ cc_library( hdrs = ["routing_cuts.h"], deps = [ ":cp_model_cc_proto", + ":cp_model_utils", ":cuts", ":integer", ":integer_base", @@ -2662,18 +2671,23 @@ cc_library( ":model", ":precedences", ":sat_base", + ":synchronization", ":util", "//ortools/base", "//ortools/base:mathutil", + "//ortools/base:stl_util", "//ortools/base:strong_vector", "//ortools/graph", + "//ortools/graph:connected_components", "//ortools/graph:max_flow", "//ortools/util:strong_integers", "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/cleanup", "@com_google_absl//absl/container:flat_hash_map", 
"@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", + "@com_google_absl//absl/numeric:bits", "@com_google_absl//absl/random:distributions", "@com_google_absl//absl/strings", "@com_google_absl//absl/types:span", @@ -2684,6 +2698,7 @@ cc_test( name = "routing_cuts_test", srcs = ["routing_cuts_test.cc"], deps = [ + ":cp_model", ":cuts", ":integer", ":integer_base", @@ -2694,6 +2709,7 @@ cc_test( ":routing_cuts", ":sat_base", "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", "//ortools/base:strong_vector", "//ortools/graph:max_flow", "//ortools/util:strong_integers", @@ -2713,7 +2729,6 @@ cc_library( ":cuts", ":integer", ":integer_base", - ":intervals", ":linear_constraint", ":linear_constraint_manager", ":model", @@ -2729,6 +2744,7 @@ cc_library( "@com_google_absl//absl/base:core_headers", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/strings", "@com_google_absl//absl/types:span", @@ -3253,10 +3269,10 @@ cc_library( ":no_overlap_2d_helper", ":synchronization", ":util", - "//ortools/algorithms:set_cover_heuristics", - "//ortools/algorithms:set_cover_invariant", - "//ortools/algorithms:set_cover_model", "//ortools/base:stl_util", + "//ortools/set_cover:set_cover_heuristics", + "//ortools/set_cover:set_cover_invariant", + "//ortools/set_cover:set_cover_model", "//ortools/util:bitset", "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/log", @@ -3787,6 +3803,7 @@ cc_test( "//ortools/base:gmock_main", "//ortools/base:parse_test_proto", "//ortools/util:logging", + "//ortools/util:time_limit", "@com_google_absl//absl/log:check", "@com_google_absl//absl/strings", ], diff --git a/ortools/sat/boolean_problem.cc b/ortools/sat/boolean_problem.cc index a013548583..e9b37bc923 100644 --- a/ortools/sat/boolean_problem.cc +++ 
b/ortools/sat/boolean_problem.cc @@ -30,6 +30,7 @@ #include "absl/status/status.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" +#include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/graph/graph.h" #if !defined(__PORTABLE_PLATFORM__) @@ -497,7 +498,7 @@ void StoreAssignment(const VariablesAssignment& assignment, } void ExtractSubproblem(const LinearBooleanProblem& problem, - const std::vector& constraint_indices, + absl::Span constraint_indices, LinearBooleanProblem* subproblem) { *subproblem = problem; subproblem->set_name("Subproblem of " + problem.name()); diff --git a/ortools/sat/boolean_problem.h b/ortools/sat/boolean_problem.h index 287dcd5ef3..2bb2ca6684 100644 --- a/ortools/sat/boolean_problem.h +++ b/ortools/sat/boolean_problem.h @@ -19,6 +19,7 @@ #include #include "absl/status/status.h" +#include "absl/types/span.h" #include "ortools/algorithms/sparse_permutation.h" #include "ortools/base/strong_vector.h" #include "ortools/sat/boolean_problem.pb.h" @@ -106,7 +107,7 @@ void StoreAssignment(const VariablesAssignment& assignment, // Constructs a sub-problem formed by the constraints with given indices. 
void ExtractSubproblem(const LinearBooleanProblem& problem, - const std::vector& constraint_indices, + absl::Span constraint_indices, LinearBooleanProblem* subproblem); // Modifies the given LinearBooleanProblem so that all the literals appearing diff --git a/ortools/sat/clause.cc b/ortools/sat/clause.cc index 6397a34ed3..71817afe01 100644 --- a/ortools/sat/clause.cc +++ b/ortools/sat/clause.cc @@ -28,6 +28,7 @@ #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/random/bit_gen_ref.h" #include "absl/random/distributions.h" #include "absl/types/span.h" diff --git a/ortools/sat/constraint_violation.cc b/ortools/sat/constraint_violation.cc index 945e2d4f65..561173715c 100644 --- a/ortools/sat/constraint_violation.cc +++ b/ortools/sat/constraint_violation.cc @@ -26,8 +26,8 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" #include "ortools/base/mathutil.h" #include "ortools/base/stl_util.h" #include "ortools/graph/strongly_connected_components.h" @@ -37,6 +37,7 @@ #include "ortools/util/dense_set.h" #include "ortools/util/saturated_arithmetic.h" #include "ortools/util/sorted_interval_list.h" +#include "ortools/util/time_limit.h" namespace operations_research { namespace sat { @@ -215,7 +216,7 @@ void LinearIncrementalEvaluator::ComputeInitialActivities( // Resets the activity as the offset and the number of false enforcement to 0. activities_ = offsets_; - in_last_affected_variables_.resize(columns_.size(), false); + last_affected_variables_.ClearAndResize(columns_.size()); num_false_enforcement_.assign(num_constraints_, 0); // Update these numbers for all columns. 
@@ -256,23 +257,10 @@ void LinearIncrementalEvaluator::ComputeInitialActivities( } void LinearIncrementalEvaluator::ClearAffectedVariables() { - if (10 * last_affected_variables_.size() < columns_.size()) { - // Sparse. - in_last_affected_variables_.resize(columns_.size(), false); - for (const int var : last_affected_variables_) { - in_last_affected_variables_[var] = false; - } - } else { - // Dense. - in_last_affected_variables_.assign(columns_.size(), false); - } - last_affected_variables_.clear(); - DCHECK(std::all_of(in_last_affected_variables_.begin(), - in_last_affected_variables_.end(), - [](bool b) { return !b; })); + last_affected_variables_.ClearAndResize(columns_.size()); } -// Tricky: Here we re-use in_last_affected_variables_ to resest +// Tricky: Here we reuse last_affected_variables_ to reset // var_to_score_change. And in particular we need to list all variable whose // score changed here. Not just the one for which we have a decrease. void LinearIncrementalEvaluator::UpdateScoreOnWeightUpdate( @@ -293,10 +281,9 @@ void LinearIncrementalEvaluator::UpdateScoreOnWeightUpdate( num_ops_ += end; for (int k = 0; k < end; ++k, ++i) { const int var = row_var_buffer_[i]; - if (!in_last_affected_variables_[var]) { + if (!last_affected_variables_[var]) { var_to_score_change[var] = enforcement_change; - in_last_affected_variables_[var] = true; - last_affected_variables_.push_back(var); + last_affected_variables_.Set(var); } else { var_to_score_change[var] += enforcement_change; } @@ -334,10 +321,9 @@ void LinearIncrementalEvaluator::UpdateScoreOnWeightUpdate( const int64_t coeff = row_coeffs[k]; const int64_t diff = violation(activity + coeff * jump_deltas[var]) - old_distance; - if (!in_last_affected_variables_[var]) { + if (!last_affected_variables_[var]) { var_to_score_change[var] = static_cast(diff); - in_last_affected_variables_[var] = true; - last_affected_variables_.push_back(var); + last_affected_variables_.Set(var); } else { var_to_score_change[var] += 
static_cast(diff); } @@ -361,10 +347,7 @@ void LinearIncrementalEvaluator::UpdateScoreOnNewlyEnforced( for (int k = 0; k < end; ++k, ++i) { const int var = row_var_buffer_[i]; jump_scores[var] -= weight_time_violation; - if (!in_last_affected_variables_[var]) { - in_last_affected_variables_[var] = true; - last_affected_variables_.push_back(var); - } + last_affected_variables_.Set(var); } } @@ -381,10 +364,7 @@ void LinearIncrementalEvaluator::UpdateScoreOnNewlyEnforced( domains_[c].Distance(activities_[c] + coeff * jump_deltas[var]); jump_scores[var] += weight * static_cast(new_distance - old_distance); - if (!in_last_affected_variables_[var]) { - in_last_affected_variables_[var] = true; - last_affected_variables_.push_back(var); - } + last_affected_variables_.Set(var); } } } @@ -422,10 +402,7 @@ void LinearIncrementalEvaluator::UpdateScoreOnNewlyUnenforced( domains_[c].Distance(activities_[c] + coeff * jump_deltas[var]); jump_scores[var] -= weight * static_cast(new_distance - old_distance); - if (!in_last_affected_variables_[var]) { - in_last_affected_variables_[var] = true; - last_affected_variables_.push_back(var); - } + last_affected_variables_.Set(var); } } } @@ -444,9 +421,8 @@ void LinearIncrementalEvaluator::UpdateScoreOfEnforcementIncrease( const int var = row_var_buffer_[i]; if (jump_deltas[var] == 1) { jump_scores[var] += score_change; - if (score_change < 0.0 && !in_last_affected_variables_[var]) { - in_last_affected_variables_[var] = true; - last_affected_variables_.push_back(var); + if (score_change < 0.0) { + last_affected_variables_.Set(var); } } } @@ -455,9 +431,8 @@ void LinearIncrementalEvaluator::UpdateScoreOfEnforcementIncrease( const int var = row_var_buffer_[i]; if (jump_deltas[var] == -1) { jump_scores[var] += score_change; - if (score_change < 0.0 && !in_last_affected_variables_[var]) { - in_last_affected_variables_[var] = true; - last_affected_variables_.push_back(var); + if (score_change < 0.0) { + last_affected_variables_.Set(var); } } } 
@@ -505,9 +480,8 @@ void LinearIncrementalEvaluator::UpdateScoreOnActivityChange( for (int k = 0; k < end; ++k, ++i) { const int var = row_var_buffer_[i]; jump_scores[var] += delta; - if (delta < 0.0 && !in_last_affected_variables_[var]) { - in_last_affected_variables_[var] = true; - last_affected_variables_.push_back(var); + if (delta < 0.0) { + last_affected_variables_.Set(var); } } } @@ -560,10 +534,7 @@ void LinearIncrementalEvaluator::UpdateScoreOnActivityChange( // we know that the score will always move in the same direction, so we // might skip the last_affected_variables_ update. jump_scores[var] += weight * static_cast(diff); - if (!in_last_affected_variables_[var]) { - in_last_affected_variables_[var] = true; - last_affected_variables_.push_back(var); - } + last_affected_variables_.Set(var); } } } @@ -948,7 +919,7 @@ void LinearIncrementalEvaluator::PrecomputeCompactView( cached_deltas_.assign(columns_.size(), 0); cached_scores_.assign(columns_.size(), 0); - last_affected_variables_.ClearAndReserve(columns_.size()); + last_affected_variables_.ClearAndResize(columns_.size()); } bool LinearIncrementalEvaluator::ViolationChangeIsConvex(int var) const { @@ -1489,8 +1460,8 @@ void AddCircuitFlowConstraints(LinearIncrementalEvaluator& linear_evaluator, // ----- LsEvaluator ----- LsEvaluator::LsEvaluator(const CpModelProto& cp_model, - const SatParameters& params) - : cp_model_(cp_model), params_(params) { + const SatParameters& params, TimeLimit* time_limit) + : cp_model_(cp_model), params_(params), time_limit_(time_limit) { var_to_constraints_.resize(cp_model_.variables_size()); jump_value_optimal_.resize(cp_model_.variables_size(), true); num_violated_constraint_per_var_ignoring_objective_.assign( @@ -1506,8 +1477,9 @@ LsEvaluator::LsEvaluator(const CpModelProto& cp_model, LsEvaluator::LsEvaluator( const CpModelProto& cp_model, const SatParameters& params, const std::vector& ignored_constraints, - const std::vector& additional_constraints) - : 
cp_model_(cp_model), params_(params) { + const std::vector& additional_constraints, + TimeLimit* time_limit) + : cp_model_(cp_model), params_(params), time_limit_(time_limit) { var_to_constraints_.resize(cp_model_.variables_size()); jump_value_optimal_.resize(cp_model_.variables_size(), true); num_violated_constraint_per_var_ignoring_objective_.assign( @@ -1827,9 +1799,17 @@ void LsEvaluator::CompileConstraintsAndObjective( } } + static constexpr int kTimeoutCheckInterval = 1000; + int next_timeout_check_counter = 0; for (int c = 0; c < cp_model_.constraints_size(); ++c) { if (ignored_constraints[c]) continue; CompileOneConstraint(cp_model_.constraints(c)); + if (next_timeout_check_counter++ == kTimeoutCheckInterval) { + if (time_limit_->LimitReached()) { + break; + } + next_timeout_check_counter = 0; + } } for (const ConstraintProto& ct : additional_constraints) { diff --git a/ortools/sat/constraint_violation.h b/ortools/sat/constraint_violation.h index 437b141a32..014ffab1fc 100644 --- a/ortools/sat/constraint_violation.h +++ b/ortools/sat/constraint_violation.h @@ -26,8 +26,10 @@ #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/sat/util.h" +#include "ortools/util/bitset.h" #include "ortools/util/dense_set.h" #include "ortools/util/sorted_interval_list.h" +#include "ortools/util/time_limit.h" namespace operations_research { namespace sat { @@ -80,7 +82,7 @@ class LinearIncrementalEvaluator { // independent of the set of positive constraint weight used. void ClearAffectedVariables(); absl::Span VariablesAffectedByLastUpdate() const { - return last_affected_variables_; + return last_affected_variables_.PositionsSetAtLeastOnce(); } // Query violation. 
@@ -234,8 +236,7 @@ class LinearIncrementalEvaluator { std::vector cached_deltas_; std::vector cached_scores_; - std::vector in_last_affected_variables_; - FixedCapacityVector last_affected_variables_; + SparseBitset last_affected_variables_; mutable size_t num_ops_ = 0; }; @@ -306,10 +307,12 @@ class CompiledConstraintWithProto : public CompiledConstraint { class LsEvaluator { public: // The cp_model must outlive this class. - LsEvaluator(const CpModelProto& cp_model, const SatParameters& params); + LsEvaluator(const CpModelProto& cp_model, const SatParameters& params, + TimeLimit* time_limit); LsEvaluator(const CpModelProto& cp_model, const SatParameters& params, const std::vector& ignored_constraints, - const std::vector& additional_constraints); + const std::vector& additional_constraints, + TimeLimit* time_limit); // Intersects the domain of the objective with [lb..ub]. // It returns true if a reduction of the domain took place. @@ -443,6 +446,7 @@ class LsEvaluator { std::vector> var_to_constraints_; std::vector> constraint_to_vars_; std::vector jump_value_optimal_; + TimeLimit* time_limit_; UnsafeDenseSet violated_constraints_; std::vector num_violated_constraint_per_var_ignoring_objective_; diff --git a/ortools/sat/constraint_violation_test.cc b/ortools/sat/constraint_violation_test.cc index 07fca3e79c..eb1199c043 100644 --- a/ortools/sat/constraint_violation_test.cc +++ b/ortools/sat/constraint_violation_test.cc @@ -25,6 +25,7 @@ #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_utils.h" #include "ortools/util/sorted_interval_list.h" +#include "ortools/util/time_limit.h" namespace operations_research { namespace sat { @@ -128,7 +129,8 @@ TEST(ConstraintViolationTest, BasicExactlyOneExampleNonViolated) { constraints { exactly_one { literals: [ 0, 1, 2, 3 ] } } )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({0, 0, 0, 1}); 
EXPECT_EQ(0, ls.SumOfViolations()); } @@ -142,7 +144,8 @@ TEST(ConstraintViolationTest, BasicExactlyOneExampleViolated) { constraints { exactly_one { literals: [ 0, 1, 2, 3 ] } } )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({0, 0, 1, 1}); EXPECT_EQ(1, ls.SumOfViolations()); EXPECT_THAT(ls.ViolatedConstraints(), ElementsAre(0)); @@ -161,7 +164,8 @@ TEST(ConstraintViolationTest, BasicBoolOrViolated) { constraints { bool_or { literals: [ 0, -2, 2, -4 ] } } )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({0, 1, 0, 1}); EXPECT_EQ(1, ls.SumOfViolations()); ls.ComputeAllViolations({0, 0, 0, 1}); @@ -183,7 +187,8 @@ TEST(ConstraintViolationTest, BasicLinearExample) { } )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({0, 0}); EXPECT_EQ(1, ls.SumOfViolations()); ls.ComputeAllViolations({2, 0}); @@ -206,7 +211,8 @@ TEST(ConstraintViolationTest, BasicObjectiveExampleWithChange) { } )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({0, 0, 0, 1}); EXPECT_EQ(0, ls.SumOfViolations()); EXPECT_EQ(ls.NumViolatedConstraintsForVarIgnoringObjective(0), 0); @@ -240,7 +246,8 @@ TEST(ConstraintViolationTest, BasicLinMaxExampleNoViolation) { } )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({1, 1, 0}); EXPECT_EQ(0, ls.SumOfViolations()); } @@ -260,7 +267,8 @@ TEST(ConstraintViolationTest, BasicLinMaxExampleExcessViolation) { } )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); 
ls.ComputeAllViolations({0, 0, 0}); EXPECT_EQ(3, ls.SumOfViolations()); } @@ -280,7 +288,8 @@ TEST(ConstraintViolationTest, BasicLinMaxExampleMissingViolation) { } )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({1, 0, 0}); EXPECT_EQ(1, ls.SumOfViolations()); } @@ -299,7 +308,8 @@ TEST(ConstraintViolationTest, BasicLinMaxExampleNegativeCoeffs) { } )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({33, 33, 33}); EXPECT_EQ(1, ls.SumOfViolations()); } @@ -399,7 +409,8 @@ TEST(ConstraintViolationTest, BasicNoOverlapExample) { )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({0, 4, 8, 1}); EXPECT_EQ(0, ls.SumOfViolations()); @@ -440,7 +451,8 @@ TEST(ConstraintViolationTest, TwoIntervalsNoOverlapExample) { )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({0, 4, 1}); EXPECT_EQ(0, ls.SumOfViolations()); @@ -499,7 +511,8 @@ TEST(ConstraintViolationTest, BasicCumulativeExample) { )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({0, 4, 8, 1, 2, 2}); EXPECT_EQ(0, ls.SumOfViolations()); @@ -525,7 +538,8 @@ TEST(ConstraintViolationTest, EmptyNoOverlap) { )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({0, 4, 8}); EXPECT_EQ(0, ls.SumOfViolations()); } @@ -551,7 +565,8 @@ TEST(ConstraintViolationTest, WeightedViolationAndDelta) { )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, 
&time_limit); std::vector solution{0, 0}; std::vector weight{0.0, 0.0}; @@ -593,7 +608,8 @@ TEST(ConstraintViolationTest, Breakpoints) { } )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({0, 0}); // We don't want the same value as zero, so we should include both values @@ -626,7 +642,8 @@ TEST(ConstraintViolationTest, BasicCircuit) { )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({0, 0, 0, 0, 0, 0, 0, 0}); EXPECT_GE(ls.SumOfViolations(), 1); ls.ComputeAllViolations({1, 0, 1, 0, 0, 0, 0, 0}); @@ -662,7 +679,8 @@ TEST(ConstraintViolationTest, BasicMultiCircuit) { )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); ls.ComputeAllViolations({0, 0, 0, 0, 0, 0, 0}); EXPECT_GE(ls.SumOfViolations(), 1) << "arcs: None"; ls.ComputeAllViolations({1, 0, 1, 0, 0, 0, 0}); @@ -702,7 +720,8 @@ TEST(ConstraintViolationTest, LastUpdateViolationChanges) { } )pb"); SatParameters params; - LsEvaluator ls(model, params); + TimeLimit time_limit; + LsEvaluator ls(model, params, &time_limit); std::vector unused_jump_scores = {0.0, 0.0, 0.0}; std::vector solution = {2, 1, 3}; diff --git a/ortools/sat/cp_model.proto b/ortools/sat/cp_model.proto index 625471ad29..6888762149 100644 --- a/ortools/sat/cp_model.proto +++ b/ortools/sat/cp_model.proto @@ -221,7 +221,7 @@ message CircuitConstraintProto { // - Self-arcs are allowed except for node 0. // - There is no cycle in this graph, except through node 0. // -// Note: Currently this constraint expect all the nodes in [0, num_nodes) to +// Note: Currently this constraint expects all the nodes in [0, num_nodes) to // have at least one incident arc. The model will be considered invalid if it // is not the case. 
You can add self-arc fixed to one to ignore some nodes if // needed. @@ -246,6 +246,24 @@ message RoutesConstraintProto { // arc_literal => (current_capacity_tail + demand <= current_capacity_head) repeated int32 demands = 4; int64 capacity = 5; + + // A set of linear expressions associated with the nodes. + message NodeExpressions { + // The i-th element is the linear expression associated with the i-th node. + // All expressions must be affine expressions (a * var + b). + repeated LinearExpressionProto exprs = 1; + } + + // Expressions associated with the nodes of the graph, such as the load of the + // vehicle arriving at a node, or the time at which a vehicle arrives at a + // node. Expressions with the same "dimension" (such as "load" or "time") must + // be listed together. + // This field is optional. If it is set, the linear constraints of size 1 or 2 + // between the variables in these expressions will be used to derive cuts for + // this constraint. If it is not set, the solver will try to automatically + // derive it, from the linear constraints of size 1 or 2 in the model (this + // can fail in complex cases). 
+ repeated NodeExpressions dimensions = 6; } // The values of the n-tuple formed by the given expression can only be one of diff --git a/ortools/sat/cp_model_checker.cc b/ortools/sat/cp_model_checker.cc index a7c8475570..8ea56c453f 100644 --- a/ortools/sat/cp_model_checker.cc +++ b/ortools/sat/cp_model_checker.cc @@ -625,7 +625,8 @@ std::string ValidateGraphInput(bool is_route, const GraphProto& graph) { return ""; } -std::string ValidateRoutesConstraint(const ConstraintProto& ct) { +std::string ValidateRoutesConstraint(const CpModelProto& model, + const ConstraintProto& ct) { int max_node = 0; absl::flat_hash_set nodes; for (const int node : ct.routes().tails()) { @@ -651,10 +652,31 @@ std::string ValidateRoutesConstraint(const ConstraintProto& ct) { if (!ct.routes().demands().empty() && ct.routes().demands().size() != nodes.size()) { return absl::StrCat( - "If the demands fields is set, it must be of size num_nodes:", + "If the demands fields in a route constraint is set, it must be of " + "size num_nodes:", nodes.size()); } + for (const RoutesConstraintProto::NodeExpressions& dimension : + ct.routes().dimensions()) { + if (dimension.exprs().size() != nodes.size()) { + return absl::StrCat( + "If the dimensions field in a route constraint is set, its elements " + "must be of size num_nodes:", + nodes.size()); + } + for (const LinearExpressionProto& expr : dimension.exprs()) { + for (const int v : expr.vars()) { + if (!VariableReferenceIsValid(model, v)) { + return absl::StrCat("Out of bound integer variable ", v, + " in route constraint ", + ProtobufShortDebugString(ct)); + } + } + RETURN_IF_NOT_EMPTY(ValidateAffineExpression(model, expr)); + } + } + return ValidateGraphInput(/*is_route=*/true, ct.routes()); } @@ -1130,7 +1152,7 @@ std::string ValidateCpModel(const CpModelProto& model, bool after_presolve) { ValidateGraphInput(/*is_route=*/false, ct.circuit())); break; case ConstraintProto::ConstraintCase::kRoutes: - 
RETURN_IF_NOT_EMPTY(ValidateRoutesConstraint(ct)); + RETURN_IF_NOT_EMPTY(ValidateRoutesConstraint(model, ct)); break; case ConstraintProto::ConstraintCase::kInterval: RETURN_IF_NOT_EMPTY(ValidateIntervalConstraint(model, ct)); diff --git a/ortools/sat/cp_model_checker_test.cc b/ortools/sat/cp_model_checker_test.cc index cca6d9b0a5..23f9678959 100644 --- a/ortools/sat/cp_model_checker_test.cc +++ b/ortools/sat/cp_model_checker_test.cc @@ -665,6 +665,121 @@ TEST(ValidateCpModelTest, IntervalMustAppearBeforeTheyAreUsed) { HasSubstr("must appear before")); } +TEST(ValidateCpModelTest, ValidNodeExpressions) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + routes { + tails: [ 0, 1 ] + heads: [ 1, 0 ] + literals: [ 0, 1 ] + dimensions { + exprs { + vars: [ 2 ] + coeffs: [ 1 ] + } + exprs { + vars: [ 3 ] + coeffs: [ 2 ] + } + } + dimensions { + exprs {} + exprs {} + } + } + } + )pb"); + EXPECT_TRUE(ValidateCpModel(model).empty()); +} + +TEST(ValidateCpModelTest, InvalidNodeExpressionsCount) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 10 ] } + constraints { + routes { + tails: [ 0, 1 ] + heads: [ 1, 0 ] + literals: [ 0, 1 ] + dimensions { + exprs { + vars: [ 2 ] + coeffs: [ 1 ] + } + exprs { + vars: [ 3 ] + coeffs: [ 1 ] + } + exprs { + vars: [ 2 ] + coeffs: [ 1 ] + } + } + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("must be of size num_nodes:2")); +} + +TEST(ValidateCpModelTest, NonAffineExpressionInRoutesConstraint) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + routes { + tails: [ 0, 1 ] + heads: [ 1, 0 ] + literals: [ 0, 1 ] + dimensions { + 
exprs { + vars: [ 2, 3 ] + coeffs: [ 1, 2 ] + } + exprs { + vars: [ 3 ] + coeffs: [ 1 ] + } + } + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("expression must be affine")); +} + +TEST(ValidateCpModelTest, InvalidNodeExpressionInRoutesConstraint) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 10 ] } + constraints { + routes { + tails: [ 0, 1 ] + heads: [ 1, 0 ] + literals: [ 0, 1 ] + dimensions { + exprs { + vars: [ 2 ] + coeffs: [ 1 ] + } + exprs { + vars: [ 3 ] + coeffs: [ 1 ] + } + } + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), + HasSubstr("Out of bound integer variable 3 in route constraint")); +} + } // namespace } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model_copy.cc b/ortools/sat/cp_model_copy.cc index 72eea80387..967cde5371 100644 --- a/ortools/sat/cp_model_copy.cc +++ b/ortools/sat/cp_model_copy.cc @@ -23,6 +23,7 @@ #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "google/protobuf/arena.h" @@ -524,6 +525,14 @@ bool ModelCopy::CopyLinear(const ConstraintProto& ct, bool canonicalize) { FillDomainInProto(tight_domain, linear); if (canonicalize) { context_->CanonicalizeLinearConstraint(new_ct); + // We checked if the constraint was trivial above, but canonicalization can + // make it trivial again by simplifying expressions like (x - x). 
+ if (new_ct->linear().vars().empty() && + ReadDomainFromProto(new_ct->linear()).Contains(0)) { + context_->UpdateRuleStats("linear: trivial 0=0"); + context_->working_model->mutable_constraints()->RemoveLast(); + return true; + } } return true; } @@ -858,7 +867,7 @@ bool ModelCopy::CopyAndMapCumulative(const ConstraintProto& ct) { const int new_index = interval_mapping_[ct.cumulative().intervals(i)]; if (new_index != -1) { new_ct->add_intervals(new_index); - *new_ct->add_demands() = ct.cumulative().demands(i); + CopyLinearExpression(ct.cumulative().demands(i), new_ct->add_demands()); } } diff --git a/ortools/sat/cp_model_expand.cc b/ortools/sat/cp_model_expand.cc index 9ab3045ec1..3270d1beac 100644 --- a/ortools/sat/cp_model_expand.cc +++ b/ortools/sat/cp_model_expand.cc @@ -1961,6 +1961,10 @@ void ExpandPositiveTable(ConstraintProto* ct, PresolveContext* context) { bool AllDiffShouldBeExpanded(const Domain& union_of_domains, ConstraintProto* ct, PresolveContext* context) { + if (union_of_domains.Size() > context->params().max_alldiff_domain_size()) { + return false; + } + const AllDifferentConstraintProto& proto = *ct->mutable_all_diff(); const int num_exprs = proto.exprs_size(); int num_fully_encoded = 0; @@ -1976,7 +1980,7 @@ bool AllDiffShouldBeExpanded(const Domain& union_of_domains, return true; } - if (num_fully_encoded == num_exprs && union_of_domains.Size() < 256) { + if (num_fully_encoded == num_exprs) { // All variables fully encoded, and domains are small enough. 
return true; } diff --git a/ortools/sat/cp_model_lns.cc b/ortools/sat/cp_model_lns.cc index 726cef5184..8cca0d7839 100644 --- a/ortools/sat/cp_model_lns.cc +++ b/ortools/sat/cp_model_lns.cc @@ -32,6 +32,7 @@ #include "absl/container/flat_hash_set.h" #include "absl/flags/flag.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/meta/type_traits.h" #include "absl/random/bit_gen_ref.h" #include "absl/random/distributions.h" @@ -40,7 +41,6 @@ #include "absl/synchronization/mutex.h" #include "absl/types/span.h" #include "google/protobuf/arena.h" -#include "ortools/base/logging.h" #include "ortools/base/stl_util.h" #include "ortools/graph/connected_components.h" #include "ortools/sat/cp_model.pb.h" diff --git a/ortools/sat/cp_model_loader.cc b/ortools/sat/cp_model_loader.cc index df2b6bc4fb..431afd348e 100644 --- a/ortools/sat/cp_model_loader.cc +++ b/ortools/sat/cp_model_loader.cc @@ -27,11 +27,11 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "ortools/algorithms/sparse_permutation.h" -#include "ortools/base/logging.h" #include "ortools/base/mathutil.h" #include "ortools/base/stl_util.h" #include "ortools/base/strong_vector.h" @@ -62,6 +62,7 @@ #include "ortools/util/logging.h" #include "ortools/util/sorted_interval_list.h" #include "ortools/util/strong_integers.h" +#include "ortools/util/time_limit.h" namespace operations_research { namespace sat { @@ -400,6 +401,7 @@ void ExtractEncoding(const CpModelProto& model_proto, Model* m) { auto* logger = m->GetOrCreate(); auto* integer_trail = m->GetOrCreate(); auto* sat_solver = m->GetOrCreate(); + auto* time_limit = m->GetOrCreate(); // TODO(user): Debug what makes it unsat at this point. 
if (sat_solver->ModelIsUnsat()) return; @@ -618,6 +620,8 @@ void ExtractEncoding(const CpModelProto& model_proto, Model* m) { // the time being. if (sat_solver->ModelIsUnsat()) return; + if (time_limit->LimitReached()) return; + // Encode the half-equalities. // // TODO(user): delay this after PropagateEncodingFromEquivalenceRelations()? @@ -1276,10 +1280,9 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { max_sum += std::max(term_a, term_b); } - // Load conditional precedences. + // Load conditional precedences and always true binary relations. const SatParameters& params = *m->GetOrCreate(); - if (params.auto_detect_greater_than_at_least_one_of() && - ct.enforcement_literal().size() == 1 && vars.size() <= 2) { + if (ct.enforcement_literal().size() <= 1 && vars.size() <= 2) { // To avoid overflow in the code below, we tighten the bounds. int64_t rhs_min = ct.linear().domain(0); int64_t rhs_max = ct.linear().domain(ct.linear().domain().size() - 1); @@ -1287,13 +1290,19 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { rhs_max = std::min(rhs_max, max_sum.value()); auto* repository = m->GetOrCreate(); - const Literal lit = mapping->Literal(ct.enforcement_literal(0)); - const Domain domain = ReadDomainFromProto(ct.linear()); - if (vars.size() == 1) { - repository->Add(lit, {vars[0], coeffs[0]}, {}, rhs_min, rhs_max); - } else if (vars.size() == 2) { - repository->Add(lit, {vars[0], coeffs[0]}, {vars[1], coeffs[1]}, rhs_min, - rhs_max); + if (ct.enforcement_literal().empty()) { + if (vars.size() == 2) { + repository->Add(Literal(kNoLiteralIndex), {vars[0], coeffs[0]}, + {vars[1], coeffs[1]}, rhs_min, rhs_max); + } + } else { + const Literal lit = mapping->Literal(ct.enforcement_literal(0)); + if (vars.size() == 1) { + repository->Add(lit, {vars[0], coeffs[0]}, {}, rhs_min, rhs_max); + } else if (vars.size() == 2) { + repository->Add(lit, {vars[0], coeffs[0]}, {vars[1], coeffs[1]}, + rhs_min, rhs_max); + } } } diff --git 
a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index ae405362b4..21ce011550 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -38,6 +38,7 @@ #include "absl/flags/flag.h" #include "absl/hash/hash.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/meta/type_traits.h" #include "absl/numeric/int128.h" #include "absl/random/distributions.h" @@ -6207,8 +6208,9 @@ bool CpModelPresolver::PresolveNoOverlap2D(int /*c*/, ConstraintProto* ct) { non_fixed_boxes.push_back( {.box_index = new_size, .bounding_area = bounding_boxes.back(), - .x_size = context_->SizeMin(x_interval_index), - .y_size = context_->SizeMin(y_interval_index)}); + .x_size = std::max(int64_t{0}, context_->SizeMin(x_interval_index)), + .y_size = + std::max(int64_t{0}, context_->SizeMin(y_interval_index))}); } new_size++; @@ -6218,12 +6220,12 @@ bool CpModelPresolver::PresolveNoOverlap2D(int /*c*/, ConstraintProto* ct) { if (y_constant && !context_->IntervalIsConstant(y_interval_index)) { y_constant = false; } - if (context_->SizeMax(x_interval_index) == 0 || - context_->SizeMax(y_interval_index) == 0) { + if (context_->SizeMax(x_interval_index) <= 0 || + context_->SizeMax(y_interval_index) <= 0) { has_zero_sized_interval = true; } - if (context_->SizeMin(x_interval_index) == 0 || - context_->SizeMin(y_interval_index) == 0) { + if (context_->SizeMin(x_interval_index) <= 0 || + context_->SizeMin(y_interval_index) <= 0) { has_potential_zero_sized_interval = true; } } @@ -13173,6 +13175,21 @@ void UpdateHintInProto(PresolveContext* context) { crush.StoreSolutionAsHint(*proto); } +// Canonicalizes the routes constraints node expressions. In particular, +// replaces the variables in these expressions with their representative. 
+void CanonicalizeRoutesConstraintNodeExpressions(PresolveContext* context) { + CpModelProto& proto = *context->working_model; + for (ConstraintProto& ct_ref : *proto.mutable_constraints()) { + if (ct_ref.constraint_case() != ConstraintProto::kRoutes) continue; + for (RoutesConstraintProto::NodeExpressions& node_exprs : + *ct_ref.mutable_routes()->mutable_dimensions()) { + for (LinearExpressionProto& expr : *node_exprs.mutable_exprs()) { + context->CanonicalizeLinearExpression({}, &expr); + } + } + } +} + } // namespace // The presolve works as follow: @@ -13359,7 +13376,8 @@ CpSolverStatus CpModelPresolver::Presolve() { // If the presolve always keep symmetry, we compute it once and for all. if (!context_->working_model->has_symmetry()) { DetectAndAddSymmetryToProto(context_->params(), - context_->working_model, logger_); + context_->working_model, logger_, + context_->time_limit()); } // We distinguish an empty symmetry message meaning that symmetry were @@ -13643,6 +13661,7 @@ CpSolverStatus CpModelPresolver::Presolve() { } DCHECK(context_->ConstraintVariableUsageIsConsistent()); + CanonicalizeRoutesConstraintNodeExpressions(context_); UpdateHintInProto(context_); const int old_size = postsolve_mapping_->size(); ApplyVariableMapping(absl::MakeSpan(mapping), postsolve_mapping_, @@ -13702,6 +13721,23 @@ void ApplyVariableMapping(absl::Span mapping, for (ConstraintProto& ct_ref : *proto->mutable_constraints()) { ApplyToAllVariableIndices(mapping_function, &ct_ref); ApplyToAllLiteralIndices(mapping_function, &ct_ref); + if (ct_ref.constraint_case() == ConstraintProto::kRoutes) { + for (RoutesConstraintProto::NodeExpressions& node_exprs : + *ct_ref.mutable_routes()->mutable_dimensions()) { + for (LinearExpressionProto& expr : *node_exprs.mutable_exprs()) { + if (expr.vars().empty()) continue; + DCHECK_EQ(expr.vars().size(), 1); + const int ref = expr.vars(0); + const int image = mapping[PositiveRef(ref)]; + if (image < 0) { + expr.clear_vars(); + expr.clear_coeffs(); 
+ continue; + } + expr.set_vars(0, RefIsPositive(ref) ? image : NegatedRef(image)); + } + } + } } // Remap the objective variables. diff --git a/ortools/sat/cp_model_search_test.cc b/ortools/sat/cp_model_search_test.cc index a1a99f7853..155a877835 100644 --- a/ortools/sat/cp_model_search_test.cc +++ b/ortools/sat/cp_model_search_test.cc @@ -19,10 +19,10 @@ #include #include "absl/container/flat_hash_map.h" +#include "absl/log/log.h" #include "absl/strings/str_join.h" #include "gtest/gtest.h" #include "ortools/base/gmock.h" -#include "ortools/base/logging.h" #include "ortools/base/parse_test_proto.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_solver.h" diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index f0b124ed9c..98fd5ca895 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -42,6 +42,7 @@ #include "absl/container/flat_hash_map.h" #include "absl/flags/flag.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" @@ -52,7 +53,6 @@ #include "absl/types/span.h" #include "google/protobuf/arena.h" #include "google/protobuf/text_format.h" -#include "ortools/base/logging.h" #include "ortools/port/proto_utils.h" #include "ortools/sat/combine_solutions.h" #include "ortools/sat/cp_model.pb.h" @@ -77,6 +77,7 @@ #include "ortools/sat/model.h" #include "ortools/sat/parameters_validation.h" #include "ortools/sat/presolve_context.h" +#include "ortools/sat/routing_cuts.h" #include "ortools/sat/sat_base.h" #include "ortools/sat/sat_inprocessing.h" #include "ortools/sat/sat_parameters.pb.h" @@ -1730,7 +1731,10 @@ class LnsSolver : public SubSolver { // latest LNS fragment. absl::Mutex next_arena_size_mutex_; int64_t next_arena_size_ ABSL_GUARDED_BY(next_arena_size_mutex_) = - helper_->ModelProto().SpaceUsedLong(); + helper_->ModelProto().GetArena() == nullptr + ? 
Neighborhood::kDefaultArenaSizePerVariable + * helper_->ModelProto().variables_size() + : helper_->ModelProto().GetArena()->SpaceUsed(); }; void SolveCpModelParallel(SharedClasses* shared, Model* global_model) { @@ -2479,6 +2483,19 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { return shared_response_manager->GetResponse(); } + // This uses the relations from the model_proto to fill the node expressions + // of new_cp_model_proto. This is useful to have as many binary relations as + // possible (new_cp_model_proto can have less relations because the model + // copier can remove the ones which are always true). + const auto [num_routes, num_dimensions] = + MaybeFillMissingRoutesConstraintNodeExpressions(model_proto, + *new_cp_model_proto); + if (num_dimensions > 0) { + SOLVER_LOG(logger, "Routes: ", num_dimensions, + " dimension(s) automatically inferred for ", num_routes, + " routes constraint(s)."); + } + if (context->working_model->has_symmetry()) { SOLVER_LOG(logger, "Ignoring internal symmetry field"); context->working_model->clear_symmetry(); @@ -2692,7 +2709,10 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { // Moreover it is possible we will not find them again as the constraints // might have changed. 
} else { - DetectAndAddSymmetryToProto(params, new_cp_model_proto, logger); + TimeLimit time_limit; + shared_time_limit->UpdateLocalLimit(&time_limit); + DetectAndAddSymmetryToProto(params, new_cp_model_proto, logger, + &time_limit); } } diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc index 1a035ab89f..ab6c7c2fb3 100644 --- a/ortools/sat/cp_model_solver_helpers.cc +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -35,6 +35,7 @@ #include "absl/container/flat_hash_set.h" #include "absl/flags/flag.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/strings/escaping.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" @@ -42,7 +43,6 @@ #include "absl/types/span.h" #include "google/protobuf/arena.h" #include "ortools/algorithms/sparse_permutation.h" -#include "ortools/base/logging.h" #include "ortools/base/strong_vector.h" #include "ortools/graph/connected_components.h" #include "ortools/port/proto_utils.h" diff --git a/ortools/sat/cp_model_symmetries.cc b/ortools/sat/cp_model_symmetries.cc index 18645dc75b..98a1ca6ab0 100644 --- a/ortools/sat/cp_model_symmetries.cc +++ b/ortools/sat/cp_model_symmetries.cc @@ -646,7 +646,7 @@ std::unique_ptr GenerateGraphForSymmetryDetection( void FindCpModelSymmetries( const SatParameters& params, const CpModelProto& problem, std::vector>* generators, - double deterministic_limit, SolverLogger* logger) { + SolverLogger* logger, TimeLimit* solver_time_limit) { CHECK(generators != nullptr); generators->clear(); @@ -678,10 +678,11 @@ void FindCpModelSymmetries( return; } + std::unique_ptr time_limit = TimeLimit::FromDeterministicTime( + params.symmetry_detection_deterministic_time_limit()); + time_limit->MergeWithGlobalTimeLimit(solver_time_limit); GraphSymmetryFinder symmetry_finder(*graph, /*is_undirected=*/false); std::vector factorized_automorphism_group_size; - std::unique_ptr time_limit = - TimeLimit::FromDeterministicTime(deterministic_limit); 
const absl::Status status = symmetry_finder.FindSymmetries( &equivalence_classes, generators, &factorized_automorphism_group_size, time_limit.get()); @@ -767,14 +768,13 @@ void LogOrbitInformation(absl::Span var_to_orbit_index, } // namespace void DetectAndAddSymmetryToProto(const SatParameters& params, - CpModelProto* proto, SolverLogger* logger) { + CpModelProto* proto, SolverLogger* logger, + TimeLimit* time_limit) { SymmetryProto* symmetry = proto->mutable_symmetry(); symmetry->Clear(); std::vector> generators; - FindCpModelSymmetries(params, *proto, &generators, - params.symmetry_detection_deterministic_time_limit(), - logger); + FindCpModelSymmetries(params, *proto, &generators, logger, time_limit); if (generators.empty()) { proto->clear_symmetry(); return; @@ -968,10 +968,8 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { } std::vector> generators; - FindCpModelSymmetries( - params, proto, &generators, - context->params().symmetry_detection_deterministic_time_limit(), - context->logger()); + FindCpModelSymmetries(params, proto, &generators, context->logger(), + context->time_limit()); // Remove temporary affine relation. context->working_model->mutable_constraints()->DeleteSubrange( diff --git a/ortools/sat/cp_model_symmetries.h b/ortools/sat/cp_model_symmetries.h index 27b61528d6..e842ad923f 100644 --- a/ortools/sat/cp_model_symmetries.h +++ b/ortools/sat/cp_model_symmetries.h @@ -22,6 +22,7 @@ #include "ortools/sat/presolve_context.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/util/logging.h" +#include "ortools/util/time_limit.h" namespace operations_research { namespace sat { @@ -32,7 +33,11 @@ namespace sat { // representation of the) problem variables. // // Note that we ignore the variables that appear in no constraint, instead of -// outputing the full symmetry group involving them. +// outputting the full symmetry group involving them. 
+// +// Note that the time limit is the global one of the solver, this method +// enforces params.symmetry_detection_deterministic_time_limit() per call on top +// of it. // // TODO(user): On SAT problems it is more powerful to detect permutations also // involving the negation of the problem variables. So that we could find a @@ -40,15 +45,16 @@ namespace sat { // // TODO(user): As long as we only exploit symmetry involving only Boolean // variables we can make this code more efficient by not detecting symmetries -// involing integer variable. +// involving integer variable. void FindCpModelSymmetries( const SatParameters& params, const CpModelProto& problem, std::vector>* generators, - double deterministic_limit, SolverLogger* logger); + SolverLogger* logger, TimeLimit* solver_time_limit); // Detects symmetries and fill the symmetry field. void DetectAndAddSymmetryToProto(const SatParameters& params, - CpModelProto* proto, SolverLogger* logger); + CpModelProto* proto, SolverLogger* logger, + TimeLimit* solver_time_limit); // Basic implementation of some symmetry breaking during presolve. 
// diff --git a/ortools/sat/cp_model_symmetries_test.cc b/ortools/sat/cp_model_symmetries_test.cc index 1978bdbfa2..30ce5f293e 100644 --- a/ortools/sat/cp_model_symmetries_test.cc +++ b/ortools/sat/cp_model_symmetries_test.cc @@ -14,7 +14,6 @@ #include "ortools/sat/cp_model_symmetries.h" #include -#include #include #include @@ -29,6 +28,7 @@ #include "ortools/sat/presolve_context.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/util/logging.h" +#include "ortools/util/time_limit.h" namespace operations_research { namespace sat { @@ -70,8 +70,8 @@ TEST(FindCpModelSymmetries, FindsSymmetry) { std::vector> generators; SolverLogger logger; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(1 2)"); @@ -84,8 +84,8 @@ TEST(FindCpModelSymmetries, NoSymmetryIfDifferentVariableBounds) { std::vector> generators; SolverLogger logger; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 0); } @@ -96,8 +96,8 @@ TEST(FindCpModelSymmetries, NoSymmetryIfDifferentConstraintCoefficients) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 0); } @@ -109,8 +109,8 @@ TEST(FindCpModelSymmetries, NoSymmetryIfDifferentObjectiveCoefficients) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); 
ASSERT_EQ(generators.size(), 0); } @@ -149,8 +149,8 @@ TEST(FindCpModelSymmetries, FindsSymmetryIfSameConstraintBounds) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(1 2)"); @@ -158,8 +158,7 @@ TEST(FindCpModelSymmetries, FindsSymmetryIfSameConstraintBounds) { // Make sure that if the constraint bounds are different, the symmetry is // broken. model.mutable_constraints(1)->mutable_linear()->set_domain(1, 20); - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 0); } @@ -172,8 +171,8 @@ TEST(FindCpModelSymmetries, SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 0); } @@ -185,8 +184,8 @@ TEST(FindCpModelSymmetries, FindsSymmetryIfSameConstraintEnforcementLiterals) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(1 2)"); @@ -200,8 +199,8 @@ TEST(FindCpModelSymmetries, SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(1 2)"); @@ -225,8 +224,8 @@ 
TEST(FindCpModelSymmetries, LinMaxConstraint) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(1 2)"); @@ -251,8 +250,8 @@ TEST(FindCpModelSymmetries, UnsupportedConstraintTypeReturnsNoGenerators) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 0); } @@ -276,8 +275,8 @@ TEST(FindCpModelSymmetries, FindsSymmetryIfNoConstraints) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 0); } @@ -313,8 +312,8 @@ TEST(FindCpModelSymmetries, NoSymmetryIfDuplicateConstraints) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 0); } @@ -352,8 +351,8 @@ TEST(FindCpModelSymmetries, ImplicationTestThatUsedToFail) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(0 1) (2 3)"); @@ -391,7 +390,8 @@ TEST(DetectAndAddSymmetryToProto, BasicTest) { SolverLogger logger; SatParameters params; params.set_log_search_progress(true); - 
DetectAndAddSymmetryToProto(params, &model, &logger); + TimeLimit time_limit; + DetectAndAddSymmetryToProto(params, &model, &logger, &time_limit); // TODO(user): canonicalize the order in each cycle? const SymmetryProto expected = ParseTestProto(R"pb( @@ -426,8 +426,8 @@ TEST(FindCpModelSymmetries, FindsSymmetryInBoolOr) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(0 1)"); @@ -440,8 +440,8 @@ TEST(FindCpModelSymmetries, FindsSymmetryInNegatedBoolOr) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(0 2)"); @@ -457,8 +457,8 @@ TEST(FindCpModelSymmetries, FindsSymmetryInBoolOrWithEnforcementLiteral) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(0 2)"); @@ -471,8 +471,8 @@ TEST(FindCpModelSymmetries, FindsSymmetryInBoolXor) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(0 2)"); @@ -485,8 +485,8 @@ TEST(FindCpModelSymmetries, FindsSymmetryInNegatedBoolXor) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, 
model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(1 2)"); @@ -502,8 +502,8 @@ TEST(FindCpModelSymmetries, FindsSymmetryInBoolXorWithEnforcementLiteral) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(1 2)"); @@ -519,8 +519,8 @@ TEST(FindCpModelSymmetries, FindsSymmetryInBoolAnd) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(0 2)"); @@ -536,8 +536,8 @@ TEST(FindCpModelSymmetries, FindsSymmetryInNegatedBoolAnd) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(0 1)"); @@ -553,8 +553,8 @@ TEST(FindCpModelSymmetries, FindsSymmetryInBoolAndWithEnforcementLiteral) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(0 1)"); @@ -575,8 +575,8 @@ TEST(FindCpModelSymmetries, SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, 
- std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); ASSERT_EQ(generators.size(), 1); EXPECT_EQ(generators[0]->DebugString(), "(0 1)"); @@ -613,8 +613,8 @@ TEST(FindCpModelSymmetries, BasicSchedulingCase) { SolverLogger logger; std::vector> generators; - FindCpModelSymmetries({}, model, &generators, - std::numeric_limits::infinity(), &logger); + TimeLimit time_limit; + FindCpModelSymmetries({}, model, &generators, &logger, &time_limit); // The two intervals with the same size can be swapped. ASSERT_EQ(generators.size(), 1); diff --git a/ortools/sat/cp_model_utils.cc b/ortools/sat/cp_model_utils.cc index 5f5d355207..cefc28fef0 100644 --- a/ortools/sat/cp_model_utils.cc +++ b/ortools/sat/cp_model_utils.cc @@ -162,6 +162,7 @@ void GetReferencesUsedByConstraint(const ConstraintProto& ct, break; case ConstraintProto::ConstraintCase::kRoutes: AddIndices(ct.routes().literals(), literals); + // The node expressions are not used by the constraint itself. 
break; case ConstraintProto::ConstraintCase::kInverse: AddIndices(ct.inverse().f_direct(), variables); diff --git a/ortools/sat/cumulative_energy.cc b/ortools/sat/cumulative_energy.cc index 36e5d617c0..9b712cfd2d 100644 --- a/ortools/sat/cumulative_energy.cc +++ b/ortools/sat/cumulative_energy.cc @@ -23,9 +23,9 @@ #include "absl/container/inlined_vector.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/types/span.h" #include "ortools/base/iterator_adaptors.h" -#include "ortools/base/logging.h" #include "ortools/sat/2d_orthogonal_packing.h" #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" diff --git a/ortools/sat/cuts.cc b/ortools/sat/cuts.cc index bb31ab8f0e..845ccf9c92 100644 --- a/ortools/sat/cuts.cc +++ b/ortools/sat/cuts.cc @@ -30,6 +30,7 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/meta/type_traits.h" #include "absl/numeric/int128.h" #include "absl/strings/str_cat.h" diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index a541c30e28..38a9cdbd65 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -27,9 +27,9 @@ #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/numeric/bits.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" #include "ortools/sat/2d_mandatory_overlap_propagator.h" #include "ortools/sat/2d_orthogonal_packing.h" #include "ortools/sat/2d_try_edge_propagator.h" @@ -273,11 +273,11 @@ bool NonOverlappingRectanglesEnergyPropagator::Propagate() { return true; } - if (std::max(bounding_box.SizeX(), bounding_box.SizeY()) * - active_box_ranges.size() > - std::numeric_limits::max()) { + if (AtMinOrMaxInt64I( + CapProdI(CapProdI(bounding_box.SizeX(), bounding_box.SizeY()), + active_box_ranges.size()))) { // Avoid integer overflows if the area of the boxes get comparable with - // INT64_MAX + // INT64_MAX. 
return true; } diff --git a/ortools/sat/diffn_cuts.cc b/ortools/sat/diffn_cuts.cc index a89675c518..3c514bd78b 100644 --- a/ortools/sat/diffn_cuts.cc +++ b/ortools/sat/diffn_cuts.cc @@ -483,7 +483,7 @@ void GenerateNoOvelap2dCompletionTimeCuts(absl::string_view cut_name, // Best cut so far for this loop. int best_end = -1; double best_efficacy = 0.01; - IntegerValue best_min_rhs = 0; + IntegerValue best_min_total_area = 0; bool best_use_subset_sum = false; // Used in the first term of the rhs of the equation. @@ -548,16 +548,16 @@ void GenerateNoOvelap2dCompletionTimeCuts(absl::string_view cut_name, const IntegerValue rhs_second_term = CeilRatio(square_sum_energy, reachable_capacity); - IntegerValue min_rhs = CapAddI(sum_event_areas, rhs_second_term); - if (AtMinOrMaxInt64I(min_rhs)) break; - min_rhs = CeilRatio(min_rhs, 2); + IntegerValue min_total_area = CapAddI(sum_event_areas, rhs_second_term); + if (AtMinOrMaxInt64I(min_total_area)) break; + min_total_area = CeilRatio(min_total_area, 2); // shift contribution by current_start_min. - if (!AddProductTo(sum_energy, current_start_min, &min_rhs)) break; + if (!AddProductTo(sum_energy, current_start_min, &min_total_area)) break; // The efficacy of the cut is the normalized violation of the above // equation. We will normalize by the sqrt of the sum of squared energies. - const double efficacy = (ToDouble(min_rhs) - lp_contrib) / + const double efficacy = (ToDouble(min_total_area) - lp_contrib) / std::sqrt(ToDouble(sum_square_energy)); // For a given start time, we only keep the best cut. 
@@ -569,13 +569,13 @@ void GenerateNoOvelap2dCompletionTimeCuts(absl::string_view cut_name, if (efficacy > best_efficacy) { best_efficacy = efficacy; best_end = i; - best_min_rhs = min_rhs; + best_min_total_area = min_total_area; best_use_subset_sum = reachable_capacity < y_max_of_subset - y_min_of_subset; } } if (best_end != -1) { - LinearConstraintBuilder cut(model, best_min_rhs, kMaxIntegerValue); + LinearConstraintBuilder cut(model, best_min_total_area, kMaxIntegerValue); bool is_lifted = false; bool add_energy_to_name = false; for (int i = 0; i <= best_end; ++i) { diff --git a/ortools/sat/diffn_util.cc b/ortools/sat/diffn_util.cc index 0c341497b2..52e7ab9d40 100644 --- a/ortools/sat/diffn_util.cc +++ b/ortools/sat/diffn_util.cc @@ -37,6 +37,7 @@ #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/random/bit_gen_ref.h" #include "absl/types/optional.h" #include "absl/types/span.h" diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index a72b3ac18f..9ebee63860 100644 --- a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -19,8 +19,8 @@ #include "absl/algorithm/container.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" #include "ortools/sat/all_different.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" diff --git a/ortools/sat/disjunctive.h b/ortools/sat/disjunctive.h index 9b26ab6a68..a5e710483f 100644 --- a/ortools/sat/disjunctive.h +++ b/ortools/sat/disjunctive.h @@ -21,9 +21,9 @@ #include #include +#include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" #include "ortools/sat/intervals.h" diff --git a/ortools/sat/feasibility_jump.cc b/ortools/sat/feasibility_jump.cc index 71373e19e2..c5b6bd0e41 
100644 --- a/ortools/sat/feasibility_jump.cc +++ b/ortools/sat/feasibility_jump.cc @@ -31,12 +31,12 @@ #include "absl/functional/bind_front.h" #include "absl/functional/function_ref.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/random/bit_gen_ref.h" #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "ortools/algorithms/binary_search.h" -#include "ortools/base/logging.h" #include "ortools/sat/combine_solutions.h" #include "ortools/sat/constraint_violation.h" #include "ortools/sat/cp_model.pb.h" @@ -132,21 +132,24 @@ void FeasibilityJumpSolver::ImportState() { void FeasibilityJumpSolver::ReleaseState() { states_->Release(state_); } -void FeasibilityJumpSolver::Initialize() { - is_initialized_ = true; - +bool FeasibilityJumpSolver::Initialize() { // For now we just disable or enable it. // But in the future we might have more variation. if (params_.feasibility_jump_linearization_level() == 0) { - evaluator_ = - std::make_unique(linear_model_->model_proto(), params_); + evaluator_ = std::make_unique(linear_model_->model_proto(), + params_, &time_limit_); } else { - evaluator_ = - std::make_unique(linear_model_->model_proto(), params_, - linear_model_->ignored_constraints(), - linear_model_->additional_constraints()); + evaluator_ = std::make_unique( + linear_model_->model_proto(), params_, + linear_model_->ignored_constraints(), + linear_model_->additional_constraints(), &time_limit_); } + if (time_limit_.LimitReached()) { + evaluator_.reset(); + return false; + } + is_initialized_ = true; const int num_variables = linear_model_->model_proto().variables().size(); var_domains_.resize(num_variables); for (int v = 0; v < num_variables; ++v) { @@ -162,6 +165,7 @@ void FeasibilityJumpSolver::Initialize() { var_occurs_in_non_linear_constraint_[v] = true; } } + return true; } namespace { @@ -346,7 +350,11 @@ std::function FeasibilityJumpSolver::GenerateTask(int64_t /*task_id*/) { return 
[this] { // We delay initialization to the first task as it might be a bit slow // to scan the whole model, so we want to do this part in parallel. - if (!is_initialized_) Initialize(); + if (!is_initialized_) { + if (!Initialize()) { + return; + } + } // Load the next state to work on. ImportState(); diff --git a/ortools/sat/feasibility_jump.h b/ortools/sat/feasibility_jump.h index d9d1945d7a..d1975161c7 100644 --- a/ortools/sat/feasibility_jump.h +++ b/ortools/sat/feasibility_jump.h @@ -43,6 +43,7 @@ #include "ortools/sat/synchronization.h" #include "ortools/sat/util.h" #include "ortools/util/sorted_interval_list.h" +#include "ortools/util/time_limit.h" namespace operations_research::sat { @@ -484,7 +485,9 @@ class FeasibilityJumpSolver : public SubSolver { shared_hints_(shared_hints), stat_tables_(stat_tables), random_(params_), - var_domains_(shared_bounds) {} + var_domains_(shared_bounds) { + shared_time_limit_->UpdateLocalLimit(&time_limit_); + } // If VLOG_IS_ON(1), it will export a bunch of statistics. ~FeasibilityJumpSolver() override; @@ -517,7 +520,8 @@ class FeasibilityJumpSolver : public SubSolver { void ImportState(); void ReleaseState(); - void Initialize(); + // Return false if we could not initialize the evaluator in the time limit. 
+ bool Initialize(); void ResetCurrentSolution(bool use_hint, bool use_objective, double perturbation_probability); void PerturbateCurrentSolution(double perturbation_probability); @@ -589,6 +593,7 @@ class FeasibilityJumpSolver : public SubSolver { SatParameters params_; std::shared_ptr states_; ModelSharedTimeLimit* shared_time_limit_; + TimeLimit time_limit_; SharedResponseManager* shared_response_; SharedLsSolutionRepository* shared_hints_; SharedStatTables* stat_tables_; diff --git a/ortools/sat/implied_bounds.cc b/ortools/sat/implied_bounds.cc index 8fed91fe17..f08fe8df9b 100644 --- a/ortools/sat/implied_bounds.cc +++ b/ortools/sat/implied_bounds.cc @@ -27,6 +27,7 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/meta/type_traits.h" #include "absl/types/span.h" #include "ortools/base/logging.h" diff --git a/ortools/sat/integer.cc b/ortools/sat/integer.cc index 32015e3c85..6b4c81757f 100644 --- a/ortools/sat/integer.cc +++ b/ortools/sat/integer.cc @@ -2195,7 +2195,6 @@ bool GenericLiteralWatcher::Propagate(Trail* trail) { std::deque& queue = queue_by_priority_[priority]; while (!queue.empty()) { const int id = queue.front(); - current_id_ = id; queue.pop_front(); // Before we propagate, make sure any reversible structure are up to date. @@ -2222,6 +2221,10 @@ bool GenericLiteralWatcher::Propagate(Trail* trail) { const int64_t old_integer_timestamp = integer_trail_->num_enqueues(); const int64_t old_boolean_timestamp = trail->Index(); + // Set fields that might be accessed from within Propagate(). + current_id_ = id; + call_again_ = false; + // TODO(user): Maybe just provide one function Propagate(watch_indices) ? 
++num_propagate_calls; const bool result = @@ -2252,6 +2255,10 @@ bool GenericLiteralWatcher::Propagate(Trail* trail) { UpdateCallingNeeds(trail); } + if (call_again_) { + CallOnNextPropagate(current_id_); + } + // If the propagator pushed a literal, we exit in order to rerun all SAT // only propagators first. Note that since a literal was pushed we are // guaranteed to be called again, and we will resume from priority 0. diff --git a/ortools/sat/integer.h b/ortools/sat/integer.h index 24ff2e3524..a6845bb157 100644 --- a/ortools/sat/integer.h +++ b/ortools/sat/integer.h @@ -1233,8 +1233,15 @@ class GenericLiteralWatcher final : public SatPropagator { int GetCurrentId() const { return current_id_; } // Add the given propagator to its queue. + // + // Warning: This will have no effect if called from within the propagation of + // a propagator since the propagator is still marked as "in the queue" until + // its propagation is done. Use CallAgainDuringThisPropagation() if that is + // what you need instead. void CallOnNextPropagate(int id); + void CallAgainDuringThisPropagation() { call_again_ = true; }; + private: // Updates queue_ and in_queue_ with the propagator ids that need to be // called. @@ -1283,6 +1290,7 @@ class GenericLiteralWatcher final : public SatPropagator { // The id of the propagator we just called. int current_id_; + bool call_again_ = false; std::vector&)>> level_zero_modified_variable_callback_; @@ -1393,6 +1401,8 @@ inline bool IntegerTrail::IntegerLiteralIsFalse(IntegerLiteral l) const { // serves as sentinels. Their index match the variables index. 
inline IntegerValue IntegerTrail::LevelZeroLowerBound( IntegerVariable var) const { + DCHECK_GE(var, 0); + DCHECK_LT(var, integer_trail_.size()); return integer_trail_[var.value()].bound; } diff --git a/ortools/sat/integer_expr.h b/ortools/sat/integer_expr.h index 2c85548b87..c3871ea0b0 100644 --- a/ortools/sat/integer_expr.h +++ b/ortools/sat/integer_expr.h @@ -23,8 +23,8 @@ #include #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" #include "ortools/sat/linear_constraint.h" diff --git a/ortools/sat/integer_search.cc b/ortools/sat/integer_search.cc index e93f99992f..1a0153b81d 100644 --- a/ortools/sat/integer_search.cc +++ b/ortools/sat/integer_search.cc @@ -23,6 +23,7 @@ #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/meta/type_traits.h" #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" diff --git a/ortools/sat/lb_tree_search.cc b/ortools/sat/lb_tree_search.cc index 9d0aa09a76..f8ecc062b8 100644 --- a/ortools/sat/lb_tree_search.cc +++ b/ortools/sat/lb_tree_search.cc @@ -24,6 +24,7 @@ #include "absl/cleanup/cleanup.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "ortools/base/logging.h" diff --git a/ortools/sat/linear_constraint.h b/ortools/sat/linear_constraint.h index 2c45dc9729..96fd023786 100644 --- a/ortools/sat/linear_constraint.h +++ b/ortools/sat/linear_constraint.h @@ -288,6 +288,8 @@ class LinearConstraintBuilder { // bounds. 
LinearExpression BuildExpression(); + int NumTerms() const { return terms_.size(); } + private: const IntegerEncoder* encoder_; IntegerValue lb_; diff --git a/ortools/sat/linear_constraint_manager.cc b/ortools/sat/linear_constraint_manager.cc index 43cf9281c7..9efd3769e5 100644 --- a/ortools/sat/linear_constraint_manager.cc +++ b/ortools/sat/linear_constraint_manager.cc @@ -27,6 +27,7 @@ #include "absl/container/btree_map.h" #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" diff --git a/ortools/sat/linear_model.cc b/ortools/sat/linear_model.cc index 966a9947a4..24b378ccd8 100644 --- a/ortools/sat/linear_model.cc +++ b/ortools/sat/linear_model.cc @@ -21,7 +21,7 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" -#include "ortools/base/logging.h" +#include "absl/log/log.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_utils.h" #include "ortools/util/sorted_interval_list.h" diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index 33521bebc6..dd184b7fef 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -30,6 +30,7 @@ #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/numeric/int128.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" @@ -858,7 +859,7 @@ bool LinearProgrammingConstraint::IncrementalPropagate( } } - if (!lp_solution_is_set_) { + if (!lp_solution_is_set_ || num_force_lp_call_on_next_propagate_ > 0) { return Propagate(); } @@ -2120,8 +2121,12 @@ void LinearProgrammingConstraint::UpdateSimplexIterationLimit( } bool LinearProgrammingConstraint::Propagate() { + const int old_num_force = 
num_force_lp_call_on_next_propagate_; + num_force_lp_call_on_next_propagate_ = 0; if (!enabled_) return true; if (time_limit_->LimitReached()) return true; + const int64_t timestamp_at_function_start = integer_trail_->num_enqueues(); + UpdateBoundsOfLpVariables(); // TODO(user): It seems the time we loose by not stopping early might be worth @@ -2160,6 +2165,20 @@ bool LinearProgrammingConstraint::Propagate() { int cuts_round = 0; while (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL && cuts_round < max_cuts_rounds) { + // We are about to spend some effort finding cuts or changing the LP. + // If one of the LP solve done by this function propagated something, it + // seems better to reach the propagation fix point first before doing that. + // + // Heuristic: if we seem to be in a propagation loop, we might need more + // cut/lazy-constraints in order to just get out of it, so we loop only if + // num_force_lp_call_on_next_propagate_ is small. + if (integer_trail_->num_enqueues() > timestamp_at_function_start && + old_num_force < 3) { + num_force_lp_call_on_next_propagate_ = old_num_force + 1; + watcher_->CallAgainDuringThisPropagation(); + return true; + } + // We wait for the first batch of problem constraints to be added before we // begin to generate cuts. Note that we rely on num_solves_ since on some // problems there is no other constraints than the cuts. diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index 9fb24c3528..3c134ff3c3 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -591,6 +591,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, // True if the last time we solved the exact same LP at level zero, no cuts // and no lazy constraints where added. bool lp_at_level_zero_is_final_ = false; + int num_force_lp_call_on_next_propagate_ = 0; // Same as lp_solution_ but this vector is indexed by IntegerVariable. 
ModelLpVariableMapping& mirror_lp_variable_; diff --git a/ortools/sat/linear_propagation.cc b/ortools/sat/linear_propagation.cc index 4c8a3a40a8..3c1b590b5a 100644 --- a/ortools/sat/linear_propagation.cc +++ b/ortools/sat/linear_propagation.cc @@ -29,10 +29,10 @@ #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/numeric/int128.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" #include "ortools/base/stl_util.h" #include "ortools/base/strong_vector.h" #include "ortools/sat/integer.h" diff --git a/ortools/sat/linear_relaxation.cc b/ortools/sat/linear_relaxation.cc index 5bfaf36fbc..ff8135babf 100644 --- a/ortools/sat/linear_relaxation.cc +++ b/ortools/sat/linear_relaxation.cc @@ -60,7 +60,6 @@ #include "ortools/sat/scheduling_helpers.h" #include "ortools/sat/util.h" #include "ortools/util/logging.h" -#include "ortools/util/saturated_arithmetic.h" #include "ortools/util/sorted_interval_list.h" #include "ortools/util/strong_integers.h" @@ -613,8 +612,27 @@ void AddRoutesCutGenerator(const ConstraintProto& ct, Model* m, } else { const std::vector demands(ct.routes().demands().begin(), ct.routes().demands().end()); + int num_dimensions = ct.routes().dimensions_size(); + std::vector flat_node_dim_expressions( + num_dimensions * num_nodes, AffineExpression()); + for (int d = 0; d < num_dimensions; ++d) { + const auto& node_exprs = ct.routes().dimensions(d).exprs(); + for (int n = 0; n < node_exprs.size(); ++n) { + const LinearExpressionProto& expr = node_exprs[n]; + AffineExpression& node_expr = + flat_node_dim_expressions[n * num_dimensions + d]; + if (expr.vars().empty()) { + node_expr = AffineExpression(IntegerValue(expr.offset())); + continue; + } + DCHECK_EQ(expr.vars_size(), 1); + node_expr = AffineExpression(mapping->Integer(expr.vars(0)), + expr.coeffs(0), expr.offset()); + } + } 
relaxation->cut_generators.push_back(CreateCVRPCutGenerator( - num_nodes, tails, heads, literals, demands, ct.routes().capacity(), m)); + num_nodes, tails, heads, literals, demands, flat_node_dim_expressions, + ct.routes().capacity(), m)); } } diff --git a/ortools/sat/no_overlap_2d_helper.cc b/ortools/sat/no_overlap_2d_helper.cc index cfd7ea9cf9..9fee042fff 100644 --- a/ortools/sat/no_overlap_2d_helper.cc +++ b/ortools/sat/no_overlap_2d_helper.cc @@ -20,8 +20,8 @@ #include "absl/base/log_severity.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" #include "ortools/sat/2d_rectangle_presolve.h" #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" diff --git a/ortools/sat/optimization.cc b/ortools/sat/optimization.cc index aa2533bf6b..6fc0d14b2e 100644 --- a/ortools/sat/optimization.cc +++ b/ortools/sat/optimization.cc @@ -26,9 +26,9 @@ #include "absl/container/btree_set.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" -#include "absl/strings/string_view.h" #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/base/stl_util.h" @@ -593,8 +593,8 @@ void CoreBasedOptimizer::ComputeNextStratificationThreshold() { bool CoreBasedOptimizer::CoverOptimization() { if (!sat_solver_->ResetToLevelZero()) return false; - // We set a fix deterministic time limit per all sub-solve and skip to the - // next core if the sum of the subsolve is also over this limit. + // We set a fix deterministic time limit per all sub-solves and skip to the + // next core if the sum of the sub-solves is also over this limit. 
constexpr double max_dtime_per_core = 0.5; const double old_time_limit = parameters_->max_deterministic_time(); parameters_->set_max_deterministic_time(max_dtime_per_core); diff --git a/ortools/sat/parameters_validation.cc b/ortools/sat/parameters_validation.cc index 4d64b1aaad..4d3125585a 100644 --- a/ortools/sat/parameters_validation.cc +++ b/ortools/sat/parameters_validation.cc @@ -100,6 +100,7 @@ std::string ValidateParameters(const SatParameters& params) { TEST_IN_RANGE(lns_initial_difficulty, 0.0, 1.0); TEST_POSITIVE(at_most_one_max_expansion_size); + TEST_POSITIVE(max_alldiff_domain_size); TEST_NOT_NAN(max_time_in_seconds); TEST_NOT_NAN(max_deterministic_time); diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index 7d30519f07..9703bc3f13 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -17,7 +17,6 @@ #include #include -#include #include #include #include @@ -28,6 +27,7 @@ #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/meta/type_traits.h" #include "absl/types/span.h" #include "ortools/base/logging.h" @@ -1107,6 +1107,17 @@ bool PrecedencesPropagator::BellmanFordTarjan(Trail* trail) { void BinaryRelationRepository::Add(Literal lit, LinearTerm a, LinearTerm b, IntegerValue lhs, IntegerValue rhs) { + if (lit.Index() != kNoLiteralIndex) { + num_enforced_relations_++; + DCHECK(a.coeff == 0 || a.var != kNoIntegerVariable); + DCHECK(b.coeff == 0 || b.var != kNoIntegerVariable); + } else { + DCHECK_NE(a.coeff, 0); + DCHECK_NE(b.coeff, 0); + DCHECK_NE(a.var, kNoIntegerVariable); + DCHECK_NE(b.var, kNoIntegerVariable); + } + Relation r; r.enforcement = lit; r.a = a; @@ -1127,16 +1138,102 @@ void BinaryRelationRepository::Add(Literal lit, LinearTerm a, LinearTerm b, relations_.push_back(std::move(r)); } +void BinaryRelationRepository::AddPartialRelation(Literal lit, + IntegerVariable a, + IntegerVariable b) { + 
DCHECK_NE(a, kNoIntegerVariable); + DCHECK_NE(b, kNoIntegerVariable); + DCHECK_NE(a, b); + Add(lit, LinearTerm(a, 1), LinearTerm(b, 1), 0, 0); +} + void BinaryRelationRepository::Build() { DCHECK(!is_built_); is_built_ = true; - std::vector keys; + std::vector> literal_key_values; + std::vector> var_key_values; const int num_relations = relations_.size(); - keys.reserve(num_relations); + literal_key_values.reserve(num_enforced_relations_); + var_key_values.reserve(num_relations - num_enforced_relations_); for (int i = 0; i < num_relations; ++i) { - keys.push_back(relations_[i].enforcement.Index()); + const Relation& r = relations_[i]; + if (r.enforcement.Index() == kNoLiteralIndex) { + var_key_values.emplace_back(r.a.var, i); + var_key_values.emplace_back(r.b.var, i); + std::pair key(r.a.var, r.b.var); + if (relations_[i].a.var > relations_[i].b.var) { + std::swap(key.first, key.second); + } + var_pair_to_relations_[key].push_back(i); + } else { + literal_key_values.emplace_back(r.enforcement.Index(), i); + } } - lit_to_relations_.ResetFromFlatMapping(keys, IdentityMap()); + lit_to_relations_.ResetFromPairs(literal_key_values); + var_to_relations_.ResetFromPairs(var_key_values); +} + +bool BinaryRelationRepository::PropagateLocalBounds( + const IntegerTrail& integer_trail, Literal lit, + const absl::flat_hash_map& input, + absl::flat_hash_map* output) const { + DCHECK_NE(lit.Index(), kNoLiteralIndex); + + auto get_lower_bound = [&](IntegerVariable var) { + const auto it = input.find(var); + if (it != input.end()) return it->second; + return integer_trail.LevelZeroLowerBound(var); + }; + auto get_upper_bound = [&](IntegerVariable var) { + return -get_lower_bound(NegationOf(var)); + }; + auto update_lower_bound_by_var = [&](IntegerVariable var, IntegerValue lb) { + if (lb <= integer_trail.LevelZeroLowerBound(var)) return; + const auto [it, inserted] = output->insert({var, lb}); + if (!inserted) { + it->second = std::max(it->second, lb); + } + }; + auto 
update_upper_bound_by_var = [&](IntegerVariable var, IntegerValue ub) { + update_lower_bound_by_var(NegationOf(var), -ub); + }; + auto update_var_bounds = [&](const LinearTerm& a, const LinearTerm& b, + IntegerValue lhs, IntegerValue rhs) { + if (a.coeff == 0) return; + + // lb(b.y) <= b.y <= ub(b.y) and lhs <= a.x + b.y <= rhs imply + // ceil((lhs - ub(b.y)) / a) <= x <= floor((rhs - lb(b.y)) / a) + if (b.coeff != 0) { + lhs = lhs - b.coeff * get_upper_bound(b.var); + rhs = rhs - b.coeff * get_lower_bound(b.var); + } + update_lower_bound_by_var(a.var, MathUtil::CeilOfRatio(lhs, a.coeff)); + update_upper_bound_by_var(a.var, MathUtil::FloorOfRatio(rhs, a.coeff)); + }; + auto update_var_bounds_from_relation = [&](Relation r) { + r.a.MakeCoeffPositive(); + r.b.MakeCoeffPositive(); + update_var_bounds(r.a, r.b, r.lhs, r.rhs); + update_var_bounds(r.b, r.a, r.lhs, r.rhs); + }; + if (lit.Index() < lit_to_relations_.size()) { + for (const int relation_index : lit_to_relations_[lit]) { + update_var_bounds_from_relation(relations_[relation_index]); + } + } + for (const auto& [var, _] : input) { + if (var >= var_to_relations_.size()) continue; + for (const int relation_index : var_to_relations_[var]) { + update_var_bounds_from_relation(relations_[relation_index]); + } + } + + // Check feasibility. + // TODO(user): we might do that earlier? + for (const auto [var, lb] : *output) { + if (lb > integer_trail.LevelZeroUpperBound(var)) return false; + } + return true; } bool GreaterThanAtLeastOneOfDetector::AddRelationFromIndices( @@ -1205,7 +1302,8 @@ int GreaterThanAtLeastOneOfDetector:: // Collect all relations impacted by this clause. 
std::vector> infos; for (const Literal l : clause) { - for (const int index : repository_.relation_indices(l.Index())) { + for (const int index : + repository_.IndicesOfRelationsEnforcedBy(l.Index())) { const Relation& r = repository_.relation(index); if (r.a.var != kNoIntegerVariable && IntTypeAbs(r.a.coeff) == 1) { infos.push_back({r.a.var, index}); @@ -1260,6 +1358,7 @@ int GreaterThanAtLeastOneOfDetector:: util_intops::StrongVector> var_to_relations; for (int index = 0; index < repository_.size(); ++index) { const Relation& r = repository_.relation(index); + if (r.enforcement.Index() == kNoLiteralIndex) continue; if (r.a.var != kNoIntegerVariable && IntTypeAbs(r.a.coeff) == 1) { if (r.a.var >= var_to_relations.size()) { var_to_relations.resize(r.a.var + 1); @@ -1388,57 +1487,5 @@ int GreaterThanAtLeastOneOfDetector::AddGreaterThanAtLeastOneOfConstraints( return num_added_constraints; } -bool BinaryRelationRepository::PropagateLocalBounds( - const IntegerTrail& integer_trail, Literal lit, - const absl::flat_hash_map& input, - absl::flat_hash_map* output) const { - output->clear(); - if (lit.Index() >= lit_to_relations_.size()) return true; - - auto get_lower_bound = [&](IntegerVariable var) { - const auto it = input.find(var); - if (it != input.end()) return it->second; - return integer_trail.LevelZeroLowerBound(var); - }; - auto get_upper_bound = [&](IntegerVariable var) { - return -get_lower_bound(NegationOf(var)); - }; - auto update_lower_bound_by_var = [&](IntegerVariable var, IntegerValue lb) { - if (lb <= integer_trail.LevelZeroLowerBound(var)) return; - const auto [it, inserted] = output->insert({var, lb}); - if (!inserted) { - it->second = std::max(it->second, lb); - } - }; - auto update_upper_bound_by_var = [&](IntegerVariable var, IntegerValue ub) { - update_lower_bound_by_var(NegationOf(var), -ub); - }; - auto update_var_bounds = [&](const LinearTerm& a, const LinearTerm& b, - IntegerValue lhs, IntegerValue rhs) { - if (a.coeff == 0) return; - - // 
lb(b.y) <= b.y <= ub(b.y) and lhs <= a.x + b.y <= rhs imply - // ceil((lhs - ub(b.y)) / a) <= x <= floor((rhs - lb(b.y)) / a) - lhs = lhs - b.coeff * get_upper_bound(b.var); - rhs = rhs - b.coeff * get_lower_bound(b.var); - update_lower_bound_by_var(a.var, MathUtil::CeilOfRatio(lhs, a.coeff)); - update_upper_bound_by_var(a.var, MathUtil::FloorOfRatio(rhs, a.coeff)); - }; - for (const int relation_index : lit_to_relations_[lit]) { - auto r = relations_[relation_index]; - r.a.MakeCoeffPositive(); - r.b.MakeCoeffPositive(); - update_var_bounds(r.a, r.b, r.lhs, r.rhs); - update_var_bounds(r.b, r.a, r.lhs, r.rhs); - } - - // Check feasibility. - // TODO(user): we might do that earlier? - for (const auto [var, lb] : *output) { - if (lb > integer_trail.LevelZeroUpperBound(var)) return false; - } - return true; -} - } // namespace sat } // namespace operations_research diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index f79cc665f2..e29e4f1e17 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -18,7 +18,6 @@ #include #include #include -#include #include #include @@ -465,8 +464,8 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // Similar to AffineExpression, but with a zero constant. // If coeff is zero, then this is always zero and var is ignored. struct LinearTerm { - IntegerVariable var = kNoIntegerVariable; - IntegerValue coeff = IntegerValue(0); + LinearTerm() = default; + LinearTerm(IntegerVariable v, IntegerValue c) : var(v), coeff(c) {} void MakeCoeffPositive() { if (coeff < 0) { @@ -474,6 +473,13 @@ struct LinearTerm { var = NegationOf(var); } } + + bool operator==(const LinearTerm& other) const { + return var == other.var && coeff == other.coeff; + } + + IntegerVariable var = kNoIntegerVariable; + IntegerValue coeff = IntegerValue(0); }; // A relation of the form enforcement => a + b \in [lhs, rhs]. 
@@ -485,36 +491,73 @@ struct Relation { LinearTerm b; IntegerValue lhs; IntegerValue rhs; + + bool operator==(const Relation& other) const { + return enforcement == other.enforcement && a == other.a && b == other.b && + lhs == other.lhs && rhs == other.rhs; + } }; -// A repository of all the enforced linear constraints of size 1 or 2. +// A repository of all the enforced linear constraints of size 1 or 2, and of +// all the non-enforced linear constraints of size 2. // // TODO(user): This is not always needed, find a way to clean this once we // don't need it. class BinaryRelationRepository { public: int size() const { return relations_.size(); } + // The returned relation is guaranteed to only have positive variables. const Relation& relation(int index) const { return relations_[index]; } - absl::Span relation_indices(LiteralIndex lit) const { + // Returns the indices of the relations that are enforced by the given + // literal. + absl::Span IndicesOfRelationsEnforcedBy(LiteralIndex lit) const { if (lit >= lit_to_relations_.size()) return {}; return lit_to_relations_[lit]; } - // Adds a relation lit => a + b \in [lhs, rhs]. + // Returns the indices of the non-enforced relations that contain the given + // (positive) variable. + absl::Span IndicesOfRelationsContaining( + IntegerVariable var) const { + if (var >= var_to_relations_.size()) return {}; + return var_to_relations_[var]; + } + + // Returns the indices of the non-enforced relations that contain the given + // (positive) variables. 
+ absl::Span IndicesOfRelationsBetween(IntegerVariable var1, + IntegerVariable var2) const { + if (var1 > var2) std::swap(var1, var2); + const std::pair key(var1, var2); + const auto it = var_pair_to_relations_.find(key); + if (it == var_pair_to_relations_.end()) return {}; + return it->second; + } + + // Adds a conditional relation lit => a + b \in [lhs, rhs] (one of the terms + // can be zero), or an always true binary relation a + b \in [lhs, rhs] (both + // terms must be non-zero). void Add(Literal lit, LinearTerm a, LinearTerm b, IntegerValue lhs, IntegerValue rhs); + // Adds a partial conditional relation between two variables, with unspecified + // coefficients and bounds. + void AddPartialRelation(Literal lit, IntegerVariable a, IntegerVariable b); + // Builds the literal to relations mapping. This should be called once all the // relations have been added. void Build(); // Assuming level-zero bounds + any (var >= value) in the input map, // fills "output" with a "propagated" set of bounds assuming lit is true (by - // using the relations enforced by lit). Note that we will only fill bounds > - // level-zero ones in output. + // using the relations enforced by lit, as well as the non-enforced ones). + // Note that we will only fill bounds > level-zero ones in output. // // Returns false if the new bounds are infeasible at level zero. + // + // Important: by default this does not call output->clear() so we can take + // the max with already inferred bounds. 
bool PropagateLocalBounds( const IntegerTrail& integer_trail, Literal lit, const absl::flat_hash_map& input, @@ -522,8 +565,13 @@ class BinaryRelationRepository { private: bool is_built_ = false; + int num_enforced_relations_ = 0; std::vector relations_; CompactVectorVector lit_to_relations_; + CompactVectorVector var_to_relations_; + absl::flat_hash_map, + std::vector> + var_pair_to_relations_; }; // Detects if at least one of a subset of linear of size 2 or 1, touching the diff --git a/ortools/sat/precedences_test.cc b/ortools/sat/precedences_test.cc index f72587706d..0552a6cae0 100644 --- a/ortools/sat/precedences_test.cc +++ b/ortools/sat/precedences_test.cc @@ -14,9 +14,10 @@ #include "ortools/sat/precedences.h" #include +#include #include -#include "absl/types/span.h" +#include "absl/container/flat_hash_map.h" #include "gtest/gtest.h" #include "ortools/base/gmock.h" #include "ortools/sat/integer.h" @@ -32,6 +33,7 @@ namespace sat { namespace { using ::testing::ElementsAre; +using ::testing::IsEmpty; using ::testing::UnorderedElementsAre; // A simple macro to make the code more readable. @@ -362,7 +364,7 @@ TEST(PrecedencesPropagatorTest, TrickyCycle) { EXPECT_FALSE(propagator->Propagate(trail)); EXPECT_THAT(trail->FailingClause(), ElementsAre(Literal(-1))); - // Test that the code dectected properly a positive cycle in the dependency + // Test that the code detected properly a positive cycle in the dependency // graph instead of just pushing the bounds until the upper bound is reached. 
EXPECT_LT(integer_trail->num_enqueues(), 10); } @@ -443,6 +445,155 @@ TEST(PrecedenceRelationsTest, CollectPrecedences) { EXPECT_TRUE(p.empty()); } +TEST(BinaryRelationRepositoryTest, Build) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); + const Literal lit_b = Literal(model.Add(NewBooleanVariable()), true); + BinaryRelationRepository repository; + repository.Add(lit_a, {NegationOf(x), 1}, {y, 1}, 2, 8); + repository.Add(Literal(kNoLiteralIndex), {x, 2}, {y, -2}, 0, 10); + repository.Add(lit_a, {x, -3}, {NegationOf(y), 2}, 1, 15); + repository.Add(lit_b, {x, -3}, {kNoIntegerVariable, 0}, 3, 5); + repository.Add(Literal(kNoLiteralIndex), {x, 3}, {y, -1}, 5, 15); + repository.Add(Literal(kNoLiteralIndex), {x, 1}, {z, -1}, 0, 10); + repository.AddPartialRelation(lit_b, x, z); + repository.Build(); + + EXPECT_EQ(repository.size(), 7); + EXPECT_EQ(repository.relation(0), (Relation{lit_a, {x, -1}, {y, 1}, 2, 8})); + EXPECT_EQ(repository.relation(1), + (Relation{Literal(kNoLiteralIndex), {x, 2}, {y, -2}, 0, 10})); + EXPECT_EQ(repository.relation(2), (Relation{lit_a, {x, -3}, {y, -2}, 1, 15})); + EXPECT_EQ(repository.relation(3), + (Relation{lit_b, {x, -3}, {kNoIntegerVariable, 0}, 3, 5})); + EXPECT_EQ(repository.relation(6), (Relation{lit_b, {x, 1}, {z, 1}, 0, 0})); + EXPECT_THAT(repository.IndicesOfRelationsEnforcedBy(lit_a), + UnorderedElementsAre(0, 2)); + EXPECT_THAT(repository.IndicesOfRelationsEnforcedBy(lit_b), + UnorderedElementsAre(3, 6)); + EXPECT_THAT(repository.IndicesOfRelationsContaining(x), + UnorderedElementsAre(1, 4, 5)); + EXPECT_THAT(repository.IndicesOfRelationsContaining(y), + UnorderedElementsAre(1, 4)); + EXPECT_THAT(repository.IndicesOfRelationsContaining(z), + UnorderedElementsAre(5)); + 
EXPECT_THAT(repository.IndicesOfRelationsBetween(x, y), + UnorderedElementsAre(1, 4)); + EXPECT_THAT(repository.IndicesOfRelationsBetween(y, x), + UnorderedElementsAre(1, 4)); + EXPECT_THAT(repository.IndicesOfRelationsBetween(x, z), + UnorderedElementsAre(5)); + EXPECT_THAT(repository.IndicesOfRelationsBetween(z, y), IsEmpty()); +} + +TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_EnforcedRelation) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); + BinaryRelationRepository repository; + repository.Add(lit_a, {x, -1}, {y, 1}, 2, 10); // lit_a => y => x + 2 + repository.Build(); + IntegerTrail* integer_trail = model.GetOrCreate(); + absl::flat_hash_map input = {{x, 3}}; + absl::flat_hash_map output; + + const bool result = + repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + + EXPECT_TRUE(result); + EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -8), + std::make_pair(y, 5))); +} + +TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_UnenforcedRelation) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); + const Literal kNoLiteral = Literal(kNoLiteralIndex); + BinaryRelationRepository repository; + repository.Add(lit_a, {x, -1}, {y, 1}, -5, 10); // lit_a => y => x - 5 + repository.Add(kNoLiteral, {x, -1}, {y, 1}, 2, 10); // y => x + 2 + repository.Build(); + IntegerTrail* integer_trail = model.GetOrCreate(); + absl::flat_hash_map input = {{x, 3}}; + absl::flat_hash_map output; + + const bool result = + repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + + EXPECT_TRUE(result); + EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -8), 
+ std::make_pair(y, 5))); +} + +TEST(BinaryRelationRepositoryTest, + PropagateLocalBounds_EnforcedBoundSmallerThanLevelZeroBound) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); + const Literal lit_b = Literal(model.Add(NewBooleanVariable()), true); + BinaryRelationRepository repository; + repository.Add(lit_a, {x, -1}, {y, 1}, -5, 10); // lit_a => y => x - 5 + repository.Add(lit_b, {x, -1}, {y, 1}, 2, 10); // lit_b => y => x + 2 + repository.Build(); + IntegerTrail* integer_trail = model.GetOrCreate(); + absl::flat_hash_map input = {{x, 3}}; + absl::flat_hash_map output; + + const bool result = + repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + + EXPECT_TRUE(result); + EXPECT_THAT(output, IsEmpty()); +} + +TEST(BinaryRelationRepositoryTest, + PropagateLocalBounds_EnforcedBoundSmallerThanOutputBound) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); + BinaryRelationRepository repository; + repository.Add(lit_a, {x, -1}, {y, 1}, 2, 10); // lit_a => y => x + 2 + repository.Build(); + IntegerTrail* integer_trail = model.GetOrCreate(); + absl::flat_hash_map input = {{x, 3}}; + absl::flat_hash_map output = {{y, 8}}; + + const bool result = + repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + + EXPECT_TRUE(result); + EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -8), + std::make_pair(y, 8))); +} + +TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_Infeasible) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = 
Literal(model.Add(NewBooleanVariable()), true); + BinaryRelationRepository repository; + repository.Add(lit_a, {x, -1}, {y, 1}, 8, 10); // lit_a => y => x + 8 + repository.Build(); + IntegerTrail* integer_trail = model.GetOrCreate(); + absl::flat_hash_map input = {{x, 3}}; + absl::flat_hash_map output; + + const bool result = + repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + + EXPECT_FALSE(result); + EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -2), + std::make_pair(y, 11))); +} + TEST(GreaterThanAtLeastOneOfDetectorTest, AddGreaterThanAtLeastOneOf) { Model model; const IntegerVariable a = model.Add(NewIntegerVariable(2, 10)); diff --git a/ortools/sat/presolve_context.cc b/ortools/sat/presolve_context.cc index de4698f94b..b022bf8222 100644 --- a/ortools/sat/presolve_context.cc +++ b/ortools/sat/presolve_context.cc @@ -30,12 +30,12 @@ #include "absl/container/flat_hash_set.h" #include "absl/flags/flag.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/meta/type_traits.h" #include "absl/numeric/int128.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" #include "ortools/port/proto_utils.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_checker.h" @@ -2766,6 +2766,24 @@ void CreateValidModelWithSingleConstraint(const ConstraintProto& ct, ApplyToAllVariableIndices(mapping_function, &ct); ApplyToAllLiteralIndices(mapping_function, &ct); ApplyToAllIntervalIndices(interval_mapping_function, &ct); + if (ct.constraint_case() == ConstraintProto::kRoutes) { + for (RoutesConstraintProto::NodeExpressions& node_exprs : + *ct.mutable_routes()->mutable_dimensions()) { + for (LinearExpressionProto& expr : *node_exprs.mutable_exprs()) { + if (expr.vars().empty()) continue; + DCHECK_EQ(expr.vars().size(), 1); + const int ref = expr.vars(0); + const auto it = inverse_variable_map.find(PositiveRef(ref)); + 
if (it == inverse_variable_map.end()) { + expr.clear_vars(); + expr.clear_coeffs(); + continue; + } + const int image = it->second; + expr.set_vars(0, RefIsPositive(ref) ? image : NegatedRef(image)); + } + } + } } } diff --git a/ortools/sat/presolve_util_test.cc b/ortools/sat/presolve_util_test.cc index 71398b8809..e57f2916f8 100644 --- a/ortools/sat/presolve_util_test.cc +++ b/ortools/sat/presolve_util_test.cc @@ -20,11 +20,11 @@ #include #include "absl/container/flat_hash_set.h" +#include "absl/log/log.h" #include "absl/random/random.h" #include "absl/types/span.h" #include "gtest/gtest.h" #include "ortools/base/gmock.h" -#include "ortools/base/logging.h" #include "ortools/base/parse_test_proto.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" diff --git a/ortools/sat/probing.cc b/ortools/sat/probing.cc index 25c5df8e16..fa05291f3f 100644 --- a/ortools/sat/probing.cc +++ b/ortools/sat/probing.cc @@ -23,6 +23,7 @@ #include "absl/container/btree_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "ortools/base/logging.h" diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index 6cd013455b..dabc24bd2b 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -2521,6 +2521,35 @@ class CpSolver: index=_get_index(variables), ) + def float_value(self, expression: LinearExprT) -> float: + """Returns the value of a linear expression after solve.""" + return self._checked_response.float_value(expression) + + def float_values(self, expressions: _IndexOrSeries) -> pd.Series: + """Returns the float values of the input linear expressions. + + If `expressions` is a `pd.Index`, then the output will be indexed by the + variables. If `variables` is a `pd.Series` indexed by the underlying + dimensions, then the output will be indexed by the same underlying + dimensions. 
+ + Args: + expressions (Union[pd.Index, pd.Series]): The set of expressions from + which to get the values. + + Returns: + pd.Series: The values of all variables in the set. + + Raises: + RuntimeError: if solve() has not been called. + """ + if self.__response_wrapper is None: + raise RuntimeError("solve() has not been called.") + return pd.Series( + data=[self.__response_wrapper.float_value(expr) for expr in expressions], + index=_get_index(expressions), + ) + def boolean_value(self, literal: LiteralT) -> bool: """Returns the boolean value of a literal after solve.""" return self._checked_response.boolean_value(literal) @@ -2796,6 +2825,23 @@ class CpSolverSolutionCallback(cmh.SolutionCallback): raise RuntimeError("solve() has not been called.") return self.Value(expression) + def float_value(self, expression: LinearExprT) -> float: + """Evaluates an linear expression in the current solution. + + Args: + expression: a linear expression of the model. + + Returns: + An integer value equal to the evaluation of the linear expression + against the current solution. + + Raises: + RuntimeError: if 'expression' is not a LinearExpr. 
+ """ + if not self.has_response(): + raise RuntimeError("solve() has not been called.") + return self.FloatValue(expression) + def has_response(self) -> bool: return self.HasResponse() diff --git a/ortools/sat/python/cp_model_helper.cc b/ortools/sat/python/cp_model_helper.cc index 4e00d4c918..a8632552e5 100644 --- a/ortools/sat/python/cp_model_helper.cc +++ b/ortools/sat/python/cp_model_helper.cc @@ -163,10 +163,19 @@ class ResponseWrapper { double UserTime() const { return response_.user_time(); } + double FloatValue(std::shared_ptr expr) const { + FloatExprVisitor visitor; + visitor.AddToProcess(expr, 1); + return visitor.Evaluate(response_); + } + + double FixedFloatValue(double value) const { return value; } + int64_t Value(std::shared_ptr expr) const { - IntExprVisitor visitor; int64_t value; - if (!visitor.Evaluate(expr, response_, &value)) { + IntExprVisitor visitor; + visitor.AddToProcess(expr, 1); + if (!visitor.Evaluate(response_, &value)) { ThrowError(PyExc_ValueError, absl::StrCat("Failed to evaluate linear expression: ", expr->DebugString())); @@ -453,9 +462,10 @@ PYBIND11_MODULE(cp_model_helper, m) { "Value", [](const SolutionCallback& callback, std::shared_ptr expr) { - IntExprVisitor visitor; int64_t value; - if (!visitor.Evaluate(expr, callback.Response(), &value)) { + IntExprVisitor visitor; + visitor.AddToProcess(expr, 1); + if (!visitor.Evaluate(callback.Response(), &value)) { ThrowError(PyExc_ValueError, absl::StrCat("Failed to evaluate linear expression: ", expr->DebugString())); @@ -466,6 +476,21 @@ PYBIND11_MODULE(cp_model_helper, m) { .def( "Value", [](const SolutionCallback&, int64_t value) { return value; }, "Returns the value of a linear expression after solve.") + .def( + "FloatValue", + [](const SolutionCallback& callback, + std::shared_ptr expr) { + FloatExprVisitor visitor; + visitor.AddToProcess(expr, 1.0); + return visitor.Evaluate(callback.Response()); + }, + "Returns the value of a floating point linear expression after " + 
"solve.") + .def( + "FloatValue", + [](const SolutionCallback&, double value) { return value; }, + "Returns the value of a floating point linear expression after " + "solve.") .def( "BooleanValue", [](const SolutionCallback& callback, std::shared_ptr lit) { @@ -495,6 +520,8 @@ PYBIND11_MODULE(cp_model_helper, m) { .def("sufficient_assumptions_for_infeasibility", &ResponseWrapper::SufficientAssumptionsForInfeasibility) .def("user_time", &ResponseWrapper::UserTime) + .def("float_value", &ResponseWrapper::FloatValue, py::arg("expr")) + .def("float_value", &ResponseWrapper::FixedFloatValue, py::arg("value")) .def("value", &ResponseWrapper::Value, py::arg("expr")) .def("value", &ResponseWrapper::FixedValue, py::arg("value")) .def("wall_time", &ResponseWrapper::WallTime); @@ -1037,6 +1064,7 @@ PYBIND11_MODULE(cp_model_helper, m) { "Evaluating a Literal as a Boolean valueis " "not supported."); }) + .def("__hash__", &Literal::Hash) // PEP8 Compatibility. .def("Not", &Literal::negated) .def("Index", &Literal::index); diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index 068abfd404..3d5c5296b9 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -57,6 +57,22 @@ class SolutionSum(cp_model.CpSolverSolutionCallback): return self.__sum +class SolutionFloatValue(cp_model.CpSolverSolutionCallback): + """Record the evaluation of a float expression in the solution.""" + + def __init__(self, expr: cp_model.LinearExpr) -> None: + cp_model.CpSolverSolutionCallback.__init__(self) + self.__expr: cp_model.LinearExpr = expr + self.__value: float = 0.0 + + def on_solution_callback(self) -> None: + self.__value = self.float_value(self.__expr) + + @property + def value(self) -> float: + return self.__value + + class SolutionObjective(cp_model.CpSolverSolutionCallback): """Record the objective value of the solution.""" @@ -191,6 +207,12 @@ class CpModelTest(absltest.TestCase): cst = model.new_constant(5) 
self.assertEqual("5", str(cst)) + def test_hash_int_var(self) -> None: + model = cp_model.CpModel() + var_a = model.new_int_var(0, 2, "a") + variables = set() + variables.add(var_a) + def test_literal(self) -> None: model = cp_model.CpModel() x = model.new_bool_var("x") @@ -1528,6 +1550,18 @@ class CpModelTest(absltest.TestCase): self.assertEqual(cp_model.OPTIMAL, status) self.assertEqual(6, solution_sum.sum) + def test_solve_with_float_value_in_callback(self) -> None: + model = cp_model.CpModel() + x = model.new_int_var(0, 5, "x") + y = model.new_int_var(0, 5, "y") + model.add_linear_constraint(x + y, 6, 6) + + solver = cp_model.CpSolver() + solution_float_value = SolutionFloatValue((x + y) * 0.5) + status = solver.solve(model, solution_float_value) + self.assertEqual(cp_model.OPTIMAL, status) + self.assertEqual(3.0, solution_float_value.value) + def test_best_bound_callback(self) -> None: model = cp_model.CpModel() x0 = model.new_bool_var("x0") @@ -1558,6 +1592,17 @@ class CpModelTest(absltest.TestCase): self.assertEqual(solver.value(y), 10) self.assertEqual(solver.value(2), 2) + def test_float_value(self) -> None: + model = cp_model.CpModel() + x = model.new_int_var(0, 10, "x") + y = model.new_int_var(0, 10, "y") + model.add(x + 2 * y == 29) + solver = cp_model.CpSolver() + status = solver.solve(model) + self.assertEqual(cp_model.OPTIMAL, status) + self.assertEqual(solver.float_value(x * 1.5 + 0.25), 13.75) + self.assertEqual(solver.float_value(2.25), 2.25) + def test_boolean_value(self) -> None: model = cp_model.CpModel() x = model.new_bool_var("x") diff --git a/ortools/sat/python/linear_expr.cc b/ortools/sat/python/linear_expr.cc index a0ba3e1e85..66d2214ae2 100644 --- a/ortools/sat/python/linear_expr.cc +++ b/ortools/sat/python/linear_expr.cc @@ -21,6 +21,7 @@ #include #include +#include "absl/hash/hash.h" #include "absl/log/check.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" @@ -130,20 +131,25 @@ void 
FloatExprVisitor::AddToProcess(std::shared_ptr expr, double coeff) { to_process_.push_back(std::make_pair(expr, coeff)); } + void FloatExprVisitor::AddConstant(double constant) { offset_ += constant; } + void FloatExprVisitor::AddVarCoeff(std::shared_ptr var, double coeff) { canonical_terms_[var] += coeff; } -double FloatExprVisitor::Process(std::shared_ptr expr, - std::vector>* vars, - std::vector* coeffs) { - AddToProcess(expr, 1.0); + +void FloatExprVisitor::ProcessAll() { while (!to_process_.empty()) { const auto [expr, coeff] = to_process_.back(); to_process_.pop_back(); expr->VisitAsFloat(*this, coeff); } +} + +double FloatExprVisitor::Process(std::vector>* vars, + std::vector* coeffs) { + ProcessAll(); vars->clear(); coeffs->clear(); @@ -156,9 +162,20 @@ double FloatExprVisitor::Process(std::shared_ptr expr, return offset_; } +double FloatExprVisitor::Evaluate(const CpSolverResponse& solution) { + ProcessAll(); + + for (const auto& [var, coeff] : canonical_terms_) { + if (coeff == 0) continue; + offset_ += coeff * solution.solution(var->index()); + } + return offset_; +} + FlatFloatExpr::FlatFloatExpr(std::shared_ptr expr) { FloatExprVisitor lin; - offset_ = lin.Process(expr, &vars_, &coeffs_); + lin.AddToProcess(expr, 1.0); + offset_ = lin.Process(&vars_, &coeffs_); } void FlatFloatExpr::VisitAsFloat(FloatExprVisitor& lin, double c) { @@ -735,10 +752,8 @@ bool IntExprVisitor::Process(std::vector>* vars, return true; } -bool IntExprVisitor::Evaluate(std::shared_ptr expr, - const CpSolverResponse& solution, +bool IntExprVisitor::Evaluate(const CpSolverResponse& solution, int64_t* value) { - AddToProcess(expr, 1); if (!ProcessAll()) return false; *value = offset_; @@ -749,6 +764,10 @@ bool IntExprVisitor::Evaluate(std::shared_ptr expr, return true; } +// TODO(user): This hash method does not distinguish between variables with +// the same index and different models. 
+int64_t Literal::Hash() const { return absl::HashOf(index()); } + bool BaseIntVarComparator::operator()(std::shared_ptr lhs, std::shared_ptr rhs) const { return lhs->index() < rhs->index(); diff --git a/ortools/sat/python/linear_expr.h b/ortools/sat/python/linear_expr.h index 87e311c2f2..1f3b07eed0 100644 --- a/ortools/sat/python/linear_expr.h +++ b/ortools/sat/python/linear_expr.h @@ -160,9 +160,10 @@ class FloatExprVisitor { void AddToProcess(std::shared_ptr expr, double coeff); void AddConstant(double constant); void AddVarCoeff(std::shared_ptr var, double coeff); - double Process(std::shared_ptr expr, - std::vector>* vars, + void ProcessAll(); + double Process(std::vector>* vars, std::vector* coeffs); + double Evaluate(const CpSolverResponse& solution); private: std::vector, double>> to_process_; @@ -212,8 +213,7 @@ class IntExprVisitor { bool ProcessAll(); bool Process(std::vector>* vars, std::vector* coeffs, int64_t* offset); - bool Evaluate(std::shared_ptr expr, - const CpSolverResponse& solution, int64_t* value); + bool Evaluate(const CpSolverResponse& solution, int64_t* value); private: std::vector, int64_t>> to_process_; @@ -477,6 +477,9 @@ class Literal : public LinearExpr { * The negation of the current literal. */ virtual std::shared_ptr negated() = 0; + + /// Returns the hash of the current literal. 
+ int64_t Hash() const; }; /** diff --git a/ortools/sat/routing_cuts.cc b/ortools/sat/routing_cuts.cc index ce38eda1c7..3bf9160669 100644 --- a/ortools/sat/routing_cuts.cc +++ b/ortools/sat/routing_cuts.cc @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -30,14 +31,20 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" +#include "absl/numeric/bits.h" #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/base/mathutil.h" +#include "ortools/base/stl_util.h" #include "ortools/base/strong_vector.h" +#include "ortools/graph/connected_components.h" #include "ortools/graph/graph.h" #include "ortools/graph/max_flow.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_utils.h" #include "ortools/sat/cuts.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" @@ -46,6 +53,7 @@ #include "ortools/sat/model.h" #include "ortools/sat/precedences.h" #include "ortools/sat/sat_base.h" +#include "ortools/sat/synchronization.h" #include "ortools/sat/util.h" #include "ortools/util/strong_integers.h" @@ -99,6 +107,7 @@ MinOutgoingFlowHelper::MinOutgoingFlowHelper( *model->GetOrCreate()), trail_(*model->GetOrCreate()), integer_trail_(*model->GetOrCreate()), + shared_stats_(model->GetOrCreate()), in_subset_(num_nodes, false), index_in_subset_(num_nodes, -1), incoming_arc_indices_(num_nodes), @@ -108,31 +117,260 @@ MinOutgoingFlowHelper::MinOutgoingFlowHelper( node_var_lower_bounds_(num_nodes), next_node_var_lower_bounds_(num_nodes) {} +MinOutgoingFlowHelper::~MinOutgoingFlowHelper() { + if (!VLOG_IS_ON(1)) return; + std::vector> stats; + stats.push_back({"RoutingDp/num_full_dp_calls", num_full_dp_calls_}); + stats.push_back({"RoutingDp/num_full_dp_skips", num_full_dp_skips_}); + stats.push_back( + 
{"RoutingDp/num_full_dp_early_abort", num_full_dp_early_abort_}); + stats.push_back( + {"RoutingDp/num_full_dp_work_abort", num_full_dp_work_abort_}); + for (const auto& [name, count] : num_by_type_) { + stats.push_back({absl::StrCat("RoutingDp/num_bounds_", name), count}); + } + shared_stats_->AddStats(stats); +} + +int MinOutgoingFlowHelper::ComputeDemandBasedMinOutgoingFlow( + absl::Span subset, const RouteRelationsHelper& helper) { + DCHECK_EQ(helper.num_nodes(), in_subset_.size()); + DCHECK_EQ(helper.num_arcs(), tails_.size()); + + // TODO(user): When we try multiple algorithm from this class on the same + // subset, we should initialize the graph just once as this is costly on large + // problem. + InitializeGraph(subset); + + int min_outgoing_flow = 1; + std::string best_name; + for (int d = 0; d < helper.num_dimensions(); ++d) { + for (const bool negate_variables : {false, true}) { + for (const bool use_incoming : {true, false}) { + bool gcd_was_used = false; + const int bound = ComputeMinNumberOfBins( + RelaxIntoSpecialBinPackingProblem(subset, d, negate_variables, + use_incoming, helper), + &gcd_was_used); + if (bound > min_outgoing_flow) { + min_outgoing_flow = bound; + best_name = absl::StrCat((use_incoming ? "in_" : "out_"), + (gcd_was_used ? "gcd_" : ""), + (negate_variables ? "neg_" : ""), d); + } + } + } + } + + if (min_outgoing_flow > 1) num_by_type_[best_name]++; + return min_outgoing_flow; +} + +absl::Span MinOutgoingFlowHelper::RelaxIntoSpecialBinPackingProblem( + absl::Span subset, int dimension, bool negate_variables, + bool use_incoming, const RouteRelationsHelper& helper) { + tmp_bin_packing_problem_.clear(); + + // Computes UB/LB. The derivation in the .h used a simple version, but we can + // be a bit tighter. We explain for the use_incoming case only: + // + // In this setting, recall that bins correspond to the first node of each + // route covering the subset. 
The max_upper_bound is a maximum over the last + // node of each route, and as such it does not need to consider nodes with no + // arc leaving the subset: such nodes cannot be last. + // + // Moreover, if the bin is not empty, then the bound for that bin does not + // need to consider the variable of the "bin" node itself. If the new bound is + // non-negative, then it is also valid for the empty bin. If it is negative we + // have an issue as using it will force the bin to not be empty. But we can + // always use std::max(0, new_bound) which is always valid and still tighter + // than the bound explained in the .h which is always non-negative. + // + // This way we can have a different bound for each bin, which is slightly + // stronger than using the same bound for all of them. + const int num_nodes = in_subset_.size(); + std::vector min_lower_bound_of_others(num_nodes, + kMaxIntegerValue); + std::vector max_upper_bound_of_others(num_nodes, + kMinIntegerValue); + + // We do a forward and a backward pass in order to compute the min/max + // of all the other nodes for each node. + for (const bool forward_pass : {true, false}) { + IntegerValue local_min = kMaxIntegerValue; + IntegerValue local_max = kMinIntegerValue; + const int size = subset.size(); + for (int i = 0; i < size; ++i) { + const int n = forward_pass ? subset[i] : subset[size - 1 - i]; + AffineExpression expr = helper.GetNodeExpression(n, dimension); + if (negate_variables) expr = expr.Negated(); + + // The local_min/max contains the min/max of all nodes strictly before 'n' + // in the forward pass (resp. striclty after). So after two passes, + // min/max_..._of_others[n] will contains the min/max of all nodes + // different from n. + min_lower_bound_of_others[n] = + std::min(min_lower_bound_of_others[n], local_min); + max_upper_bound_of_others[n] = + std::max(max_upper_bound_of_others[n], local_max); + + // Update local_min/max. 
+ if (has_incoming_arcs_from_outside_[n]) { + local_min = + std::min(local_min, integer_trail_.LevelZeroLowerBound(expr)); + } + if (has_outgoing_arcs_to_outside_[n]) { + local_max = + std::max(local_max, integer_trail_.LevelZeroUpperBound(expr)); + } + } + } + + for (const int n : subset) { + AffineExpression expr = helper.GetNodeExpression(n, dimension); + if (negate_variables) expr = expr.Negated(); + + const absl::Span arcs = + use_incoming ? incoming_arc_indices_[n] : outgoing_arc_indices_[n]; + IntegerValue demand = kMaxIntegerValue; + for (const int a : arcs) { + demand = + std::min(demand, helper.GetArcOffsetBound( + a, dimension, negate_variables, integer_trail_)); + } + + ItemOrBin obj; + obj.demand = demand; + + // We compute the capacity like this to avoid overflow and always have + // a non-negative capacity (see above). + obj.capacity = 0; + if (use_incoming) { + if (max_upper_bound_of_others[n] > + integer_trail_.LevelZeroLowerBound(expr)) { + obj.capacity = max_upper_bound_of_others[n] - + integer_trail_.LevelZeroLowerBound(expr); + } + } else { + if (integer_trail_.LevelZeroUpperBound(expr) > + min_lower_bound_of_others[n]) { + obj.capacity = integer_trail_.LevelZeroUpperBound(expr) - + min_lower_bound_of_others[n]; + } + } + + // Note that we don't explicitly deal with the corner case of a subset node + // with no arcs. This corresponds to INFEASIBLE problem and should be dealt + // with elsewhere. Being "less restrictive" will still result in a valid + // bound and that is enough here. + if ((use_incoming && !has_incoming_arcs_from_outside_[n]) || + (!use_incoming && !has_outgoing_arcs_to_outside_[n])) { + obj.type = ItemOrBin::MUST_BE_ITEM; + obj.capacity = 0; + } else if (arcs.empty()) { + obj.type = ItemOrBin::MUST_BE_BIN; + obj.demand = 0; // We don't want kMaxIntegerValue ! 
+ } else { + obj.type = ItemOrBin::ITEM_OR_BIN; + } + + tmp_bin_packing_problem_.push_back(obj); + } + + return absl::MakeSpan(tmp_bin_packing_problem_); +} + +int ComputeMinNumberOfBins(absl::Span objects, bool* gcd_was_used) { + if (objects.empty()) return 0; + + IntegerValue sum_of_demands(0); + int64_t gcd = 0; + for (const ItemOrBin& obj : objects) { + sum_of_demands = CapAddI(sum_of_demands, obj.demand); + gcd = std::gcd(gcd, std::abs(obj.demand.value())); + } + + // TODO(user): we can probably handle a couple of extra case rather than just + // bailing out here and below. + if (AtMinOrMaxInt64I(sum_of_demands)) return 0; + + // If the gcd of all the demands term is positive, we can divide everything. + *gcd_was_used = (gcd > 1); + if (gcd > 1) { + for (ItemOrBin& obj : objects) { + obj.demand /= gcd; + obj.capacity = FloorRatio(obj.capacity, gcd); + } + sum_of_demands /= gcd; + } + + // For a given choice of bins (set B), a feasible problem must satisfy. + // sum_{i \notin B} demands_i <= sum_{i \in B} capacity_i. + // + // Using this we can compute a lower bound on the number of bins needed + // using a greedy algorithm that chooses B in order to make the above + // inequality as "loose" as possible. + // + // This puts 'a' before 'b' if we get more unused capacity by using 'a' as a + // bin and 'b' as an item rather than the other way around. If we call the + // other items demands D and capacities C, the two options are: + // - option1: a.demand + D <= b.capacity + C + // - option2: b.demand + D <= a.capacity + C + // + // The option2 above leads to more unused capacity: + // (a.capacity - b.demand) > (b.capacity - a.demand). + std::stable_sort(objects.begin(), objects.end(), + [](const ItemOrBin& a, const ItemOrBin& b) { + if (a.type != b.type) { + // We want in order: + // MUST_BE_BIN, ITEM_OR_BIN, MUST_BE_ITEM. 
+ return a.type > b.type; + } + return a.capacity + a.demand > b.capacity + b.demand; + }); + + // We start with no bins (sum_of_demands=everything, sum_of_capacity=0) and + // add the best bins one by one until we have sum_of_demands <= + // sum_of_capacity. + int num_bins = 0; + IntegerValue sum_of_capacity(0); + for (; num_bins < objects.size(); ++num_bins) { + // Use obj as a bin instead of as an item, if possible (i.e., unless + // obj.type is MUST_BE_ITEM). + const ItemOrBin& obj = objects[num_bins]; + if (obj.type != ItemOrBin::MUST_BE_BIN && + sum_of_demands <= sum_of_capacity) { + return num_bins; + } + if (obj.type == ItemOrBin::MUST_BE_ITEM) { + // Because of our order, we only have objects of type MUST_BE_ITEM left. + // Hence we can no longer change items to bins, and since the demands + // exceed the capacity, the problem is infeasible. We just return the + // (number of objects + 1) in this case (any bound is valid). + DCHECK_GT(sum_of_demands, sum_of_capacity); + return objects.size() + 1; + } + sum_of_capacity = CapAddI(sum_of_capacity, obj.capacity); + if (AtMinOrMaxInt64I(sum_of_capacity)) return num_bins; + sum_of_demands -= obj.demand; + } + return num_bins; +} + int MinOutgoingFlowHelper::ComputeMinOutgoingFlow( absl::Span subset) { DCHECK_GE(subset.size(), 1); - DCHECK(absl::c_all_of(in_subset_, [](bool b) { return !b; })); - DCHECK(absl::c_all_of(incoming_arc_indices_, - [](const auto& v) { return v.empty(); })); - DCHECK(absl::c_all_of(reachable_, [](bool b) { return !b; })); DCHECK(absl::c_all_of(next_reachable_, [](bool b) { return !b; })); DCHECK(absl::c_all_of(node_var_lower_bounds_, [](const auto& m) { return m.empty(); })); DCHECK(absl::c_all_of(next_node_var_lower_bounds_, [](const auto& m) { return m.empty(); })); - for (const int n : subset) { - in_subset_[n] = true; - // Conservatively assume that each subset node is reachable from outside. 
- reachable_[n] = true; - } - const int num_arcs = tails_.size(); - for (int i = 0; i < num_arcs; ++i) { - if (in_subset_[tails_[i]] && in_subset_[heads_[i]] && - heads_[i] != tails_[i]) { - incoming_arc_indices_[heads_[i]].push_back(i); - } - } + InitializeGraph(subset); + + // Conservatively assume that each subset node is reachable from outside. + // TODO(user): use has_incoming_arcs_from_outside_[] to be more precise. + reachable_ = in_subset_; // Maximum number of nodes of a feasible path inside the subset. int longest_path_length = 1; @@ -180,12 +418,10 @@ int MinOutgoingFlowHelper::ComputeMinOutgoingFlow( // The maximum number of distinct paths of length `longest_path_length`. int max_longest_paths = 0; - // Reset the temporary data structures for the next call. for (const int n : subset) { - in_subset_[n] = false; - incoming_arc_indices_[n].clear(); if (reachable_[n]) ++max_longest_paths; - reachable_[n] = false; + + // Reset the temporary data structures for the next call. next_reachable_[n] = false; node_var_lower_bounds_[n].clear(); next_node_var_lower_bounds_[n].clear(); @@ -227,82 +463,69 @@ struct Path { } }; -struct PathVariableBounds { - absl::flat_hash_set incoming_arc_indices; - absl::flat_hash_map> - lower_bound_by_var_and_arc_index; -}; } // namespace +void MinOutgoingFlowHelper::InitializeGraph(absl::Span subset) { + const int num_nodes = in_subset_.size(); + in_subset_.assign(num_nodes, false); + index_in_subset_.assign(num_nodes, -1); + for (int i = 0; i < subset.size(); ++i) { + const int n = subset[i]; + in_subset_[n] = true; + index_in_subset_[n] = i; + } + + has_incoming_arcs_from_outside_.assign(num_nodes, false); + has_outgoing_arcs_to_outside_.assign(num_nodes, false); + + for (auto& v : incoming_arc_indices_) v.clear(); + for (auto& v : outgoing_arc_indices_) v.clear(); + for (int i = 0; i < tails_.size(); ++i) { + const int tail = tails_[i]; + const int head = heads_[i]; + + // we always ignore self-arcs here. 
+ if (tail == head) continue; + + if (in_subset_[tail] && in_subset_[head]) { + outgoing_arc_indices_[tail].push_back(i); + incoming_arc_indices_[head].push_back(i); + } else if (in_subset_[tail] && !in_subset_[head]) { + has_outgoing_arcs_to_outside_[tail] = true; + } else if (!in_subset_[tail] && in_subset_[head]) { + has_incoming_arcs_from_outside_[head] = true; + } + } +} + int MinOutgoingFlowHelper::ComputeTightMinOutgoingFlow( absl::Span subset) { DCHECK_GE(subset.size(), 1); DCHECK_LE(subset.size(), 32); - DCHECK(absl::c_all_of(index_in_subset_, [](int i) { return i == -1; })); - DCHECK(absl::c_all_of(outgoing_arc_indices_, - [](const auto& v) { return v.empty(); })); std::vector longest_path_length_by_end_node(subset.size(), 1); - for (int i = 0; i < subset.size(); ++i) { - index_in_subset_[subset[i]] = i; - } - for (int i = 0; i < tails_.size(); ++i) { - if (index_in_subset_[tails_[i]] != -1 && - index_in_subset_[heads_[i]] != -1 && heads_[i] != tails_[i]) { - outgoing_arc_indices_[tails_[i]].push_back(i); - } - } + InitializeGraph(subset); - absl::flat_hash_map path_var_bounds; + absl::flat_hash_map tmp_lbs; + absl::flat_hash_map> + path_var_bounds; std::vector paths; std::vector next_paths; for (int i = 0; i < subset.size(); ++i) { + // TODO(user): use has_incoming_arcs_from_outside_[] to skip some nodes. paths.push_back( {.node_set = static_cast(1 << i), .last_node = subset[i]}); + path_var_bounds[paths.back()] = {}; // LevelZero bounds. } int longest_path_length = 1; for (int path_length = 1; path_length <= subset.size(); ++path_length) { for (const Path& path : paths) { - // Merge the bounds by variable and arc incoming to the last node of the - // path into bounds by variable, if possible, and check whether they are - // feasible or not. 
- const auto& var_bounds = path_var_bounds[path]; - absl::flat_hash_map lower_bound_by_var; - for (const auto& [var, lower_bound_by_arc_index] : - var_bounds.lower_bound_by_var_and_arc_index) { - // If each arc which can reach the last node of the path enforces some - // lower bound for `var`, then the lower bound of `var` can be increased - // to the minimum of these arc-specific lower bounds (since at least one - // of these arcs must be selected to reach this node). - if (lower_bound_by_arc_index.size() != - var_bounds.incoming_arc_indices.size()) { - continue; - } - IntegerValue lb = lower_bound_by_arc_index.begin()->second; - for (const auto& [_, lower_bound] : lower_bound_by_arc_index) { - lb = std::min(lb, lower_bound); - } - if (lb > integer_trail_.LevelZeroLowerBound(var)) { - lower_bound_by_var[var] = lb; - } - } - path_var_bounds.erase(path); - auto get_lower_bound = [&](IntegerVariable var) { - const auto it = lower_bound_by_var.find(var); - if (it != lower_bound_by_var.end()) return it->second; - return integer_trail_.LevelZeroLowerBound(var); - }; - auto get_upper_bound = [&](IntegerVariable var) { - return -get_lower_bound(NegationOf(var)); - }; - bool feasible_path = true; - for (const auto& [var, lb] : lower_bound_by_var) { - if (get_upper_bound(var) < lb) { - feasible_path = false; - break; - } - } - if (!feasible_path) continue; + // We remove it from the hash_map since this entry should no longer be + // used as we create path of increasing length. + DCHECK(path_var_bounds.contains(path)); + const absl::flat_hash_map path_bounds = + std::move(path_var_bounds.extract(path).mapped()); + // We have found a feasible path, update the path length statistics... 
longest_path_length = path_length; longest_path_length_by_end_node[index_in_subset_[path.last_node]] = @@ -319,47 +542,23 @@ int MinOutgoingFlowHelper::ComputeTightMinOutgoingFlow( const Path new_path = { .node_set = path.node_set | (1 << head_index_in_subset), .last_node = head}; - if (!path_var_bounds.contains(new_path)) { - next_paths.push_back(new_path); + + // If this arc cannot be taken skip. + tmp_lbs.clear(); + if (!binary_relation_repository_.PropagateLocalBounds( + integer_trail_, literals_[outgoing_arc_index], path_bounds, + &tmp_lbs)) { + continue; } - auto& new_var_bounds = path_var_bounds[new_path]; - new_var_bounds.incoming_arc_indices.insert(outgoing_arc_index); - auto update_lower_bound_by_var_and_arc_index = - [&](IntegerVariable var, int arc_index, IntegerValue lb) { - auto& lower_bound_by_arc_index = - new_var_bounds.lower_bound_by_var_and_arc_index[var]; - auto it = lower_bound_by_arc_index.find(arc_index); - if (it != lower_bound_by_arc_index.end()) { - it->second = std::max(it->second, lb); - } else { - lower_bound_by_arc_index[arc_index] = lb; - } - }; - auto update_upper_bound_by_var_and_arc_index = - [&](IntegerVariable var, int arc_index, IntegerValue ub) { - update_lower_bound_by_var_and_arc_index(NegationOf(var), - arc_index, -ub); - }; - auto update_var_bounds = [&](int arc_index, LinearTerm a, LinearTerm b, - IntegerValue lhs, IntegerValue rhs) { - if (a.coeff == 0) return; - a.MakeCoeffPositive(); - b.MakeCoeffPositive(); - // lb(b.y) <= b.y <= ub(b.y) and lhs <= a.x + b.y <= rhs imply - // ceil((lhs - ub(b.y)) / a) <= x <= floor((rhs - lb(b.y)) / a) - lhs = lhs - b.coeff * get_upper_bound(b.var); - rhs = rhs - b.coeff * get_lower_bound(b.var); - update_lower_bound_by_var_and_arc_index( - a.var, arc_index, MathUtil::CeilOfRatio(lhs, a.coeff)); - update_upper_bound_by_var_and_arc_index( - a.var, arc_index, MathUtil::FloorOfRatio(rhs, a.coeff)); - }; - const Literal lit = literals_[outgoing_arc_index]; - for (const int relation_index : 
- binary_relation_repository_.relation_indices(lit)) { - const auto& r = binary_relation_repository_.relation(relation_index); - update_var_bounds(outgoing_arc_index, r.a, r.b, r.lhs, r.rhs); - update_var_bounds(outgoing_arc_index, r.b, r.a, r.lhs, r.rhs); + + const auto [it, inserted] = path_var_bounds.insert({new_path, tmp_lbs}); + if (inserted) { + // We have a feasible path to a new state. + next_paths.push_back(new_path); + } else { + // We found another way to reach this state, only keep common best + // bounds. + ComputeMinLowerBoundOfSharedVariables(tmp_lbs, &it->second); + } + } + } @@ -373,23 +572,701 @@ int MinOutgoingFlowHelper::ComputeTightMinOutgoingFlow( ++max_longest_paths; } } - // Reset the temporary data structures for the next call. - for (const int n : subset) { - index_in_subset_[n] = -1; - outgoing_arc_indices_[n].clear(); - } + return GetMinOutgoingFlow(subset.size(), longest_path_length, max_longest_paths); } +bool MinOutgoingFlowHelper::SubsetMightBeServedWithKRoutes( + int k, absl::Span subset) { + if (k >= subset.size()) return true; + if (subset.size() > 31) return true; + + ++num_full_dp_calls_; + InitializeGraph(subset); + + struct State { + // Bit i is set iff node subset[i] is in one of the current routes. + uint32_t node_set; + + // The last nodes of each of the k routes. If the hamming weight is less + // than k, then at least one route is still empty. + uint32_t last_nodes_set; + + // Valid lower bounds for this state. + // + // Note that unlike the other algorithm here, we keep the collective bounds + // of all the nodes so far, so this is likely in + // O(longest_route * num_dimensions) which can take quite a lot of space. + // + // By "dimensions", we mean the number of variables appearing in binary + // relation controlled by an arc literal. See for instance + // RouteRelationsHelper that also uses a similar definition. 
+ // + // Hopefully the DFS order limits the number of entries to O(n^2 * k), so still + // somewhat reasonable for small values. + absl::flat_hash_map lbs; + }; + + const int size = subset.size(); + const uint32_t final_mask = (1 << size) - 1; + + // This is also correlated to the work done, and we abort if we start to + // do too much work on one instance. + int64_t allocated_memory_estimate = 0; + + // We just do a DFS from the initial state. + std::vector states; + states.push_back(State()); + while (!states.empty()) { + if (allocated_memory_estimate > 1e7) { + ++num_full_dp_work_abort_; + return true; // Abort. + } + const State from_state = std::move(states.back()); + states.pop_back(); + + // The number of routes is the hamming weight of from_state.last_nodes_set. + const int num_routes = absl::popcount(from_state.last_nodes_set); + + // We start by choosing the first k starts (in increasing order). + // For that we only add after the maximum position already chosen. + if (num_routes < k) { + const int num_extra = k - num_routes - 1; + for (int i = 0; i + num_extra < size; ++i) { + if (from_state.node_set >> i) continue; + if (!has_incoming_arcs_from_outside_[subset[i]]) continue; + + // All "initial-state" start with an empty hash-map that corresponds to + // the level zero bounds. + State to_state; + const uint32_t head_mask = (1 << i); + to_state.node_set = from_state.node_set | head_mask; + to_state.last_nodes_set = from_state.last_nodes_set | head_mask; + if (to_state.node_set == final_mask) { + ++num_full_dp_early_abort_; + return true; // All served! + } + states.push_back(std::move(to_state)); + } + continue; + } + + // We have k routes, extend one of the last nodes. + // + // TODO(user): we cannot have any last node with + // has_outgoing_arcs_to_outside_[node] at false! Exploit this. 
+ for (int i = 0; i < size; ++i) { + const uint32_t tail_mask = 1 << i; + if ((from_state.last_nodes_set & tail_mask) == 0) continue; + + for (const int outgoing_arc_index : outgoing_arc_indices_[subset[i]]) { + const int head = heads_[outgoing_arc_index]; + const uint32_t head_mask = (1 << index_in_subset_[head]); + if (from_state.node_set & head_mask) continue; + + State to_state; + to_state.lbs = from_state.lbs; // keep old bounds + if (!binary_relation_repository_.PropagateLocalBounds( + integer_trail_, literals_[outgoing_arc_index], from_state.lbs, + &to_state.lbs)) { + continue; + } + + to_state.node_set = from_state.node_set | head_mask; + to_state.last_nodes_set = from_state.last_nodes_set | head_mask; + to_state.last_nodes_set ^= tail_mask; + allocated_memory_estimate += to_state.lbs.size(); + if (to_state.node_set == final_mask) { + ++num_full_dp_early_abort_; + return true; // All served! + } + states.push_back(std::move(to_state)); + } + } + } + + // We explored everything, no way to serve this with only k routes! 
+ return false; +} + +namespace { +IntegerVariable UniqueSharedVariable(const Relation& r1, const Relation& r2) { + DCHECK_NE(r1.a.var, r1.b.var); + DCHECK_NE(r2.a.var, r2.b.var); + if (r1.a.var == r2.a.var && r1.b.var != r2.b.var) return r1.a.var; + if (r1.a.var == r2.b.var && r1.b.var != r2.a.var) return r1.a.var; + if (r1.b.var == r2.a.var && r1.a.var != r2.b.var) return r1.b.var; + if (r1.b.var == r2.b.var && r1.a.var != r2.a.var) return r1.b.var; + return kNoIntegerVariable; +} + +class RouteRelationsBuilder { + public: + using Relation = RouteRelationsHelper::Relation; + + RouteRelationsBuilder( + int num_nodes, absl::Span tails, absl::Span heads, + absl::Span literals, + absl::Span flat_node_dim_expressions, + const BinaryRelationRepository& binary_relation_repository) + : num_nodes_(num_nodes), + num_arcs_(tails.size()), + tails_(tails), + heads_(heads), + literals_(literals), + binary_relation_repository_(binary_relation_repository) { + if (!flat_node_dim_expressions.empty()) { + DCHECK_EQ(flat_node_dim_expressions.size() % num_nodes, 0); + num_dimensions_ = flat_node_dim_expressions.size() / num_nodes; + flat_node_dim_expressions_.assign(flat_node_dim_expressions.begin(), + flat_node_dim_expressions.end()); + } + } + + int num_dimensions() const { return num_dimensions_; } + + const std::vector& flat_node_dim_expressions() const { + return flat_node_dim_expressions_; + } + + const std::vector& flat_arc_dim_relations() const { + return flat_arc_dim_relations_; + } + + bool Build() { + if (flat_node_dim_expressions_.empty()) { + // Step 1: find the number of dimensions (as the number of connected + // components in the graph of binary relations), and find to which + // dimension each variable belongs. + ComputeDimensionOfEachVariable(); + if (num_dimensions_ == 0) return false; + + // Step 2: find the variables which can be unambiguously associated with a + // node and dimension. 
+ // - compute the indices of the binary relations which can be + // unambiguously associated with the incoming and outgoing arcs of each + // node, per dimension. + ComputeAdjacentRelationsPerNodeAndDimension(); + // - find variable associations by using variables which are uniquely + // shared by two adjacent relations of a node. + std::queue> node_dim_pairs = + ComputeVarAssociationsFromSharedVariableOfAdjacentRelations(); + if (node_dim_pairs.empty()) return false; + // - find more variable associations by using arcs from nodes with + // an associated variable, whose other end has no associated variable, and + // where only one variable can be associated with it. + ComputeVarAssociationsFromRelationsWithSingleFreeVar(node_dim_pairs); + } + + // Step 3: compute the relation for each arc and dimension, now that the + // variables associated with each node and dimension are known. + ComputeArcRelations(); + return true; + } + + private: + // A coeff * var + offset affine expression, where `var` is always a positive + // reference (contrary to AffineExpression, where the coefficient is always + // positive). 
+ struct NodeExpression { + IntegerVariable var; + IntegerValue coeff; + IntegerValue offset; + + explicit NodeExpression(const AffineExpression& expr) { + if (expr.var == kNoIntegerVariable || VariableIsPositive(expr.var)) { + var = expr.var; + coeff = expr.coeff; + } else { + var = PositiveVariable(expr.var); + coeff = -expr.coeff; + } + offset = expr.constant; + } + + bool IsEmpty() const { return var == kNoIntegerVariable; } + }; + + AffineExpression& node_expression(int node, int dimension) { + return flat_node_dim_expressions_[node * num_dimensions_ + dimension]; + }; + + const Relation& arc_relation(int arc_index, int dimension) const { + return flat_arc_dim_relations_[arc_index * num_dimensions_ + dimension]; + } + + void MaybeSetArcRelation(int arc_index, int dimension, + const NodeExpression& tail_expr, + const NodeExpression& head_expr, + const sat::Relation& r) { + Relation& relation = + flat_arc_dim_relations_[arc_index * num_dimensions_ + dimension]; + IntegerValue tail_coeff; + IntegerValue head_coeff; + // A relation a * X + b * Y \in [lhs, rhs] between the X and Y variables is + // equivalent to a (k.a/A) * (A.X+α) + (k.b/B) * (B.Y+β) \in [k.lhs+ɣ, + // k.rhs+ɣ] relation between the A.X+α and B.Y+β terms if the divisions are + // exact, where ɣ=(k.a/A)*α+(k.b/B)*β. The smallest k > 0 such that k.a/A is + // integer is |A|/gcd(a, A), and the smallest k > 0 such that k.b/B is + // integer is |B|/gcd(b, B). The least common multiple of the two is the + // smallest k ensuring the above equivalence. 
+ int64_t a = r.a.coeff.value(); + int64_t b = r.b.coeff.value(); + if (r.a.var != tail_expr.var) std::swap(a, b); + const int64_t tail_k = std::abs(tail_expr.coeff.value()) / + std::gcd(tail_expr.coeff.value(), a); + const int64_t head_k = std::abs(head_expr.coeff.value()) / + std::gcd(head_expr.coeff.value(), b); + const int64_t k = std::lcm(tail_k, head_k); + // TODO(user): do not add the relation in case of overflow (this can + // happen if the expressions are provided by the user in the model proto). + tail_coeff = (k * a) / tail_expr.coeff; + head_coeff = (k * b) / head_expr.coeff; + // If several relations are associated with the same arc, keep the first one + // found, unless the new one has opposite +1/-1 coefficients (these + // relations are preferred because they can be used to compute "demand + // based" min outgoing flows). + if (!relation.empty()) { + if (IntTypeAbs(tail_coeff) != 1 || head_coeff != -tail_coeff) return; + } + const IntegerValue domain_offset = + tail_coeff * tail_expr.offset + head_coeff * head_expr.offset; + relation.tail_coeff = tail_coeff; + relation.head_coeff = head_coeff; + relation.lhs = k * r.lhs + domain_offset; + relation.rhs = k * r.rhs + domain_offset; + if (relation.head_coeff < 0) { + relation.tail_coeff = -relation.tail_coeff; + relation.head_coeff = -relation.head_coeff; + relation.lhs = -relation.lhs; + relation.rhs = -relation.rhs; + std::swap(relation.lhs, relation.rhs); + } + } + + void ComputeDimensionOfEachVariable() { + // Step 1: find the number of dimensions (as the number of connected + // components in the graph of binary relations). + // TODO(user): see if we can use a shared + // DenseConnectedComponentsFinder with one node per variable of the whole + // model instead. 
+ ConnectedComponentsFinder cc_finder; + for (int i = 0; i < num_arcs_; ++i) { + if (tails_[i] == heads_[i]) continue; + num_arcs_per_literal_[literals_[i]]++; + for (const int relation_index : + binary_relation_repository_.IndicesOfRelationsEnforcedBy( + literals_[i])) { + const auto& r = binary_relation_repository_.relation(relation_index); + if (r.a.var == kNoIntegerVariable || r.b.var == kNoIntegerVariable) { + continue; + } + cc_finder.AddEdge(r.a.var, r.b.var); + } + } + const std::vector> connected_components = + cc_finder.FindConnectedComponents(); + for (int i = 0; i < connected_components.size(); ++i) { + for (const IntegerVariable var : connected_components[i]) { + dimension_by_var_[var] = i; + } + } + num_dimensions_ = connected_components.size(); + } + + void ComputeAdjacentRelationsPerNodeAndDimension() { + adjacent_relation_indices_ = std::vector>>( + num_dimensions_, std::vector>(num_nodes_)); + for (int i = 0; i < num_arcs_; ++i) { + if (tails_[i] == heads_[i]) continue; + // If a literal is associated with more than one arc, a relation + // associated with this literal cannot be unambiguously associated with an + // arc. + if (num_arcs_per_literal_[literals_[i]] > 1) continue; + for (const int relation_index : + binary_relation_repository_.IndicesOfRelationsEnforcedBy( + literals_[i])) { + const auto& r = binary_relation_repository_.relation(relation_index); + if (r.a.var == kNoIntegerVariable || r.b.var == kNoIntegerVariable) { + continue; + } + const int dimension = dimension_by_var_[r.a.var]; + adjacent_relation_indices_[dimension][tails_[i]].push_back( + relation_index); + adjacent_relation_indices_[dimension][heads_[i]].push_back( + relation_index); + } + } + } + + // Returns the (node, dimension) pairs for which a variable association has + // been found. 
+ std::queue> + ComputeVarAssociationsFromSharedVariableOfAdjacentRelations() { + flat_node_dim_expressions_ = std::vector( + num_nodes_ * num_dimensions_, AffineExpression()); + std::queue> result; + for (int n = 0; n < num_nodes_; ++n) { + for (int d = 0; d < num_dimensions_; ++d) { + // If two relations on incoming or outgoing arcs of n have a unique + // shared variable, such as in the case of l <-X,Y-> n <-Y,Z-> m (i.e. a + // relation between X and Y on the (l,n) arc, and a relation between Y + // and Z on the (n,m) arc), then this variable is necessarily associated + // with n. + for (const int r1_index : adjacent_relation_indices_[d][n]) { + const auto& r1 = binary_relation_repository_.relation(r1_index); + for (const int r2_index : adjacent_relation_indices_[d][n]) { + if (r1_index == r2_index) continue; + const auto& r2 = binary_relation_repository_.relation(r2_index); + const IntegerVariable shared_var = UniqueSharedVariable(r1, r2); + if (shared_var == kNoIntegerVariable) continue; + DCHECK_EQ(dimension_by_var_[shared_var], d); + AffineExpression& node_expr = node_expression(n, d); + if (node_expr.IsConstant()) { + result.push({n, d}); + } else if (node_expr.var != shared_var) { + VLOG(2) << "Several vars per node and dimension in route with " + << num_nodes_ << " nodes and " << num_arcs_ << " arcs"; + return {}; + } + node_expr = shared_var; + } + } + } + } + return result; + } + + void ComputeVarAssociationsFromRelationsWithSingleFreeVar( + std::queue>& node_dim_pairs_to_process) { + std::vector> adjacent_arcs_per_node(num_nodes_); + for (int i = 0; i < num_arcs_; ++i) { + if (tails_[i] == heads_[i]) continue; + adjacent_arcs_per_node[tails_[i]].push_back(i); + adjacent_arcs_per_node[heads_[i]].push_back(i); + } + while (!node_dim_pairs_to_process.empty()) { + const auto [node, dimension] = node_dim_pairs_to_process.front(); + const AffineExpression node_expr = node_expression(node, dimension); + DCHECK(!node_expr.IsConstant()); + 
node_dim_pairs_to_process.pop(); + for (const int arc_index : adjacent_arcs_per_node[node]) { + const int tail = tails_[arc_index]; + const int head = heads_[arc_index]; + DCHECK(node == tail || node == head); + int other_node = node == tail ? head : tail; + if (!node_expression(other_node, dimension).IsConstant()) { + continue; + } + IntegerVariable candidate_var = kNoIntegerVariable; + bool candidate_var_is_unique = true; + for (const int relation_index : + binary_relation_repository_.IndicesOfRelationsEnforcedBy( + literals_[arc_index])) { + const auto& r = binary_relation_repository_.relation(relation_index); + if (r.a.var == kNoIntegerVariable || r.b.var == kNoIntegerVariable) { + continue; + } + if (r.a.var == node_expr.var) { + if (candidate_var != kNoIntegerVariable && + candidate_var != r.b.var) { + candidate_var_is_unique = false; + break; + } + candidate_var = r.b.var; + } + if (r.b.var == node_expr.var) { + if (candidate_var != kNoIntegerVariable && + candidate_var != r.a.var) { + candidate_var_is_unique = false; + break; + } + candidate_var = r.a.var; + } + } + if (candidate_var != kNoIntegerVariable && candidate_var_is_unique) { + node_expression(other_node, dimension) = candidate_var; + node_dim_pairs_to_process.push({other_node, dimension}); + } + } + } + } + + void ComputeArcRelations() { + flat_arc_dim_relations_ = + std::vector(num_arcs_ * num_dimensions_, Relation()); + int num_inconsistent_relations = 0; + for (int i = 0; i < num_arcs_; ++i) { + const int tail = tails_[i]; + const int head = heads_[i]; + if (tail == head) continue; + for (const int relation_index : + binary_relation_repository_.IndicesOfRelationsEnforcedBy( + literals_[i])) { + const auto& r = binary_relation_repository_.relation(relation_index); + if (r.a.var == kNoIntegerVariable || r.b.var == kNoIntegerVariable) { + continue; + } + bool is_consistent = false; + for (int dimension = 0; dimension < num_dimensions_; ++dimension) { + const NodeExpression 
tail_expr(node_expression(tail, dimension)); + const NodeExpression head_expr(node_expression(head, dimension)); + if (tail_expr.IsEmpty() || head_expr.IsEmpty()) { + continue; + } + if (!((tail_expr.var == r.a.var && head_expr.var == r.b.var) || + (tail_expr.var == r.b.var && head_expr.var == r.a.var))) { + continue; + } + MaybeSetArcRelation(i, dimension, tail_expr, head_expr, r); + is_consistent = true; + } + if (!is_consistent) ++num_inconsistent_relations; + } + // If some relations are missing for this arc, check if we can use + // enforced relations to fill them. + for (int d = 0; d < num_dimensions_; ++d) { + if (!arc_relation(i, d).empty()) continue; + const NodeExpression tail_expr(node_expression(tail, d)); + const NodeExpression head_expr(node_expression(head, d)); + if (tail_expr.IsEmpty() || head_expr.IsEmpty()) { + continue; + } + for (const int relation_index : + binary_relation_repository_.IndicesOfRelationsBetween( + tail_expr.var, head_expr.var)) { + MaybeSetArcRelation( + i, d, tail_expr, head_expr, + binary_relation_repository_.relation(relation_index)); + } + } + } + if (num_inconsistent_relations > 0) { + VLOG(2) << num_inconsistent_relations + << " inconsistent relations in route with " << num_nodes_ + << " nodes and " << num_arcs_ << " arcs"; + } + } + + const int num_nodes_; + const int num_arcs_; + absl::Span tails_; + absl::Span heads_; + absl::Span literals_; + const BinaryRelationRepository& binary_relation_repository_; + + int num_dimensions_; + absl::flat_hash_map dimension_by_var_; + absl::flat_hash_map num_arcs_per_literal_; + // The indices of the binary relations associated with the incoming and + // outgoing arcs of each node, per dimension. + std::vector>> adjacent_relation_indices_; + // The expression associated with node n and dimension d is at index n * + // num_dimensions_ + d. 
+ std::vector flat_node_dim_expressions_; + // The relation associated with arc a and dimension d (or kNoRelation if + // there is none) is at index a * num_dimensions_ + d. + std::vector flat_arc_dim_relations_; +}; +} // namespace + +std::unique_ptr RouteRelationsHelper::Create( + int num_nodes, const std::vector& tails, const std::vector& heads, + const std::vector& literals, + absl::Span flat_node_dim_expressions, + const BinaryRelationRepository& binary_relation_repository) { + RouteRelationsBuilder builder(num_nodes, tails, heads, literals, + flat_node_dim_expressions, + binary_relation_repository); + if (!builder.Build()) return nullptr; + std::unique_ptr helper(new RouteRelationsHelper( + tails, heads, builder.num_dimensions(), + builder.flat_node_dim_expressions(), builder.flat_arc_dim_relations())); + if (VLOG_IS_ON(2)) helper->LogStats(); + return helper; +} + +RouteRelationsHelper::RouteRelationsHelper( + const std::vector& tails, const std::vector& heads, + int num_dimensions, std::vector flat_node_dim_expressions, + std::vector flat_arc_dim_relations) + : tails_(tails), + heads_(heads), + num_dimensions_(num_dimensions), + flat_node_dim_expressions_(std::move(flat_node_dim_expressions)), + flat_arc_dim_relations_(std::move(flat_arc_dim_relations)) { + DCHECK_GE(num_dimensions_, 1); +} + +IntegerValue RouteRelationsHelper::GetArcOffsetBound( + int arc, int dimension, bool upper_bound, + const IntegerTrail& integer_trail) const { + const auto& r = GetArcRelation(arc, dimension); + if (r.empty() || r.head_coeff != 1 || r.tail_coeff != -1) { + // Use X_head-X_tail \in [lb(X_head)-ub(X_tail), ub(X_head)-lb(X_tail)]. + const AffineExpression tail_expr = + GetNodeExpression(tails_[arc], dimension); + const AffineExpression head_expr = + GetNodeExpression(heads_[arc], dimension); + if (upper_bound) { + // Opposite of the rhs. 
+ return integer_trail.LevelZeroLowerBound(tail_expr) - + integer_trail.LevelZeroUpperBound(head_expr); + } + return integer_trail.LevelZeroLowerBound(head_expr) - + integer_trail.LevelZeroUpperBound(tail_expr); + } + return upper_bound ? -r.rhs : r.lhs; +} + +void RouteRelationsHelper::RemoveArcs( + absl::Span sorted_arc_indices) { + int new_size = 0; + const int num_arcs = this->num_arcs(); + for (int i = 0; i < num_arcs; ++i) { + if (!sorted_arc_indices.empty() && sorted_arc_indices.front() == i) { + sorted_arc_indices.remove_prefix(1); + continue; + } + for (int d = 0; d < num_dimensions_; ++d) { + flat_arc_dim_relations_[new_size++] = + flat_arc_dim_relations_[i * num_dimensions_ + d]; + } + } + flat_arc_dim_relations_.resize(new_size); +} + +void RouteRelationsHelper::LogStats() const { + const int num_nodes = this->num_nodes(); + const int num_arcs = this->num_arcs(); + LOG(INFO) << "Route with " << num_nodes << " nodes and " << num_arcs + << " arcs"; + for (int d = 0; d < num_dimensions_; ++d) { + int num_vars = 0; + int num_relations = 0; + for (int i = 0; i < num_nodes; ++i) { + if (!GetNodeExpression(i, d).IsConstant()) ++num_vars; + } + for (int i = 0; i < num_arcs; ++i) { + if (!GetArcRelation(i, d).empty()) ++num_relations; + } + LOG(INFO) << "dimension " << d << ": " << num_vars << " vars and " + << num_relations << " relations"; + } +} + +namespace { +// Converts a literal index in a model proto to a Literal. +Literal ToLiteral(int lit) { + return Literal(BooleanVariable(PositiveRef(lit)), RefIsPositive(lit)); +} + +// Converts a variable index in a NodeVariables proto to an IntegerVariable. +IntegerVariable ToPositiveIntegerVariable(int i) { + return IntegerVariable(PositiveRef(i) << 1); +} + +// Converts an IntegerVariable to variable indices in a NodeVariables proto. +int ToNodeVariableIndex(IntegerVariable var) { + DCHECK(VariableIsPositive(var)); + return var.value() >> 1; +} + +// Returns a repository containing a partial view (i.e. 
without coefficients or +// domains) of the enforced linear constraints (of size 2 only) in `model`. This +// is the only information needed to infer the mapping from variables to nodes +// in routes constraints. +BinaryRelationRepository ComputePartialBinaryRelationRepository( + const CpModelProto& model) { + BinaryRelationRepository repository; + for (const ConstraintProto& ct : model.constraints()) { + if (ct.constraint_case() != ConstraintProto::kLinear) continue; + const absl::Span vars = ct.linear().vars(); + if (ct.enforcement_literal().size() != 1 || vars.size() != 2) continue; + if (vars[0] == vars[1]) continue; + repository.AddPartialRelation(ToLiteral(ct.enforcement_literal(0)), + ToPositiveIntegerVariable(vars[0]), + ToPositiveIntegerVariable(vars[1])); + } + repository.Build(); + return repository; +} + +// Returns the number of dimensions added to the constraint. +int MaybeFillRoutesConstraintNodeExpressions( + RoutesConstraintProto& routes, const BinaryRelationRepository& repository) { + int max_node = 0; + for (const int node : routes.tails()) { + max_node = std::max(max_node, node); + } + for (const int node : routes.heads()) { + max_node = std::max(max_node, node); + } + const int num_nodes = max_node + 1; + std::vector tails(routes.tails().begin(), routes.tails().end()); + std::vector heads(routes.heads().begin(), routes.heads().end()); + std::vector literals; + literals.reserve(routes.literals_size()); + for (int lit : routes.literals()) { + literals.push_back(ToLiteral(lit)); + } + const std::unique_ptr helper = + RouteRelationsHelper::Create(num_nodes, tails, heads, literals, + /*flat_node_dim_expressions=*/{}, + repository); + if (helper == nullptr) return 0; + + for (int d = 0; d < helper->num_dimensions(); ++d) { + RoutesConstraintProto::NodeExpressions& dimension = + *routes.add_dimensions(); + for (int n = 0; n < num_nodes; ++n) { + AffineExpression expr = helper->GetNodeExpression(n, d); + LinearExpressionProto& node_expr = 
*dimension.add_exprs(); + if (expr.var != kNoIntegerVariable) { + node_expr.add_vars(ToNodeVariableIndex(PositiveVariable(expr.var))); + node_expr.add_coeffs(VariableIsPositive(expr.var) + ? expr.coeff.value() + : -expr.coeff.value()); + } + node_expr.set_offset(expr.constant.value()); + } + } + return helper->num_dimensions(); +} + +} // namespace + +std::pair MaybeFillMissingRoutesConstraintNodeExpressions( + const CpModelProto& input_model, CpModelProto& output_model) { + std::vector routes_to_fill; + for (ConstraintProto& ct : *output_model.mutable_constraints()) { + if (ct.constraint_case() != ConstraintProto::kRoutes) continue; + if (!ct.routes().dimensions().empty()) continue; + routes_to_fill.push_back(ct.mutable_routes()); + } + if (routes_to_fill.empty()) return {0, 0}; + + int total_num_dimensions = 0; + const BinaryRelationRepository partial_repository = + ComputePartialBinaryRelationRepository(input_model); + for (RoutesConstraintProto* routes : routes_to_fill) { + total_num_dimensions += + MaybeFillRoutesConstraintNodeExpressions(*routes, partial_repository); + } + return {static_cast(routes_to_fill.size()), total_num_dimensions}; +} + namespace { class OutgoingCutHelper { public: - OutgoingCutHelper(int num_nodes, bool is_route_constraint, int64_t capacity, - absl::Span demands, - absl::Span tails, absl::Span heads, - absl::Span literals, Model* model) + OutgoingCutHelper( + int num_nodes, bool is_route_constraint, int64_t capacity, + absl::Span demands, + absl::Span flat_node_dim_expressions, + absl::Span tails, absl::Span heads, + absl::Span literals, Model* model) : num_nodes_(num_nodes), is_route_constraint_(is_route_constraint), capacity_(capacity), @@ -397,13 +1274,19 @@ class OutgoingCutHelper { tails_(tails.begin(), tails.end()), heads_(heads.begin(), heads.end()), literals_(literals.begin(), literals.end()), - literal_lp_values_(literals.size()), params_(*model->GetOrCreate()), trail_(*model->GetOrCreate()), random_(model->GetOrCreate()), 
encoder_(model->GetOrCreate()), in_subset_(num_nodes, false), - min_outgoing_flow_helper_(num_nodes, tails_, heads_, literals_, model) { + self_arc_literal_(num_nodes_), + self_arc_lp_value_(num_nodes_), + nodes_incoming_weight_(num_nodes_), + nodes_outgoing_weight_(num_nodes_), + min_outgoing_flow_helper_(num_nodes, tails_, heads_, literals_, model), + route_relations_helper_(RouteRelationsHelper::Create( + num_nodes, tails_, heads_, literals_, flat_node_dim_expressions, + *model->GetOrCreate())) { // Compute the total demands in order to know the minimum incoming/outgoing // flow. for (const int64_t demand : demands) total_demand_ += demand; @@ -470,7 +1353,7 @@ class OutgoingCutHelper { // relevant. bool AddOutgoingCut(LinearConstraintManager* manager, std::string name, int subset_size, const std::vector& in_subset, - int64_t rhs_lower_bound, int ignore_arcs_with_head); + int64_t rhs_lower_bound, int outside_node_to_ignore); const int num_nodes_; const bool is_route_constraint_; @@ -479,7 +1362,6 @@ class OutgoingCutHelper { std::vector tails_; std::vector heads_; std::vector literals_; - std::vector literal_lp_values_; std::vector relevant_arcs_; std::vector symmetrized_relevant_arcs_; std::vector> ordered_arcs_; @@ -493,9 +1375,19 @@ class OutgoingCutHelper { std::vector in_subset_; std::vector complement_of_subset_; + // Self-arc information, indexed in [0, num_nodes_) + std::vector nodes_with_self_arc_; + std::vector self_arc_literal_; + std::vector self_arc_lp_value_; + + // Temporary memory used by TrySubsetCut(). 
+ std::vector nodes_incoming_weight_; + std::vector nodes_outgoing_weight_; + MaxBoundedSubsetSum max_bounded_subset_sum_; MaxBoundedSubsetSumExact max_bounded_subset_sum_exact_; MinOutgoingFlowHelper min_outgoing_flow_helper_; + std::unique_ptr route_relations_helper_; }; void OutgoingCutHelper::FilterFalseArcsAtLevelZero() { @@ -504,8 +1396,12 @@ void OutgoingCutHelper::FilterFalseArcsAtLevelZero() { int new_size = 0; const int size = static_cast(tails_.size()); const VariablesAssignment& assignment = trail_.Assignment(); + std::vector removed_arcs; for (int i = 0; i < size; ++i) { - if (assignment.LiteralIsFalse(literals_[i])) continue; + if (assignment.LiteralIsFalse(literals_[i])) { + removed_arcs.push_back(i); + continue; + } tails_[new_size] = tails_[i]; heads_[new_size] = heads_[i]; literals_[new_size] = literals_[i]; @@ -515,7 +1411,9 @@ void OutgoingCutHelper::FilterFalseArcsAtLevelZero() { tails_.resize(new_size); heads_.resize(new_size); literals_.resize(new_size); - literal_lp_values_.resize(new_size); + if (route_relations_helper_ != nullptr) { + route_relations_helper_->RemoveArcs(removed_arcs); + } } } @@ -526,20 +1424,30 @@ void OutgoingCutHelper::InitializeForNewLpSolution( // We will collect only the arcs with a positive lp_values to speed up some // computation below. relevant_arcs_.clear(); + nodes_with_self_arc_.clear(); // Sort the arcs by non-increasing lp_values. const auto& lp_values = manager->LpValues(); std::vector> relevant_arc_by_decreasing_lp_values; for (int i = 0; i < literals_.size(); ++i) { - double lp_value; const IntegerVariable direct_view = encoder_->GetLiteralView(literals_[i]); - if (direct_view != kNoIntegerVariable) { - lp_value = lp_values[direct_view]; - } else { - lp_value = - 1.0 - lp_values[encoder_->GetLiteralView(literals_[i].Negated())]; + const double lp_value = + direct_view != kNoIntegerVariable + ? 
lp_values[direct_view]
          : 1.0 - lp_values[encoder_->GetLiteralView(literals_[i].Negated())];
+
+    // We treat self-edge separately.
+    // Note also that we do not need to include them in relevant_arcs_.
+    //
+    // TODO(user): If there are multiple self-arc, the code should still
+    // work, but is not ideal.
+    if (tails_[i] == heads_[i]) {
+      const int node = tails_[i];
+      nodes_with_self_arc_.push_back(node);
+      self_arc_lp_value_[node] = lp_value;
+      self_arc_literal_[node] = literals_[i];
+      continue;
     }
-    literal_lp_values_[i] = lp_value;
 
     if (lp_value < 1e-6) continue;
     relevant_arcs_.push_back({tails_[i], heads_[i], lp_value});
@@ -549,17 +1457,94 @@
             relevant_arc_by_decreasing_lp_values.end(),
             std::greater>());
 
+  gtl::STLSortAndRemoveDuplicates(&nodes_with_self_arc_);
+
   ordered_arcs_.clear();
   for (const auto& [score, arc] : relevant_arc_by_decreasing_lp_values) {
     ordered_arcs_.push_back({tails_[arc], heads_[arc]});
   }
 }
 
+namespace {
+
+// Compute the current outgoing/incoming flow out of the subset.
+// In many cases this will be the same, but not with outside_node_to_ignore
+// or in case our LP does not contain all the constraints.
+//
+// Looping over all arcs can take a significant portion of the running time,
+// it is why it is faster to do it only on arcs with non-zero lp values which
+// should be in linear number rather than the total number of arc which can be
+// quadratic.
+//
+// TODO(user): For the symmetric case there is an even faster algo. See if
+// it can be generalized to the asymmetric one if become needed.
+// Reference is algo 6.4 of the "The Traveling Salesman Problem" book
+// mentioned above.
+std::pair GetIncomingAndOutgoingLpFlow( + absl::Span relevant_arcs, + const std::vector& in_subset, int outside_node_to_ignore = -1) { + double outgoing_flow = 0.0; + double incoming_flow = 0.0; + for (const auto arc : relevant_arcs) { + const bool tail_in = in_subset[arc.tail]; + const bool head_in = in_subset[arc.head]; + if (tail_in && !head_in) { + if (arc.head == outside_node_to_ignore) continue; + outgoing_flow += arc.lp_value; + } else if (!tail_in && head_in) { + if (arc.tail == outside_node_to_ignore) continue; + incoming_flow += arc.lp_value; + } + } + return {incoming_flow, outgoing_flow}; +} + +} // namespace + bool OutgoingCutHelper::AddOutgoingCut(LinearConstraintManager* manager, std::string name, int subset_size, const std::vector& in_subset, int64_t rhs_lower_bound, - int ignore_arcs_with_head) { + int outside_node_to_ignore) { + // Skip cut if it is not violated. + const auto [in_flow, out_flow] = GetIncomingAndOutgoingLpFlow( + relevant_arcs_, in_subset, outside_node_to_ignore); + const double out_violation = static_cast(rhs_lower_bound) - out_flow; + const double in_violation = static_cast(rhs_lower_bound) - in_flow; + if (out_violation <= 1e-3 && in_violation <= 1e-3) return false; + + // We create the cut and rely on AddCut() for computing its efficacy and + // rejecting it if it is bad. + LinearConstraintBuilder outgoing(encoder_, IntegerValue(rhs_lower_bound), + kMaxIntegerValue); + LinearConstraintBuilder incoming(encoder_, IntegerValue(rhs_lower_bound), + kMaxIntegerValue); + + // Rather than doing two loops, we initialize the cuts right away, even if + // only one of them will be used. 
+ for (int i = 0; i < tails_.size(); ++i) { + const bool tail_in = in_subset[tails_[i]]; + const bool head_in = in_subset[heads_[i]]; + if (tail_in && !head_in) { + if (heads_[i] == outside_node_to_ignore) continue; + CHECK(outgoing.AddLiteralTerm(literals_[i], IntegerValue(1))); + } else if (!tail_in && head_in) { + if (tails_[i] == outside_node_to_ignore) continue; + CHECK(incoming.AddLiteralTerm(literals_[i], IntegerValue(1))); + } + } + + // As arcs get fixed (this happens a lot in LNS subproblems), even if the + // incoming flow is the same as the outgoing flow, the number of incoming arcs + // might be widely different from the one of outgoing arcs. We prefer to pick + // the sparser cut. + const double out_efficacy = out_violation / std::sqrt(outgoing.NumTerms()); + const double in_efficacy = in_violation / std::sqrt(incoming.NumTerms()); + + // Select the best option between outgoing and incoming. + LinearConstraintBuilder& cut_builder = + (out_efficacy >= in_efficacy) ? outgoing : incoming; + // A node is said to be optional if it can be excluded from the subcircuit, // in which case there is a self-loop on that node. 
// If there are optional nodes, use extended formula: @@ -570,72 +1555,56 @@ bool OutgoingCutHelper::AddOutgoingCut(LinearConstraintManager* manager, int num_optional_nodes_out = 0; int optional_loop_in = -1; int optional_loop_out = -1; - for (int i = 0; i < tails_.size(); ++i) { - if (tails_[i] != heads_[i]) continue; - if (in_subset[tails_[i]]) { + for (const int n : nodes_with_self_arc_) { + if (in_subset[n]) { num_optional_nodes_in++; if (optional_loop_in == -1 || - literal_lp_values_[i] < literal_lp_values_[optional_loop_in]) { - optional_loop_in = i; + self_arc_lp_value_[n] < self_arc_lp_value_[optional_loop_in]) { + optional_loop_in = n; } } else { num_optional_nodes_out++; if (optional_loop_out == -1 || - literal_lp_values_[i] < literal_lp_values_[optional_loop_out]) { - optional_loop_out = i; + self_arc_lp_value_[n] < self_arc_lp_value_[optional_loop_out]) { + optional_loop_out = n; } } } - // TODO(user): The lower bound for CVRP is computed assuming all nodes must be - // served, if it is > 1 we lower it to one in the presence of optional nodes. - if (num_optional_nodes_in + num_optional_nodes_out > 0) { - CHECK_GE(rhs_lower_bound, 1); - rhs_lower_bound = 1; - ignore_arcs_with_head = -1; - } - - // We create the cut and rely on AddCut() for computing its efficacy and - // rejecting it if it is bad. - LinearConstraintBuilder outgoing(encoder_, IntegerValue(rhs_lower_bound), - kMaxIntegerValue); - - // Add outgoing arcs, compute outgoing flow. - for (int i = 0; i < tails_.size(); ++i) { - if (in_subset[tails_[i]] && !in_subset[heads_[i]]) { - if (heads_[i] == ignore_arcs_with_head) continue; - CHECK(outgoing.AddLiteralTerm(literals_[i], IntegerValue(1))); - } - } + // This just makes sure we don't call this with a bound > 1 if there is + // optional node inside the subset. + CHECK(rhs_lower_bound == 1 || num_optional_nodes_in == 0); // Support optional nodes if any. 
if (num_optional_nodes_in + num_optional_nodes_out > 0) { // When all optionals of one side are excluded in lp solution, no cut. if (num_optional_nodes_in == subset_size && (optional_loop_in == -1 || - literal_lp_values_[optional_loop_in] > 1.0 - 1e-6)) { + self_arc_lp_value_[optional_loop_in] > 1.0 - 1e-6)) { return false; } if (num_optional_nodes_out == num_nodes_ - subset_size && (optional_loop_out == -1 || - literal_lp_values_[optional_loop_out] > 1.0 - 1e-6)) { + self_arc_lp_value_[optional_loop_out] > 1.0 - 1e-6)) { return false; } // There is no mandatory node in subset, add optional_loop_in. if (num_optional_nodes_in == subset_size) { - CHECK(outgoing.AddLiteralTerm(literals_[optional_loop_in], - IntegerValue(1))); + CHECK_EQ(rhs_lower_bound, 1); + CHECK(cut_builder.AddLiteralTerm(self_arc_literal_[optional_loop_in], + IntegerValue(1))); } // There is no mandatory node out of subset, add optional_loop_out. if (num_optional_nodes_out == num_nodes_ - subset_size) { - CHECK(outgoing.AddLiteralTerm(literals_[optional_loop_out], - IntegerValue(1))); + CHECK_EQ(rhs_lower_bound, 1); + CHECK(cut_builder.AddLiteralTerm(self_arc_literal_[optional_loop_out], + IntegerValue(1))); } } - return manager->AddCut(outgoing.Build(), name); + return manager->AddCut(cut_builder.Build(), name); } bool OutgoingCutHelper::TrySubsetCut(std::string name, @@ -644,8 +1613,9 @@ bool OutgoingCutHelper::TrySubsetCut(std::string name, DCHECK_GE(subset.size(), 1); DCHECK_LT(subset.size(), num_nodes_); - // Initialize "in_subset" and contain_depot. + // Do some initialization. bool contain_depot = false; + in_subset_.assign(num_nodes_, false); for (const int n : subset) { in_subset_[n] = true; if (n == 0 && is_route_constraint_) { @@ -653,11 +1623,48 @@ bool OutgoingCutHelper::TrySubsetCut(std::string name, } } - // Compute a lower bound on the outgoing flow. + // For the route-constraint, we will always consider the subset without the + // depot. We complement it if needed. 
+ if (contain_depot) { + complement_of_subset_.clear(); + for (int i = 0; i < num_nodes_; ++i) { + if (!in_subset_[i]) { + complement_of_subset_.push_back(i); + } + in_subset_[i] = !in_subset_[i]; + } + + // Change the span to point in the new subset! + subset = complement_of_subset_; + } + + // For now we can only apply fancy route cuts if all nodes in subset are + // mandatory. + bool all_subset_nodes_are_mandatory = true; + if (is_route_constraint_) { + for (const int n : nodes_with_self_arc_) { + if (in_subset_[n]) { + all_subset_nodes_are_mandatory = false; + break; + } + } + } + + // The TSP case is "easy". // - // TODO(user): This lower bound assume all nodes in subset must be served. - // If this is not the case, we are really defensive in AddOutgoingCut(). - // Improve depending on where the self-loop are. + // TODO(user): Turn on some of the automatic detection for circuit constraint. + // Even if we are looking for a full circuit of the mandatory nodes, some + // side-constraint might require to go in and out of a subset more than once. + // + // TODO(user): deal with non-mandatory node in the route constraint? + if (!is_route_constraint_ || !all_subset_nodes_are_mandatory) { + return AddOutgoingCut(manager, name, subset.size(), in_subset_, + /*rhs_lower_bound=*/1, + /*outside_node_to_ignore=*/-1); + } + + // Compute a lower bound on the outgoing flow assuming all node in the subset + // must be served. int64_t min_outgoing_flow = 1; // Bounds inferred automatically from the enforced binary relation of the @@ -666,73 +1673,47 @@ bool OutgoingCutHelper::TrySubsetCut(std::string name, // TODO(user): This is still not as good as the "capacity" bounds below in // some cases. Fix! we should be able to use the same relation to infer the // capacity bounds somehow. - const int subset_or_complement_size = - contain_depot ? 
num_nodes_ - subset.size() : subset.size(); - if (subset_or_complement_size <= - params_.routing_cut_subset_size_for_binary_relation_bound() && - subset_or_complement_size >= - params_.routing_cut_subset_size_for_tight_binary_relation_bound()) { - int bound; - if (contain_depot) { - complement_of_subset_.clear(); - for (int i = 0; i < num_nodes_; ++i) { - if (!in_subset_[i]) complement_of_subset_.push_back(i); - } - bound = min_outgoing_flow_helper_.ComputeMinOutgoingFlow( - complement_of_subset_); - } else { - bound = min_outgoing_flow_helper_.ComputeMinOutgoingFlow(subset); + if (route_relations_helper_ != nullptr) { + const int bound = + min_outgoing_flow_helper_.ComputeDemandBasedMinOutgoingFlow( + subset, *route_relations_helper_); + if (bound > min_outgoing_flow) { + absl::StrAppend(&name, "AutomaticDimension"); + min_outgoing_flow = bound; } + } + if (subset.size() < + params_.routing_cut_subset_size_for_tight_binary_relation_bound()) { + const int bound = + min_outgoing_flow_helper_.ComputeTightMinOutgoingFlow(subset); + if (bound > min_outgoing_flow) { + absl::StrAppend(&name, "AutomaticTight"); + min_outgoing_flow = bound; + } + } else if (subset.size() < + params_.routing_cut_subset_size_for_binary_relation_bound()) { + const int bound = min_outgoing_flow_helper_.ComputeMinOutgoingFlow(subset); if (bound > min_outgoing_flow) { absl::StrAppend(&name, "Automatic"); min_outgoing_flow = bound; } } - if (subset_or_complement_size < - params_.routing_cut_subset_size_for_tight_binary_relation_bound()) { - int bound; - if (contain_depot) { - complement_of_subset_.clear(); - for (int i = 0; i < num_nodes_; ++i) { - if (!in_subset_[i]) complement_of_subset_.push_back(i); - } - bound = min_outgoing_flow_helper_.ComputeTightMinOutgoingFlow( - complement_of_subset_); - } else { - bound = min_outgoing_flow_helper_.ComputeTightMinOutgoingFlow(subset); - } - if (bound > min_outgoing_flow) { - absl::StrAppend(&name, "AutomaticTight"); - min_outgoing_flow = bound; - } - } 
// Bounds coming from the demands_/capacity_ fields (if set). + // If we cannot reach the capacity given the demands in the subset, we can + // derive tighter bounds. std::vector to_ignore_candidates; if (!demands_.empty()) { - // If subset contains depot, we actually look at the subset complement to - // derive a bound on the outgoing flow. If we cannot reach the capacity - // given the demands in the subset, we can derive tighter bounds. int64_t has_excessive_demands = false; int64_t has_negative_demands = false; int64_t sum_of_elements = 0; std::vector elements; - const auto process_demand = [&](int64_t d) { + for (const int n : subset) { + const int64_t d = demands_[n]; if (d < 0) has_negative_demands = true; if (d > capacity_) has_excessive_demands = true; sum_of_elements += d; elements.push_back(d); - }; - if (contain_depot) { - for (int n = 0; n < num_nodes_; ++n) { - if (!in_subset_[n]) { - process_demand(demands_[n]); - } - } - } else { - for (const int n : subset) { - process_demand(demands_[n]); - } } // Lets wait for these to disappear before adding cuts. @@ -743,14 +1724,17 @@ bool OutgoingCutHelper::TrySubsetCut(std::string name, // already tight. // // TODO(user): Compute a bound in the presence of negative demands? - bool exact_was_used = false; int64_t tightened_capacity = capacity_; + int tightening_level = 0; if (!has_negative_demands && sum_of_elements > capacity_) { max_bounded_subset_sum_.Reset(capacity_); for (const int64_t e : elements) { max_bounded_subset_sum_.Add(e); } tightened_capacity = max_bounded_subset_sum_.CurrentMax(); + if (tightened_capacity < capacity_) { + tightening_level = 1; + } // If the complexity looks ok, try a more expensive DP than the quick one // above. 
@@ -760,8 +1744,8 @@ bool OutgoingCutHelper::TrySubsetCut(std::string name, max_bounded_subset_sum_exact_.MaxSubsetSum(elements, capacity_); CHECK_LE(exact, tightened_capacity); if (exact < tightened_capacity) { + tightening_level = 2; tightened_capacity = exact; - exact_was_used = true; } } } @@ -770,15 +1754,15 @@ bool OutgoingCutHelper::TrySubsetCut(std::string name, MathUtil::CeilOfRatio(sum_of_elements, tightened_capacity); if (flow_lower_bound > min_outgoing_flow) { min_outgoing_flow = flow_lower_bound; - absl::StrAppend(&name, exact_was_used ? "Tightened" : "Capacity"); + absl::StrAppend(&name, "Demand", tightening_level); } - if (!contain_depot && flow_lower_bound >= min_outgoing_flow) { + if (flow_lower_bound >= min_outgoing_flow) { // We compute the biggest extra item that could fit in 'flow_lower_bound' // bins. If the first (flow_lower_bound - 1) bins are tight, i.e. all // their tightened_capacity is filled, then the last bin will have // 'last_bin_fillin' stuff, which will leave 'space_left' to fit an extra - // 'item. + // item. const int64_t last_bin_fillin = sum_of_elements - (flow_lower_bound - 1) * tightened_capacity; const int64_t space_left = capacity_ - last_bin_fillin; @@ -806,6 +1790,13 @@ bool OutgoingCutHelper::TrySubsetCut(std::string name, // By hypothesis, outgoing_flow(A) + outgoing_flow(B) > flow_lower_bound // and, since n is not the depot, outgoing_flow(B) <= 1. Hence // outgoing_flow(A) >= flow_lower_bound. + // + // Note that this reasoning also applies to the incoming_flow, we have the + // same lower bound from the incoming flow not arriving from such node. + // + // Also of note, is that even if this node is optional, the bound is still + // valid since if any flow leave or come to this node, it must be in the + // tour. 
for (int n = 1; n < num_nodes_; ++n) { if (in_subset_[n]) continue; if (demands_[n] > space_left) { @@ -815,19 +1806,43 @@ bool OutgoingCutHelper::TrySubsetCut(std::string name, } } + if (subset.size() <= + params_.routing_cut_subset_size_for_exact_binary_relation_bound()) { + // Before doing something expensive, we can check if this might generate + // a violated cut in the first place. + const auto [in_flow, out_flow] = + GetIncomingAndOutgoingLpFlow(relevant_arcs_, in_subset_); + const double max_flow = std::max(in_flow, out_flow); + if (max_flow + 1e-2 >= min_outgoing_flow + 1.0) { + min_outgoing_flow_helper_.ReportDpSkip(); + } else if (!min_outgoing_flow_helper_.SubsetMightBeServedWithKRoutes( + min_outgoing_flow, subset)) { + // TODO(user): Shall we call SubsetMightBeServedWithKRoutes() again + // with min_outgoing_flow + 1 here? + absl::StrAppend(&name, "DP"); + min_outgoing_flow += 1; + to_ignore_candidates.clear(); // no longer valid. + } + } + // Out of to_ignore_candidates, use an heuristic to pick one. - int ignore_arcs_with_head = -1; + int outside_node_to_ignore = -1; if (!to_ignore_candidates.empty()) { absl::StrAppend(&name, "Lifted"); - // Compute the lp weight going from subset to the candidates. - absl::flat_hash_map candidate_weights; - for (const int n : to_ignore_candidates) candidate_weights[n] = 0; + // Compute the lp weight going from subset to the candidates or from the + // candidates to the subset. + // + // Note that we only reset the position that we care about below. 
+ for (const int n : to_ignore_candidates) { + nodes_incoming_weight_[n] = 0; + nodes_outgoing_weight_[n] = 0; + } for (const auto arc : relevant_arcs_) { if (in_subset_[arc.tail] && !in_subset_[arc.head]) { - auto it = candidate_weights.find(arc.head); - if (it == candidate_weights.end()) continue; - it->second += arc.lp_value; + nodes_incoming_weight_[arc.head] += arc.lp_value; + } else if (!in_subset_[arc.tail] && in_subset_[arc.head]) { + nodes_outgoing_weight_[arc.tail] += arc.lp_value; } } @@ -835,7 +1850,8 @@ bool OutgoingCutHelper::TrySubsetCut(std::string name, std::vector bests; double best_weight = 0.0; for (const int n : to_ignore_candidates) { - const double weight = candidate_weights.at(n); + const double weight = + std::max(nodes_outgoing_weight_[n], nodes_incoming_weight_[n]); if (bests.empty() || weight > best_weight) { bests.clear(); bests.push_back(n); @@ -846,42 +1862,15 @@ bool OutgoingCutHelper::TrySubsetCut(std::string name, } // Randomly pick if we have many "bests". - ignore_arcs_with_head = + outside_node_to_ignore = bests.size() == 1 ? bests[0] : bests[absl::Uniform(*random_, 0, bests.size())]; } - // Compute the current outgoing flow out of the subset. - // - // This can take a significant portion of the running time, it is why it is - // faster to do it only on arcs with non-zero lp values which should be in - // linear number rather than the total number of arc which can be quadratic. - // - // TODO(user): For the symmetric case there is an even faster algo. See if - // it can be generalized to the asymmetric one if become needed. - // Reference is algo 6.4 of the "The Traveling Salesman Problem" book - // mentionned above. - double outgoing_flow = 0.0; - for (const auto arc : relevant_arcs_) { - if (in_subset_[arc.tail] && !in_subset_[arc.head]) { - if (arc.head == ignore_arcs_with_head) continue; - outgoing_flow += arc.lp_value; - } - } - - // Add a cut if the current outgoing flow is not enough. 
- bool result = false; - if (outgoing_flow + 1e-2 < min_outgoing_flow) { - result = AddOutgoingCut(manager, name, subset.size(), in_subset_, - /*rhs_lower_bound=*/min_outgoing_flow, - ignore_arcs_with_head); - } - - // Sparse clean up. - for (const int n : subset) in_subset_[n] = false; - - return result; + return AddOutgoingCut(manager, name, subset.size(), in_subset_, + /*rhs_lower_bound=*/min_outgoing_flow, + outside_node_to_ignore); } bool OutgoingCutHelper::TryBlossomSubsetCut( @@ -890,7 +1879,7 @@ bool OutgoingCutHelper::TryBlossomSubsetCut( DCHECK_GE(subset.size(), 1); DCHECK_LT(subset.size(), num_nodes_); - // Initialize "in_subset" and the subset demands. + // Initialize "in_subset". for (const int n : subset) in_subset_[n] = true; auto cleanup = ::absl::MakeCleanup([subset, this]() { for (const int n : subset) in_subset_[n] = false; @@ -955,13 +1944,12 @@ bool OutgoingCutHelper::TryBlossomSubsetCut( if (special_edges.size() == 1) { int num_other_optional = 0; const auto [special_tail, special_head] = *special_edges.begin(); - for (int i = 0; i < tails_.size(); ++i) { - if (tails_[i] != heads_[i]) continue; - if (tails_[i] != special_head && tails_[i] != special_tail) { + for (const int n : nodes_with_self_arc_) { + if (n != special_head && n != special_tail) { ++num_other_optional; if (best_optional_index == -1 || - literal_lp_values_[i] < literal_lp_values_[best_optional_index]) { - best_optional_index = i; + self_arc_lp_value_[n] < self_arc_lp_value_[best_optional_index]) { + best_optional_index = n; } } } @@ -981,10 +1969,10 @@ bool OutgoingCutHelper::TryBlossomSubsetCut( if (best_optional_index != -1) { absl::StrAppend(&name, "_opt"); - // This is tricky: The normal cut assume x_e <=1, but in case of a single + // This is tricky: The normal cut assume x_e <= 1, but in case of a single // 2 cycle, x_e can be equal to 2. So we need a coeff of 2 to disable that // cut. 
- CHECK(builder.AddLiteralTerm(literals_[best_optional_index], + CHECK(builder.AddLiteralTerm(self_arc_literal_[best_optional_index], IntegerValue(2))); } @@ -1321,7 +2309,9 @@ CutGenerator CreateStronglyConnectedGraphCutGenerator( absl::Span literals, Model* model) { auto helper = std::make_unique( num_nodes, /*is_route_constraint=*/false, /*capacity=*/0, - /*demands=*/absl::Span(), tails, heads, literals, model); + /*demands=*/absl::Span(), + /*flat_node_dim_expressions=*/ + absl::Span{}, tails, heads, literals, model); CutGenerator result; result.vars = GetAssociatedVariables(literals, model); result.generate_cuts = @@ -1332,14 +2322,14 @@ CutGenerator CreateStronglyConnectedGraphCutGenerator( return result; } -CutGenerator CreateCVRPCutGenerator(int num_nodes, absl::Span tails, - absl::Span heads, - absl::Span literals, - absl::Span demands, - int64_t capacity, Model* model) { +CutGenerator CreateCVRPCutGenerator( + int num_nodes, absl::Span tails, absl::Span heads, + absl::Span literals, absl::Span demands, + absl::Span flat_node_dim_expressions, + int64_t capacity, Model* model) { auto helper = std::make_unique( - num_nodes, /*is_route_constraint=*/true, capacity, demands, tails, heads, - literals, model); + num_nodes, /*is_route_constraint=*/true, capacity, demands, + flat_node_dim_expressions, tails, heads, literals, model); CutGenerator result; result.vars = GetAssociatedVariables(literals, model); result.generate_cuts = diff --git a/ortools/sat/routing_cuts.h b/ortools/sat/routing_cuts.h index 915abd3217..ddff2ce9d9 100644 --- a/ortools/sat/routing_cuts.h +++ b/ortools/sat/routing_cuts.h @@ -18,6 +18,8 @@ #include #include +#include +#include #include #include @@ -30,10 +32,147 @@ #include "ortools/sat/model.h" #include "ortools/sat/precedences.h" #include "ortools/sat/sat_base.h" +#include "ortools/sat/synchronization.h" namespace operations_research { namespace sat { +// Helper to recover the mapping between nodes and binary relation variables in +// 
simple cases of route constraints (at most one variable per node and +// "dimension" -- such as time or load, and at most one relation per arc and +// dimension). +class RouteRelationsHelper { + public: + // Creates a RouteRelationsHelper for the given RoutesConstraint and + // associated binary relations. If `flat_node_dim_expressions` is empty, + // infers them from the binary relations, if possible (otherwise, returns + // nullptr). If `flat_node_dim_expressions` is not empty, uses it to + // initialize the helper (this list should have num_dimensions times num_nodes + // elements, with the expression associated with node n and dimension d at + // index n * num_dimensions + d). If there are more than one relation per arc + // and dimension, a single relation is chosen arbitrarily. + static std::unique_ptr Create( + int num_nodes, const std::vector& tails, + const std::vector& heads, const std::vector& literals, + absl::Span flat_node_dim_expressions, + const BinaryRelationRepository& binary_relation_repository); + + // Returns the number of "dimensions", such as time or vehicle load. + int num_dimensions() const { return num_dimensions_; } + + int num_nodes() const { + return flat_node_dim_expressions_.size() / num_dimensions_; + } + + int num_arcs() const { + return flat_arc_dim_relations_.size() / num_dimensions_; + } + + // Returns the expression associated with the given node and dimension. + const AffineExpression& GetNodeExpression(int node, int dimension) const { + return flat_node_dim_expressions_[node * num_dimensions_ + dimension]; + } + + // Returns the relation tail_coeff.X + head_coeff.Y \in [lhs, rhs] between the + // X and Y expressions associated with the tail and head of the given arc, + // respectively, and the given dimension (head_coeff is always positive). + // Returns an "empty" struct with all fields set to 0 if there is no such + // relation. 
+ struct Relation { + IntegerValue tail_coeff = 0; + IntegerValue head_coeff = 0; + IntegerValue lhs; + IntegerValue rhs; + + bool empty() const { return tail_coeff == 0 && head_coeff == 0; } + + bool operator==(const Relation& r) const { + return tail_coeff == r.tail_coeff && head_coeff == r.head_coeff && + lhs == r.lhs && rhs == r.rhs; + } + }; + const Relation& GetArcRelation(int arc, int dimension) const { + return flat_arc_dim_relations_[arc * num_dimensions_ + dimension]; + } + + // Returns the level zero lower or upper bound of the offset between the + // expressions associated with the head and tail of the given arc, and the + // given dimension. + IntegerValue GetArcOffsetBound(int arc, int dimension, bool upper_bound, + const IntegerTrail& integer_trail) const; + + void RemoveArcs(absl::Span sorted_arc_indices); + + private: + RouteRelationsHelper(const std::vector& tails, + const std::vector& heads, int num_dimensions, + std::vector flat_node_dim_expressions, + std::vector flat_arc_dim_relations); + + void LogStats() const; + + const std::vector& tails_; + const std::vector& heads_; + + int num_dimensions_; + // The expression associated with node n and dimension d is at index n * + // num_dimensions_ + d. + std::vector flat_node_dim_expressions_; + // The relation associated with arc a and dimension d is at index a * + // num_dimensions_ + d. + std::vector flat_arc_dim_relations_; +}; + +// Computes and fills the node expressions of all the routes constraints in +// `output_model` that don't have them, if possible. The node expressions are +// inferred from the binary relations in `input_model`. Both models must have +// the same variables (they can reference the same underlying object). +// Returns the number of constraints that were filled, and the total number of +// dimensions added to them. 
+std::pair MaybeFillMissingRoutesConstraintNodeExpressions( + const CpModelProto& input_model, CpModelProto& output_model); + +// This is used to represent a "special bin packing" problem where we have +// objects that can either be items with a given demand or bins with a given +// capacity. The problem is to choose the minimum number of objects that will +// be bins, such that the other objects (items) can be packed inside. +struct ItemOrBin { + // Only one option will apply, this can either be an item with given demand + // or a bin with given capacity. + // + // Important: We support negative demands and negative capacity. We just + // need that the sum of demand <= capacity for the item in that bin. + IntegerValue demand = 0; + IntegerValue capacity = 0; + + // We described the problem where each object can be an item or a bin, but + // in practice we might have restriction on what object can be which, and we + // use this field to indicate that. + // + // The numerical order is important as we use that in the greedy algorithm. + // See ComputeMinNumberOfBins() code. + enum { + MUST_BE_ITEM = 0, // capacity will be set at zero. + ITEM_OR_BIN = 1, + MUST_BE_BIN = 2, // demand will be set at zero. + } type = ITEM_OR_BIN; +}; + +// Given a "special bin packing" problem as described above, return a lower +// bound on the number of bins that needs to be taken. +// +// This simply sorts the objects according to a greedy criterion and minimizes +// the number of bins such that the "demands <= capacities" constraint is +// satisfied. +// +// If the problem is infeasible, this will return object.size() + 1, which is +// a trivially infeasible bound. +// +// TODO(user): Use fancier DP to derive tighter bound. Also, when there are +// many dimensions, the choice of which item goes to which bin is correlated, +// can we exploit this? 
+int ComputeMinNumberOfBins(absl::Span objects, bool* gcd_was_used); + // Helper to compute the minimum flow going out of a subset of nodes, for a // given RoutesConstraint. class MinOutgoingFlowHelper { @@ -42,6 +181,16 @@ class MinOutgoingFlowHelper { const std::vector& heads, const std::vector& literals, Model* model); + ~MinOutgoingFlowHelper(); + + // Returns the minimum flow going out of `subset`, based on a generalization + // of the CVRP "rounded capacity inequalities", by using the given helper, if + // possible (this requires all nodes to have an associated variable, and all + // relations to have +1, -1 coefficients). The complexity is O((subset.size() + // + num_arcs()) * num_dimensions()). + int ComputeDemandBasedMinOutgoingFlow(absl::Span subset, + const RouteRelationsHelper& helper); + // Returns the minimum flow going out of `subset`, based on a conservative // estimate of the maximum number of nodes of a feasible path inside this // subset. `subset` must not be empty and must not contain the depot (node 0). @@ -54,7 +203,79 @@ class MinOutgoingFlowHelper { // cycles). The complexity is O(2 ^ subset.size()). int ComputeTightMinOutgoingFlow(absl::Span subset); + // Returns false if the given subset CANNOT be served by k routes. + // Returns true if we have a route or we don't know for sure (if we abort). + // The parameter k must be positive. + // + // Even more costly algo in O(n!/k!*k^(n-k)) that answers the question exactly + // given the available enforced linear1 and linear2 constraints. However it + // can stop as soon as one solution is found. + // + // TODO(user): the complexity also depends on the longest route and improves + // if routes fail quickly. Give a better estimate? + bool SubsetMightBeServedWithKRoutes(int k, absl::Span subset); + + // Just for stats reporting. 
+ void ReportDpSkip() { num_full_dp_skips_++; } + private: + void InitializeGraph(absl::Span subset); + + // Given a subset S to serve in a route constraint, returns a special bin + // packing problem (defined above) where the minimum number of bins will + // correspond to the minimum number of vehicles needed to serve this subset. + // + // One way to derive such reduction is as follows. + // + // If we look at a path going through the subset, it will touch in order the + // nodes P = {n_0, ..., n_e}. It will enter S at a "start" node n_0 and leave + // at an "end" node n_e. + // + // We assume (see the RouteRelationsHelper) that each node n has an + // associated variable X_n, and that each arc t->h has an associated relation + // lhs(t,h) <= X_h - X_t. Summing all these inequalities along the path above + // we get: + // Sum_{i \in [1..e]} lhs(n_i, n_(i+1)) <= X_(n_e) - X_(n_0) + // introducing: + // - d(n) = min_(i \in S) lhs(i, n) [minimum incoming weight in subset S] + // - UB = max_(i \in S) upper_bound(X_i) + // We get: + // Sum_{n \in P \ n_0} d(n) <= UB - lower_bound(n_0) + // + // Here we can see that the "starting node" n0 is on the "capacity" side and + // will serve the role of a bin with capacity (UB - lower_bound(n_0)), whereas + // the other nodes n will be seen as "item" with demands d(i). + // + // Given that the set of paths going through S must be disjoint and serve all + // the nodes, we get exactly the special bin packing problem described above + // where the starting nodes are the bins and the other inner-nodes are the + // items. + // + // Note that if a node has no incoming arc from within S, it must be a start + // (i.e. a bin). And if a node has no incoming arcs from outside S, it cannot + // be a start and must be an inner node (i.e. an item). We can exploit this to + // derive better bounds. + // + // We just explained the reduction using incoming arcs and starts of route, + // but we can do the same with outgoing arcs and ends of route. 
 We can also + // change the dimension (the X_i) and variable direction used in the + // RouteRelationsHelper to exploit relations X_h - X_t <= rhs(t,h) instead. + // + // We provide a reduction for the cross product of: + // - Each possible dimension in the RouteRelationsHelper. + // - lhs or rhs (when negate_variables = true) in X - Y \in [lhs, rhs]. + // - (start and incoming arcs) or (ends and outgoing arcs). + // + // Warning: the returned Span<> is only valid until the next call to this + // function. + // + // TODO(user): Given the info for a subset, we can derive bounds for any + // smaller set included in it. We just have to ignore the MUST_BE_ITEM + // type as this might no longer be true. That might be interesting. + absl::Span RelaxIntoSpecialBinPackingProblem( + absl::Span subset, int dimension, bool negate_variables, + bool use_incoming, const RouteRelationsHelper& helper); + // Returns the minimum flow going out of a subset of size `subset_size`, // assuming that the longest feasible path inside this subset has // `longest_path_length` nodes and that there are at most `max_longest_paths` @@ -68,6 +289,7 @@ class MinOutgoingFlowHelper { const BinaryRelationRepository& binary_relation_repository_; const Trail& trail_; const IntegerTrail& integer_trail_; + SharedStatistics* shared_stats_; // Temporary data used by ComputeMinOutgoingFlow(). Always contain default // values, except while ComputeMinOutgoingFlow() is running. @@ -81,10 +303,20 @@ class MinOutgoingFlowHelper { std::vector in_subset_; std::vector index_in_subset_; + // For each node n, the indices (in tails_, heads_) of the m->n and n->m arcs // inside the subset (self arcs excepted). std::vector> incoming_arc_indices_; std::vector> outgoing_arc_indices_; + + // This can only be true for nodes in the current subset. If a node 'n' has no + // incoming arcs from outside the subset, the part of a route serving node 'n' + // in a subset cannot start at that node. 
And if it has no outgoing arc leaving + // the subset, it cannot end at that node. This can be used to derive tighter + // bounds. + std::vector has_incoming_arcs_from_outside_; + std::vector has_outgoing_arcs_to_outside_; + // For each node n, whether it can appear at the current and next position in + // a feasible path. + std::vector reachable_; @@ -98,6 +330,15 @@ class MinOutgoingFlowHelper { node_var_lower_bounds_; std::vector> next_node_var_lower_bounds_; + + std::vector tmp_bin_packing_problem_; + + // Statistics. + int64_t num_full_dp_skips_ = 0; + int64_t num_full_dp_calls_ = 0; + int64_t num_full_dp_early_abort_ = 0; + int64_t num_full_dp_work_abort_ = 0; + absl::flat_hash_map num_by_type_; }; // Given a graph with nodes in [0, num_nodes) and a set of arcs (the order is @@ -194,11 +435,14 @@ CutGenerator CreateStronglyConnectedGraphCutGenerator( // components, computes the demand needed to serves it, and depending on whether // it contains the depot (node zero) or not, compute the minimum number of // vehicle that needs to cross the component border. -CutGenerator CreateCVRPCutGenerator(int num_nodes, absl::Span tails, - absl::Span heads, - absl::Span literals, - absl::Span demands, - int64_t capacity, Model* model); +// `flat_node_dim_expressions` must have num_dimensions (possibly 0) times +// num_nodes elements, with the expression associated with node n and dimension +// d at index n * num_dimensions + d. +CutGenerator CreateCVRPCutGenerator( + int num_nodes, absl::Span tails, absl::Span heads, + absl::Span literals, absl::Span demands, + absl::Span flat_node_dim_expressions, + int64_t capacity, Model* model); // Try to find a subset where the current LP capacity of the outgoing or // incoming arc is not enough to satisfy the demands. 
diff --git a/ortools/sat/routing_cuts_test.cc b/ortools/sat/routing_cuts_test.cc index 2862ecb59d..ef4cf142f2 100644 --- a/ortools/sat/routing_cuts_test.cc +++ b/ortools/sat/routing_cuts_test.cc @@ -17,18 +17,21 @@ #include #include #include +#include #include #include #include "absl/container/flat_hash_map.h" +#include "absl/log/log.h" #include "absl/random/distributions.h" #include "absl/random/random.h" #include "absl/types/span.h" #include "gtest/gtest.h" #include "ortools/base/gmock.h" -#include "ortools/base/logging.h" +#include "ortools/base/parse_test_proto.h" #include "ortools/base/strong_vector.h" #include "ortools/graph/max_flow.h" +#include "ortools/sat/cp_model.h" #include "ortools/sat/cuts.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" @@ -43,7 +46,15 @@ namespace operations_research { namespace sat { namespace { +using ::google::protobuf::contrib::parse_proto::ParseTestProto; +using ::testing::AnyOf; using ::testing::ElementsAre; +using ::testing::Eq; +using ::testing::EqualsProto; +using ::testing::IsEmpty; +using ::testing::Pair; +using ::testing::UnorderedElementsAre; +using Relation = RouteRelationsHelper::Relation; TEST(MinOutgoingFlowHelperTest, TwoNodesWithoutConstraints) { Model model; @@ -116,6 +127,327 @@ TEST(MinOutgoingFlowHelperTest, CapacityConstraints) { EXPECT_EQ(tight_min_flow, 2); } +class DemandBasedMinOutgoingFlowHelperTest + : public testing::TestWithParam> {}; + +TEST_P(DemandBasedMinOutgoingFlowHelperTest, BasicCapacities) { + // If true, the load variables are the load of the vehicle leaving each node, + // otherwise they are the load of the vehicle arriving at each node. + const bool use_outgoing_load = GetParam().first; + // If true, vehicles pickup items at each node, otherwise they deliver items. + const bool pickup = GetParam().second; + + Model model; + const int num_nodes = 5; + // A complete graph with num_nodes. 
+ std::vector tails; + std::vector heads; + std::vector literals; + absl::flat_hash_map, Literal> literal_by_arc; + for (int tail = 0; tail < num_nodes; ++tail) { + for (int head = 0; head < num_nodes; ++head) { + if (tail == head) continue; + tails.push_back(tail); + heads.push_back(head); + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + literal_by_arc[{tail, head}] = literals.back(); + } + } + const std::vector demands = {0, 11, 12, 13, 14}; + std::vector loads; + const int max_capacity = 49; + for (int n = 0; n < num_nodes; ++n) { + if (pickup == use_outgoing_load) { + loads.push_back(model.Add(NewIntegerVariable(demands[n], max_capacity))); + } else { + loads.push_back( + model.Add(NewIntegerVariable(0, max_capacity - demands[n]))); + } + } + // Capacity constraints. + auto* repository = model.GetOrCreate(); + for (const auto& [arc, literal] : literal_by_arc) { + const auto& [tail, head] = arc; + if (tail == 0 || head == 0) continue; + if (pickup) { + // loads[head] - loads[tail] >= demand + repository->Add(literal, {loads[head], 1}, {loads[tail], -1}, + demands[use_outgoing_load ? head : tail], 1000); + } else { + // loads[tail] - loads[head] >= demand + repository->Add(literal, {loads[tail], 1}, {loads[head], -1}, + demands[use_outgoing_load ? head : tail], 1000); + } + } + repository->Build(); + std::unique_ptr route_relations_helper = + RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {}, + *repository); + ASSERT_NE(route_relations_helper, nullptr); + // Subject under test. + MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); + + const int min_flow = helper.ComputeDemandBasedMinOutgoingFlow( + {1, 2, 3, 4}, *route_relations_helper); + + // The total demand is 50, and the maximum capacity is 49. 
+ EXPECT_EQ(min_flow, 2); +} + +TEST_P(DemandBasedMinOutgoingFlowHelperTest, + NodesWithoutIncomingOrOutgoingArc) { + // If true, the load variables are the load of the vehicle leaving each node, + // otherwise they are the load of the vehicle arriving at each node. + const bool use_outgoing_load = GetParam().first; + // If true, vehicles pickup items at each node, otherwise they deliver items. + const bool pickup = GetParam().second; + + Model model; + // A graph with 4 nodes and 4 arcs, with 1 node without incoming arc and 1 + // node without outgoing arc: + // + // --> 1 --> 2 --> + // ^ | + // | v + // --> 0 --> 3 --> + // + // We use "outside" arcs from/to node 4 otherwise the problem will be + // infeasible. + const int num_nodes = 5; + const std::vector tails = {0, 0, 1, 2, 4, 4, 2, 3}; + const std::vector heads = {1, 3, 2, 3, 0, 1, 4, 4}; + std::vector literals(tails.size()); + for (int i = 0; i < literals.size(); ++i) { + literals[i] = Literal(model.Add(NewBooleanVariable()), true); + } + const std::vector demands = {11, 12, 13, 14}; + std::vector loads; + const int max_capacity = 49; + for (int n = 0; n < demands.size(); ++n) { + if (pickup == use_outgoing_load) { + loads.push_back(model.Add(NewIntegerVariable(demands[n], max_capacity))); + } else { + loads.push_back( + model.Add(NewIntegerVariable(0, max_capacity - demands[n]))); + } + } + // Capacity constraints. + auto* repository = model.GetOrCreate(); + for (int i = 0; i < 4; ++i) { + const int head = heads[i]; + const int tail = tails[i]; + if (pickup) { + // loads[head] - loads[tail] >= demand + repository->Add(literals[i], {loads[head], 1}, {loads[tail], -1}, + demands[use_outgoing_load ? head : tail], 1000); + } else { + // loads[tail] - loads[head] >= demand + repository->Add(literals[i], {loads[tail], 1}, {loads[head], -1}, + demands[use_outgoing_load ? 
head : tail], 1000); + } + } + repository->Build(); + std::unique_ptr route_relations_helper = + RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {}, + *repository); + ASSERT_NE(route_relations_helper, nullptr); + // Subject under test. + MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); + + const int min_flow = helper.ComputeDemandBasedMinOutgoingFlow( + {0, 1, 2, 3}, *route_relations_helper); + + // The total demand is 50, and the maximum capacity is 49. + EXPECT_EQ(min_flow, 2); +} + +INSTANTIATE_TEST_SUITE_P(AllCombinations, DemandBasedMinOutgoingFlowHelperTest, + testing::Values(std::make_pair(true, true), + std::make_pair(true, false), + std::make_pair(false, true), + std::make_pair(false, false))); + +TEST(MinOutgoingFlowHelperTest, NodeExpressionWithConstant) { + // A graph with 3 nodes: 0 <--> 1 -(demand1)-> 2 <-(demand2)-> 0 + Model model; + const int num_nodes = 3; + std::vector tails = {1, 0, 0, 1, 2}; + std::vector heads = {2, 1, 2, 0, 0}; + std::vector literals; + for (int i = 0; i < tails.size(); ++i) { + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + } + // The vehicle capacity and the demand at each node. + const int capacity = 100; + const int demand1 = 70; + const int demand2 = 40; + // The load of the vehicle arriving at node 1. + const IntegerVariable load1 = + model.Add(NewIntegerVariable(0, capacity - demand1)); + // The load of the vehicle arriving at node 2, minus `offset`. 
+ const int offset = 30; + const IntegerVariable offset_load2 = + model.Add(NewIntegerVariable(-offset, capacity - demand2 - offset)); + + auto* repository = model.GetOrCreate(); + // Capacity constraint: (offset_load2 + offset) - load1 >= demand1 + repository->Add(literals[0], {offset_load2, 1}, {load1, -1}, demand1 - offset, + 1000); + repository->Build(); + std::unique_ptr route_relations_helper = + RouteRelationsHelper::Create(num_nodes, tails, heads, literals, + {AffineExpression(), AffineExpression(load1), + AffineExpression(offset_load2, 1, offset)}, + *repository); + ASSERT_NE(route_relations_helper, nullptr); + + MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); + const int min_flow = + helper.ComputeDemandBasedMinOutgoingFlow({1, 2}, *route_relations_helper); + + // The total demand exceeds the capacity. + EXPECT_EQ(min_flow, 2); +} + +TEST(MinOutgoingFlowHelperTest, NodeMustBeInnerNode) { + // when considering subset {1, 2, 3}, knowing that 2 cannot be reached + // from outside can lead to better bound. The non zero-demands are in () on + // the arcs. + // + // 0 --> 1 -(5)-> 2 -(5)-> 3 --> 0 + // 1 <-(3)- 2 -----------> 0 + // 1 -----(4)------> 3 + // 0 --------------------> 3 + for (const bool can_enter_at_2 : {true, false}) { + Model model; + const int num_nodes = 4; + std::vector tails = {0, 1, 2, 3, 2, 2, 1, 0}; + std::vector heads = {1, 2, 3, 0, 0, 1, 3, 3}; + std::vector demands = {0, 5, 5, 0, 0, 4, 4, 0}; + if (can_enter_at_2) { + tails.push_back(0); + heads.push_back(2); + demands.push_back(0); + } + std::vector literals; + const int num_arcs = demands.size(); + for (int i = 0; i < num_arcs; ++i) { + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + } + + std::vector loads; + for (int i = 0; i < num_nodes; ++i) { + loads.push_back(model.Add(NewIntegerVariable(0, 8))); + } + + // Capacity constraints. 
+ auto* repository = model.GetOrCreate(); + for (int i = 0; i < num_arcs; ++i) { + // loads[head] - loads[tail] >= demand[arc] + repository->Add(literals[i], {loads[heads[i]], 1}, {loads[tails[i]], -1}, + demands[i], 1000); + } + repository->Build(); + std::unique_ptr route_relations_helper = + RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {}, + *repository); + ASSERT_NE(route_relations_helper, nullptr); + + MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); + const int min_flow = helper.ComputeDemandBasedMinOutgoingFlow( + {1, 2, 3}, *route_relations_helper); + + // If we cannot enter at 2, the only possibility is 0->1->2->0 and 0->3->0. + // Otherwise 0->2->1->3->0 is just under the capacity of 8. + EXPECT_EQ(min_flow, can_enter_at_2 ? 1 : 2); + } +} + +TEST(MinOutgoingFlowHelperTest, BetterUseOfUpperBound) { + // The non-zero demands are in () on the arcs. + // when considering subset {1, 2}: + // + // 0 --> 1 -(8)-> 2 --> 0 + // 0 --> 2 -(8)-> 1 --> 0 + for (const bool bounds_forces_two_path : {true, false}) { + Model model; + std::vector tails = {0, 1, 2, 0, 2, 1}; + std::vector heads = {1, 2, 0, 2, 1, 0}; + std::vector demands = {0, 8, 0, 0, 8, 0}; + std::vector literals; + const int num_arcs = demands.size(); + for (int i = 0; i < num_arcs; ++i) { + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + } + + std::vector loads; + loads.push_back(model.Add(NewIntegerVariable(0, 10))); // depot. + if (bounds_forces_two_path) { + // Here if we exploit the bound properly, we can see that both possible + // paths are invalid. + loads.push_back(model.Add(NewIntegerVariable(0, 10))); + loads.push_back(model.Add(NewIntegerVariable(5, 5))); + } else { + // Here the path 0->1->2->0 is fine. + loads.push_back(model.Add(NewIntegerVariable(0, 10))); + loads.push_back(model.Add(NewIntegerVariable(5, 10))); + } + + // Capacity constraints. 
+ auto* repository = model.GetOrCreate(); + for (int i = 0; i < num_arcs; ++i) { + // loads[head] - loads[tail] >= demand[arc] + repository->Add(literals[i], {loads[heads[i]], 1}, {loads[tails[i]], -1}, + demands[i], 1000); + } + repository->Build(); + std::unique_ptr route_relations_helper = + RouteRelationsHelper::Create(loads.size(), tails, heads, literals, {}, + *repository); + ASSERT_NE(route_relations_helper, nullptr); + + MinOutgoingFlowHelper helper(loads.size(), tails, heads, literals, &model); + const int min_flow = helper.ComputeDemandBasedMinOutgoingFlow( + {1, 2}, *route_relations_helper); + + EXPECT_EQ(min_flow, bounds_forces_two_path ? 2 : 1); + } +} + +TEST(MinOutgoingFlowHelperTest, DemandBasedMinOutgoingFlow_IsolatedNodes) { + Model model; + const int num_nodes = 5; + // A star graph with num_nodes-1 nodes and a depot. + std::vector tails; + std::vector heads; + std::vector literals; + std::vector variables; + auto* repository = model.GetOrCreate(); + // The depot variable. + variables.push_back(model.Add(NewIntegerVariable(0, 100))); + for (int head = 1; head < num_nodes; ++head) { + tails.push_back(0); + heads.push_back(head); + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + variables.push_back(model.Add(NewIntegerVariable(0, 100))); + // Dummy relation, used only to associate a variable with each node. + repository->Add(literals.back(), {variables[head], 1}, {variables[0], -1}, + 1, 100); + } + repository->Build(); + std::unique_ptr route_relations_helper = + RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {}, + *repository); + ASSERT_NE(route_relations_helper, nullptr); + // Subject under test. 
+ MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); + + const int min_flow = helper.ComputeDemandBasedMinOutgoingFlow( + {1, 2, 3, 4}, *route_relations_helper); + + EXPECT_EQ(min_flow, 4); +} + TEST(MinOutgoingFlowHelperTest, TimeWindows) { Model model; const int num_nodes = 5; @@ -170,12 +502,835 @@ TEST(MinOutgoingFlowHelperTest, TimeWindows) { EXPECT_EQ(tight_min_flow, 2); } -// Test on a simple tree: -// 3 -// / \ \ -// 1 0 5 -// / \ -// 2 4 +std::vector> +GetNodeExpressionsByDimension(const RouteRelationsHelper& helper) { + std::vector> result( + helper.num_dimensions()); + for (int n = 0; n < helper.num_nodes(); ++n) { + for (int d = 0; d < helper.num_dimensions(); ++d) { + if (!helper.GetNodeExpression(n, d).IsConstant()) { + result[d][n] = helper.GetNodeExpression(n, d); + } + } + } + return result; +} + +int SolveTwoDimensionBinPacking(int capacity, absl::Span load1, + absl::Span load2) { + // Lets generate a quick cp-sat model. + const int num_items = load1.size(); + const int num_bins = num_items; + + CpModelBuilder cp_model; + + // x[i][b] == item i in bin b. + std::vector> x(num_items); + for (int i = 0; i < num_items; ++i) { + x[i].resize(num_bins); + for (int b = 0; b < num_bins; ++b) { + x[i][b] = cp_model.NewBoolVar(); + } + } + + // Place all items. + for (int i = 0; i < num_items; ++i) { + cp_model.AddExactlyOne(x[i]); + } + + // Respect capacity. + for (int b = 0; b < num_bins; ++b) { + LinearExpr sum1; + LinearExpr sum2; + for (int i = 0; i < num_items; ++i) { + sum1 += load1[i] * x[i][b]; + sum2 += load2[i] * x[i][b]; + } + cp_model.AddLessOrEqual(sum1, capacity); + cp_model.AddLessOrEqual(sum2, capacity); + } + + // Bin used variables. 
+ std::vector is_used(num_bins); + for (int b = 0; b < num_bins; ++b) { + is_used[b] = cp_model.NewBoolVar(); + for (int i = 0; i < num_items; ++i) { + cp_model.AddImplication(x[i][b], is_used[b]); + } + } + + // Objective + cp_model.Minimize(LinearExpr::Sum(is_used)); + + // Solving part. + const CpSolverResponse response = Solve(cp_model.Build()); + return static_cast(response.objective_value()); +} + +// We test a simple example with 2 dimensions and 4 nodes with demands +// (7, 3) (3, 7) and (3, 1), (1, 3). +TEST(MinOutgoingFlowHelperTest, SubsetMightBeServedWithKRoutes) { + Model model; + const int num_nodes = 5; + + // A complete graph with num_nodes. + std::vector tails; + std::vector heads; + std::vector literals; + absl::flat_hash_map, Literal> literal_by_arc; + for (int tail = 0; tail < num_nodes; ++tail) { + for (int head = 0; head < num_nodes; ++head) { + if (tail == head) continue; + tails.push_back(tail); + heads.push_back(head); + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + literal_by_arc[{tail, head}] = literals.back(); + } + } + + // Load of each node on both dimensions. + std::vector load1 = {0, 7, 3, 3, 1}; + std::vector load2 = {0, 3, 7, 1, 3}; + + // For each node, one cumul variable per dimension. + std::vector cumul_vars_1; + std::vector cumul_vars_2; + const int64_t capacity(10); + for (int n = 0; n < num_nodes; ++n) { + cumul_vars_1.push_back(model.Add(NewIntegerVariable(load1[n], capacity))); + cumul_vars_2.push_back(model.Add(NewIntegerVariable(load2[n], capacity))); + } + + // Capacity constraints on two dimensions. 
+ auto* repository = model.GetOrCreate(); + for (const auto& [arc, literal] : literal_by_arc) { + const auto& [tail, head] = arc; + + // vars[head] >= vars[tail] + load[head]; + repository->Add(literal, {cumul_vars_1[head], 1}, {cumul_vars_1[tail], -1}, + load1[head], 10000); + repository->Add(literal, {cumul_vars_2[head], 1}, {cumul_vars_2[tail], -1}, + load2[head], 10000); + } + repository->Build(); + + const int optimal = SolveTwoDimensionBinPacking(capacity, load1, load2); + EXPECT_EQ(optimal, 2); + + // Subject under test. + MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); + + std::vector subset = {1, 2, 3, 4}; + for (int k = 0; k < subset.size(); ++k) { + if (k < optimal) { + EXPECT_FALSE(helper.SubsetMightBeServedWithKRoutes(k, subset)); + } else { + EXPECT_TRUE(helper.SubsetMightBeServedWithKRoutes(k, subset)); + } + } +} + +// Same as above but with randomization. +// I kept the "golden" test just to make sure things looks reasonable. +TEST(MinOutgoingFlowHelperTest, SubsetMightBeServedWithKRoutesRandom) { + Model model; + absl::BitGen random; + const int num_nodes = 8; + const int capacity = 20; + + // A complete graph with num_nodes. + std::vector tails; + std::vector heads; + std::vector literals; + absl::flat_hash_map, Literal> literal_by_arc; + for (int tail = 0; tail < num_nodes; ++tail) { + for (int head = 0; head < num_nodes; ++head) { + if (tail == head) continue; + tails.push_back(tail); + heads.push_back(head); + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + literal_by_arc[{tail, head}] = literals.back(); + } + } + + // Load of each node on both dimensions. + std::vector load1(num_nodes, 0); + std::vector load2(num_nodes, 0); + for (int n = 0; n < num_nodes; ++n) { + load1[n] = absl::Uniform(random, 0, capacity); + load2[n] = absl::Uniform(random, 0, capacity); + } + + // For each node, one cumul variable per dimension. 
+ std::vector cumul_vars_1; + std::vector cumul_vars_2; + for (int n = 0; n < num_nodes; ++n) { + cumul_vars_1.push_back(model.Add(NewIntegerVariable(load1[n], capacity))); + cumul_vars_2.push_back(model.Add(NewIntegerVariable(load2[n], capacity))); + } + + // Capacity constraints on two dimensions. + auto* repository = model.GetOrCreate(); + for (const auto& [arc, literal] : literal_by_arc) { + const auto& [tail, head] = arc; + + // vars[head] >= vars[tail] + load[head]; + repository->Add(literal, {cumul_vars_1[head], 1}, {cumul_vars_1[tail], -1}, + load1[head], 10000); + repository->Add(literal, {cumul_vars_2[head], 1}, {cumul_vars_2[tail], -1}, + load2[head], 10000); + } + repository->Build(); + + // To check our indices mapping, lets remove a random nodes from the subset + std::vector subset; + for (int i = 0; i < num_nodes; ++i) subset.push_back(i); + const int to_remove = absl::Uniform(random, 0, num_nodes); + std::swap(subset[to_remove], subset.back()); + subset.pop_back(); + + // We set the load to zero to have the proper optimal. + load1[to_remove] = 0; + load2[to_remove] = 0; + const int optimal = SolveTwoDimensionBinPacking(capacity, load1, load2); + LOG(INFO) << "random problem optimal = " << optimal; + + // Subject under test. 
+ MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); + + for (int k = 0; k < subset.size(); ++k) { + if (k < optimal) { + EXPECT_FALSE(helper.SubsetMightBeServedWithKRoutes(k, subset)); + } else { + EXPECT_TRUE(helper.SubsetMightBeServedWithKRoutes(k, subset)); + } + } +} + +int SolveSpecialBinPackingWithCpSat(absl::Span objects) { + CpModelBuilder cp_model; + + const int n = objects.size(); + std::vector item_is_bin(n); + for (int i = 0; i < n; ++i) { + if (objects[i].type == ItemOrBin::MUST_BE_BIN) { + item_is_bin[i] = cp_model.TrueVar(); + } else if (objects[i].type == ItemOrBin::MUST_BE_ITEM) { + item_is_bin[i] = cp_model.FalseVar(); + } else { + item_is_bin[i] = cp_model.NewBoolVar(); + } + } + + // x[i][b] == item i in bin b. + std::vector> x(n); + for (int i = 0; i < n; ++i) { + x[i].resize(n); + for (int b = 0; b < n; ++b) { + if (i == b) { + // We always place a bin into itself in this model. + x[i][b] = item_is_bin[b]; + } else { + x[i][b] = cp_model.NewBoolVar(); + cp_model.AddImplication(x[i][b], item_is_bin[b]); + } + } + } + + // Place all items. + for (int i = 0; i < n; ++i) { + cp_model.AddExactlyOne(x[i]); + } + + // Respect capacity. + for (int b = 0; b < n; ++b) { + LinearExpr demands; + for (int i = 0; i < n; ++i) { + if (i == b) continue; + demands += objects[i].demand.value() * x[i][b]; + } + // We shift by the bin demand since we always have x[b][b] at true if the + // bin is used as such. + cp_model.AddLessOrEqual(demands, objects[b].capacity.value()) + .OnlyEnforceIf(item_is_bin[b]); + } + + // Objective + cp_model.Minimize(LinearExpr::Sum(item_is_bin)); + + // Solving part. + SatParameters params; + params.set_log_search_progress(false); + const CpSolverResponse response = + SolveWithParameters(cp_model.Build(), params); + + // This is the convention used in our bound computation function. 
+ if (response.status() == INFEASIBLE) return n + 1; + return static_cast(response.objective_value()); +} + +// Generate a random problem and make sure our bound is always valid. +// These problems are a bit easy, but with --runs_per_test 1000 there are a few +// instances where our lower bound is strictly worse than the true optimal. +TEST(SpecialBinPackingProblemTest, ComputeMinNumberOfBins) { + Model model; + absl::BitGen random; + const int num_objects = 20; + + std::vector objects; + for (int i = 0; i < num_objects; ++i) { + ItemOrBin o; + o.capacity = absl::Uniform(random, 0, 100); + o.demand = absl::Uniform(random, 0, 50); + const int type = absl::Uniform(random, 0, 2); + if (type == 0) o.type = ItemOrBin::MUST_BE_ITEM; + if (type == 1) o.type = ItemOrBin::ITEM_OR_BIN; + if (type == 2) o.type = ItemOrBin::MUST_BE_BIN; + objects.push_back(o); + } + + bool gcd_was_used; + const int obj_lb = + ComputeMinNumberOfBins(absl::MakeSpan(objects), &gcd_was_used); + const int optimal = SolveSpecialBinPackingWithCpSat(objects); + EXPECT_LE(obj_lb, optimal); + if (obj_lb != optimal) { + LOG(INFO) << "bound " << obj_lb << " optimal " << optimal; + } +} + +std::vector> GetRelationByDimensionAndArc( + const RouteRelationsHelper& helper) { + std::vector> result( + helper.num_dimensions()); + for (int i = 0; i < helper.num_arcs(); ++i) { + for (int d = 0; d < helper.num_dimensions(); ++d) { + if (!helper.GetArcRelation(i, d).empty()) { + result[d][i] = helper.GetArcRelation(i, d); + } + } + } + return result; +} + +TEST(RouteRelationsHelperTest, Basic) { + Model model; + // A graph with 6 nodes and the following arcs: + // + // l0 --->0<--- l1 + // | | + // 1--l2-->2--l3-->3 4--l4-->5 + // + const int num_nodes = 6; + const std::vector tails = {1, 2, 1, 2, 4}; + const std::vector heads = {0, 0, 2, 3, 5}; + const std::vector literals = { + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + 
Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true)}; + // Add relations with "time" variables A, B, C intended to be associated with + // nodes 0, 1, 2 respectively, and "load" variables U, V, W, X, Y, Z intended + // to be associated with nodes 0, 1, 2, 3, 4, 5 respectively. + const IntegerVariable a = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable u = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable v = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable w = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + BinaryRelationRepository repository; + repository.Add(literals[0], {a, 1}, {b, -1}, 50, 1000); + repository.Add(literals[1], {a, 1}, {c, -1}, 70, 1000); + repository.Add(literals[2], {c, 1}, {b, -1}, 40, 1000); + repository.Add(literals[0], {NegationOf(u), -1}, {NegationOf(v), 1}, 4, 100); + repository.Add(literals[1], {u, 1}, {w, -1}, 4, 100); + repository.Add(literals[2], {w, -1}, {v, 1}, -100, -3); + repository.Add(literals[3], {x, 1}, {w, -1}, 5, 100); + repository.Add(literals[4], {z, 1}, {y, -1}, 7, 100); + repository.Build(); + + std::unique_ptr helper = RouteRelationsHelper::Create( + num_nodes, tails, heads, literals, {}, repository); + + ASSERT_NE(helper, nullptr); + // Two dimensions (time and load) on the first connected component, and one + // dimension (load) on the second component. + EXPECT_EQ(helper->num_dimensions(), 3); + EXPECT_EQ(helper->num_nodes(), num_nodes); + EXPECT_EQ(helper->num_arcs(), 5); + // Check the node variables. 
+ EXPECT_THAT( + GetNodeExpressionsByDimension(*helper), + UnorderedElementsAre( + UnorderedElementsAre(Pair(0, a), Pair(1, b), Pair(2, c)), + UnorderedElementsAre(Pair(0, u), Pair(1, v), Pair(2, w), Pair(3, x)), + // Variables y and z cannot be unambiguously associated with nodes. + IsEmpty())); + // Check the arc relations. + EXPECT_THAT(GetRelationByDimensionAndArc(*helper), + UnorderedElementsAre( + UnorderedElementsAre(Pair(0, Relation{-1, 1, 50, 1000}), + Pair(1, Relation{-1, 1, 70, 1000}), + Pair(2, Relation{-1, 1, 40, 1000})), + UnorderedElementsAre(Pair(0, Relation{-1, 1, 4, 100}), + Pair(1, Relation{-1, 1, 4, 100}), + Pair(2, Relation{-1, 1, 3, 100}), + Pair(3, Relation{-1, 1, 5, 100})), + // The relation for the arc 4->5 is not recovered since its + // variables cannot be unambiguously associated with nodes. + IsEmpty())); + + helper->RemoveArcs({0, 2}); + + EXPECT_EQ(helper->num_nodes(), num_nodes); + EXPECT_EQ(helper->num_arcs(), 3); + EXPECT_THAT(GetRelationByDimensionAndArc(*helper), + UnorderedElementsAre( + UnorderedElementsAre(Pair(0, Relation{-1, 1, 70, 1000})), + UnorderedElementsAre(Pair(0, Relation{-1, 1, 4, 100}), + Pair(1, Relation{-1, 1, 5, 100})), + IsEmpty())); +} + +TEST(RouteRelationsHelperTest, UnenforcedRelations) { + Model model; + // Graph: 0--l0-->1 + // ^\ | + // l3 | \_l4_ | l1 + // | \v + // 3<--l2--2 + // + const int num_nodes = 4; + const std::vector tails = {0, 1, 2, 3, 0}; + const std::vector heads = {1, 2, 3, 0, 2}; + const std::vector literals = { + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true)}; + // Add relations with "time" variables A, B, C, D intended to be associated + // with nodes 0, 1, 2, 3 respectively. 
+ const IntegerVariable a = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable d = model.Add(NewIntegerVariable(0, 100)); + BinaryRelationRepository repository; + repository.Add(literals[0], {b, 1}, {a, -1}, 1, 1); + repository.Add(literals[1], {c, 1}, {b, -1}, 2, 2); + repository.Add(literals[2], {d, 1}, {c, -1}, 3, 3); + repository.Add(literals[3], {a, 1}, {d, -1}, 4, 4); + // Several unenforced relations on the diagonal arc. The one with the +/-1 + // coefficients should be preferred. + repository.Add(Literal(kNoLiteralIndex), {c, 3}, {a, -2}, 1, 9); + repository.Add(Literal(kNoLiteralIndex), {c, 1}, {a, -1}, 5, 5); + repository.Add(Literal(kNoLiteralIndex), {c, 2}, {a, -3}, 3, 8); + repository.Build(); + + std::unique_ptr helper = RouteRelationsHelper::Create( + num_nodes, tails, heads, literals, {}, repository); + + ASSERT_NE(helper, nullptr); + EXPECT_THAT(GetNodeExpressionsByDimension(*helper), + UnorderedElementsAre(UnorderedElementsAre( + Pair(0, a), Pair(1, b), Pair(2, c), Pair(3, d)))); + // The unenforced relation is taken into account. 
+ EXPECT_THAT( + GetRelationByDimensionAndArc(*helper), + UnorderedElementsAre(UnorderedElementsAre( + Pair(0, Relation{-1, 1, 1, 1}), Pair(1, Relation{-1, 1, 2, 2}), + Pair(2, Relation{-1, 1, 3, 3}), Pair(3, Relation{-1, 1, 4, 4}), + Pair(4, Relation{-1, 1, 5, 5})))); +} + +TEST(RouteRelationsHelperTest, SeveralVariablesPerNode) { + Model model; + // A graph with 3 nodes and the following arcs: 0--l0-->1--l2-->2 + const int num_nodes = 3; + const std::vector tails = {0, 1}; + const std::vector heads = {1, 2}; + const std::vector literals = { + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true)}; + // Add relations with "time" variables A, B, C and "load" variables X, Y, Z, + // intended to be associated with nodes 0, 1, 2 respectively. + const IntegerVariable a = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + BinaryRelationRepository repository; + repository.Add(literals[0], {b, 1}, {a, -1}, 50, 1000); + repository.Add(literals[1], {c, 1}, {b, -1}, 70, 1000); + repository.Add(literals[0], {z, 1}, {y, -1}, 5, 100); + repository.Add(literals[1], {y, 1}, {x, -1}, 7, 100); + // Weird relation linking time and load variables, causing all the variables + // to be in a single "dimension". 
+ repository.Add(literals[0], {x, 1}, {a, -1}, 0, 100); + repository.Build(); + + std::unique_ptr helper = RouteRelationsHelper::Create( + num_nodes, tails, heads, literals, {}, repository); + + EXPECT_EQ(helper, nullptr); +} + +TEST(RouteRelationsHelperTest, SeveralRelationsPerArc) { + Model model; + // A graph with 3 nodes and the following arcs: 0--l0-->1--l1-->2 + const int num_nodes = 3; + const std::vector tails = {0, 1}; + const std::vector heads = {1, 2}; + const std::vector literals = { + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true)}; + // Add relations with "time" variables A, B, C intended to be associated with + // nodes 0, 1, 2 respectively. + const IntegerVariable a = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); + BinaryRelationRepository repository; + repository.Add(literals[0], {b, 1}, {a, -1}, 50, 1000); + repository.Add(literals[1], {c, 1}, {b, -1}, 70, 1000); + // Add a second relation for some arc. + repository.Add(literals[1], {c, 2}, {b, -3}, 100, 200); + repository.Build(); + + using Relation = RouteRelationsHelper::Relation; + std::unique_ptr helper = RouteRelationsHelper::Create( + num_nodes, tails, heads, literals, {}, repository); + + ASSERT_NE(helper, nullptr); + EXPECT_EQ(helper->num_dimensions(), 1); + EXPECT_EQ(helper->GetNodeExpression(0, 0), a); + EXPECT_EQ(helper->GetNodeExpression(1, 0), b); + EXPECT_EQ(helper->GetNodeExpression(2, 0), c); + EXPECT_EQ(helper->GetArcRelation(0, 0), (Relation{-1, 1, 50, 1000})); + EXPECT_THAT( + helper->GetArcRelation(1, 0), + AnyOf(Eq(Relation{-1, 1, 70, 1000}), Eq(Relation{-3, 2, 100, 200}))); +} + +TEST(RouteRelationsHelperTest, SeveralArcsPerLiteral) { + // A graph with 3 nodes and the following arcs: 0--l0-->1--l0-->2, both + // enforced by the same literal l0. 
+ Model model; + const int num_nodes = 3; + const std::vector tails = {0, 1}; + const std::vector heads = {1, 2}; + const Literal literal(model.Add(NewBooleanVariable()), true); + const std::vector literals = {literal, literal}; + // Add relations with "time" variables A, B, C intended to be associated with + // nodes 0, 1, 2 respectively. + const IntegerVariable a = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); + BinaryRelationRepository repository; + repository.Add(literals[0], {b, 1}, {a, -1}, 50, 1000); + repository.Add(literals[0], {c, 1}, {b, -1}, 40, 1000); + repository.Build(); + + std::unique_ptr helper = RouteRelationsHelper::Create( + num_nodes, tails, heads, literals, {}, repository); + + // No variable should be associated with any node, since there is no unique + // way to do this ([A, B, C] or [C, B, A], for nodes [0, 1, 2] respectively). + // As a consequence, no relation should be recovered either. + EXPECT_EQ(helper, nullptr); +} + +TEST(RouteRelationsHelperTest, InconsistentRelationIsSkipped) { + // Graph: 0--l0-->1--l1-->2--l3-->3--l4-->4 + // | ^ + // | | + // l3 ------->5-------- l5 + // + Model model; + const int num_nodes = 6; + const std::vector tails = {0, 1, 2, 3, 1, 5}; + const std::vector heads = {1, 2, 3, 4, 5, 3}; + const std::vector literals = { + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true)}; + // Variables a, b, c, d, e, f are supposed to be associated with nodes 0, 1, + // 2, 3, 4, 5 respectively. 
+ const IntegerVariable a = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable d = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable e = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable f = model.Add(NewIntegerVariable(0, 100)); + BinaryRelationRepository repository; + repository.Add(literals[0], {b, 1}, {a, -1}, 0, 0); + repository.Add(literals[1], {c, 1}, {b, -1}, 1, 1); + repository.Add(literals[2], {d, 1}, {c, -1}, 2, 2); + repository.Add(literals[3], {e, 1}, {d, -1}, 3, 3); + repository.Add(literals[4], {f, 1}, {b, -1}, 4, 4); + // Inconsistent relation for arc 5->3 (should be between f and d). + repository.Add(literals[5], {f, 2}, {b, -1}, 5, 5); + repository.Build(); + + std::unique_ptr helper = RouteRelationsHelper::Create( + num_nodes, tails, heads, literals, {}, repository); + + ASSERT_NE(helper, nullptr); + EXPECT_THAT(GetNodeExpressionsByDimension(*helper), + UnorderedElementsAre( + UnorderedElementsAre(Pair(0, a), Pair(1, b), Pair(2, c), + Pair(3, d), Pair(4, e), Pair(5, f)))); + // The relation for arc 5->3 is filtered out because it is inconsistent. 
+ EXPECT_THAT( + GetRelationByDimensionAndArc(*helper), + UnorderedElementsAre(UnorderedElementsAre( + Pair(0, Relation{-1, 1, 0, 0}), Pair(1, Relation{-1, 1, 1, 1}), + Pair(2, Relation{-1, 1, 2, 2}), Pair(3, Relation{-1, 1, 3, 3}), + Pair(4, Relation{-1, 1, 4, 4})))); +} + +TEST(RouteRelationsHelperTest, InconsistentRelationWithMultipleArcsPerLiteral) { + // Graph: 0--l0-->1<--- + // ^ | | + // l3 l1 | + // | v l4 + // 3<--l2--2 | + // | | + // ----l4----->4 + Model model; + const int num_nodes = 5; + const std::vector tails = {0, 1, 2, 3, 4, 3}; + const std::vector heads = {1, 2, 3, 0, 1, 4}; + const Literal l4 = Literal(model.Add(NewBooleanVariable()), true); + const std::vector literals = { + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + l4, + l4}; + // Variables a, b, c, d, e are supposed to be associated with nodes 0, 1, 2, + // 3, 4 respectively. + const IntegerVariable a = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable d = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable e = model.Add(NewIntegerVariable(0, 100)); + BinaryRelationRepository repository; + repository.Add(literals[0], {b, 1}, {a, -1}, 0, 0); + repository.Add(literals[1], {c, 1}, {b, -1}, 1, 1); + repository.Add(literals[2], {d, 1}, {c, -1}, 2, 2); + repository.Add(literals[3], {a, 1}, {d, -1}, 3, 3); + // Inconsistent relation for arc 4->1 (should be between e and b). Note that + // arcs 4->1 and 4->3 are enforced by the same literal. 
+ repository.Add(literals[4], {e, 1}, {d, -1}, 4, 4); + repository.Add(literals[5], {e, 1}, {d, -1}, 5, 5); + repository.Build(); + + std::unique_ptr helper = RouteRelationsHelper::Create( + num_nodes, tails, heads, literals, {}, repository); + + ASSERT_NE(helper, nullptr); + EXPECT_THAT(GetNodeExpressionsByDimension(*helper), + UnorderedElementsAre(UnorderedElementsAre( + Pair(0, a), Pair(1, b), Pair(2, c), Pair(3, d), Pair(4, e)))); + // The relation for arc 4->1 is filtered out because it is inconsistent. + EXPECT_THAT( + GetRelationByDimensionAndArc(*helper), + UnorderedElementsAre(UnorderedElementsAre( + Pair(0, Relation{-1, 1, 0, 0}), Pair(1, Relation{-1, 1, 1, 1}), + Pair(2, Relation{-1, 1, 2, 2}), Pair(3, Relation{-1, 1, 3, 3}), + Pair(5, Relation{-1, 1, 5, 5})))); +} + +TEST(MaybeFillMissingRoutesConstraintNodeExpressions, + FillsNodeVariablesIfNotPresent) { + // A graph with 4 nodes and the following arcs, with relations implying that + // variables 4, 5, 6, 7 should be associated with nodes 0, 1, 2, 3 + // respectively. 
+ // + // l0 --->0<--- l1 + // | | + // 1--l2-->2--l3-->3 + // + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + routes { + tails: [ 1, 2, 1, 2 ] + heads: [ 0, 0, 2, 3 ] + literals: [ 0, 1, 2, 3 ] + } + } + constraints { + enforcement_literal: 0 + linear { + vars: [ 4, 5 ] + coeffs: [ 1, -1 ] + domain: [ 0, 10 ] + } + } + constraints { + enforcement_literal: 1 + linear { + vars: [ 4, 6 ] + coeffs: [ 1, -1 ] + domain: [ 0, 10 ] + } + } + constraints { + enforcement_literal: 2 + linear { + vars: [ 5, 6 ] + coeffs: [ 1, -1 ] + domain: [ 0, 10 ] + } + } + constraints { + enforcement_literal: 3 + linear { + vars: [ 6, 7 ] + coeffs: [ 1, -1 ] + domain: [ 0, 10 ] + } + } + )pb"); + CpModelProto new_cp_model = initial_model; + const auto [num_routes, num_dimensions] = + MaybeFillMissingRoutesConstraintNodeExpressions(initial_model, + new_cp_model); + + EXPECT_EQ(num_routes, 1); + EXPECT_EQ(num_dimensions, 1); + const ConstraintProto expected_constraint = ParseTestProto(R"pb( + routes { + tails: [ 1, 2, 1, 2 ] + heads: [ 0, 0, 2, 3 ] + literals: [ 0, 1, 2, 3 ] + dimensions { + exprs { + vars: [ 4 ] + coeffs: [ 1 ] + } + exprs { + vars: [ 5 ] + coeffs: [ 1 ] + } + exprs { + vars: [ 6 ] + coeffs: [ 1 ] + } + exprs { + vars: [ 7 ] + coeffs: [ 1 ] + } + } + } + )pb"); + EXPECT_THAT(new_cp_model.constraints(0), EqualsProto(expected_constraint)); +} + +TEST(MaybeFillMissingRoutesConstraintNodeExpressions, + KeepsNodeVariablesIfPresent) { + // A graph with 4 nodes and the following arcs, with relations implying that + // variables 4, 5, 6, 7 should be associated with nodes 0, 1, 2, 3 + // respectively (but the user provided 7, 6, 5, 4 instead, respectively). 
+ // + // l0 --->0<--- l1 + // | | + // 1--l2-->2--l3-->3 + // + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + routes { + tails: [ 1, 2, 1, 2 ] + heads: [ 0, 0, 2, 3 ] + literals: [ 0, 1, 2, 3 ] + dimensions { + exprs { + vars: [ 7 ] + coeffs: [ 1 ] + } + exprs { + vars: [ 6 ] + coeffs: [ 1 ] + } + exprs { + vars: [ 5 ] + coeffs: [ 1 ] + } + exprs { + vars: [ 4 ] + coeffs: [ 1 ] + } + } + } + } + constraints { + enforcement_literal: 0 + linear { + vars: [ 4, 5 ] + coeffs: [ 1, -1 ] + domain: [ 0, 10 ] + } + } + constraints { + enforcement_literal: 1 + linear { + vars: [ 4, 6 ] + coeffs: [ 1, -1 ] + domain: [ 0, 10 ] + } + } + constraints { + enforcement_literal: 2 + linear { + vars: [ 5, 6 ] + coeffs: [ 1, -1 ] + domain: [ 0, 10 ] + } + } + constraints { + enforcement_literal: 3 + linear { + vars: [ 6, 7 ] + coeffs: [ 1, -1 ] + domain: [ 0, 10 ] + } + } + )pb"); + CpModelProto new_cp_model = initial_model; + const auto [num_routes, num_dimensions] = + MaybeFillMissingRoutesConstraintNodeExpressions(initial_model, + new_cp_model); + + EXPECT_EQ(num_routes, 0); + EXPECT_EQ(num_dimensions, 0); + EXPECT_THAT(new_cp_model, EqualsProto(initial_model)); +} + TEST(ExtractAllSubsetsFromForestTest, Basic) { std::vector parents = {3, 3, 1, 3, 1, 3}; @@ -391,8 +1546,8 @@ TEST(CreateStronglyConnectedGraphCutGeneratorTest, BasicExample) { } TEST(CreateStronglyConnectedGraphCutGeneratorTest, AnotherExample) { - // This time, the graph is fully connected, but we still detect that {1, 2, 3} - // do not have enough outgoing flow: + // This time, the graph is fully connected, but we still detect that {1, 2, + // 3} do not have enough outgoing flow: // // 0.5 // 0 <--> 1 @@ -423,7 +1578,7 @@ 
TEST(CreateStronglyConnectedGraphCutGeneratorTest, AnotherExample) { generator.generate_cuts(&manager); // The sets {2, 3} and {1, 2, 3} will generate cuts. - // However as an heuristic, we will wait another round to generate {1, 2 ,3}. + // However as an heuristic, we will wait another round to generate {1, 2, 3}. EXPECT_EQ(manager.num_cuts(), 1); EXPECT_THAT(manager.AllConstraints().back().constraint.DebugString(), ::testing::StartsWith("1 <= 1*X3 1*X6")); diff --git a/ortools/sat/sat_inprocessing.cc b/ortools/sat/sat_inprocessing.cc index f13067f17d..a8dbd02091 100644 --- a/ortools/sat/sat_inprocessing.cc +++ b/ortools/sat/sat_inprocessing.cc @@ -25,6 +25,7 @@ #include "absl/cleanup/cleanup.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/base/stl_util.h" diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 0c1fd8ce97..25d97c434e 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -24,7 +24,7 @@ option java_multiple_files = true; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 316 +// NEXT TAG: 318 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -475,6 +475,9 @@ message SatParameters { // Permutations (#Variables = #Values) are always expanded. optional bool expand_alldiff_constraints = 170 [default = false]; + // Max domain size for all_different constraints to be expanded. + optional int32 max_alldiff_domain_size = 317 [default = 128]; + // If true, expand the reservoir constraints by creating booleans for all // possible precedences between event and encoding the constraint. 
optional bool expand_reservoir_constraints = 182 [default = true]; @@ -942,6 +945,12 @@ message SatParameters { optional int32 routing_cut_subset_size_for_tight_binary_relation_bound = 313 [default = 0]; + // Similar to above, but with an even stronger algorithm in O(n!). We try to + // be defensive and abort early or not run that often. Still the value of + // that parameter shouldn't really be much more than 10. + optional int32 routing_cut_subset_size_for_exact_binary_relation_bound = 316 + [default = 0]; + // The amount of "effort" to spend in dynamic programming for computing // routing cuts. This is in term of basic operations needed by the algorithm // in the worst case, so a value like 1e8 should take less than a second to diff --git a/ortools/sat/sat_runner.cc b/ortools/sat/sat_runner.cc index b8e7c1f94c..8f21d79c8b 100644 --- a/ortools/sat/sat_runner.cc +++ b/ortools/sat/sat_runner.cc @@ -20,13 +20,13 @@ #include "absl/log/check.h" #include "absl/log/flags.h" #include "absl/log/initialize.h" +#include "absl/log/log.h" #include "absl/strings/match.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "google/protobuf/arena.h" #include "google/protobuf/text_format.h" #include "ortools/base/helpers.h" -#include "ortools/base/logging.h" #include "ortools/base/options.h" #include "ortools/base/path.h" #include "ortools/sat/boolean_problem.h" diff --git a/ortools/sat/sat_solver.cc b/ortools/sat/sat_solver.cc index 4d2a7f6059..d0a43a741e 100644 --- a/ortools/sat/sat_solver.cc +++ b/ortools/sat/sat_solver.cc @@ -26,6 +26,7 @@ #include "absl/container/btree_set.h" #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index 20746e0df1..f16172a087 100644 --- a/ortools/sat/scheduling_cuts.cc +++ 
b/ortools/sat/scheduling_cuts.cc @@ -17,7 +17,6 @@ #include #include -#include #include #include #include @@ -29,7 +28,9 @@ #include "absl/container/btree_set.h" #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/strings/str_cat.h" +#include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "ortools/base/logging.h" @@ -38,7 +39,6 @@ #include "ortools/sat/cuts.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" -#include "ortools/sat/intervals.h" #include "ortools/sat/linear_constraint.h" #include "ortools/sat/linear_constraint_manager.h" #include "ortools/sat/model.h" @@ -63,7 +63,57 @@ BaseEvent::BaseEvent(int t, SchedulingConstraintHelper* x_helper) x_start_max(x_helper->StartMax(t)), x_end_min(x_helper->EndMin(t)), x_end_max(x_helper->EndMax(t)), - x_size_min(x_helper->SizeMin(t)) {} + x_size_min(x_helper->SizeMin(t)), + x_size_max(x_helper->SizeMax(t)) {} + +void BaseEvent::PropagateDecomposedEnergy( + const VariablesAssignment& assignment) { + if (decomposed_energy.empty()) return; + + IntegerValue new_x_size_min = kMaxIntegerValue; + IntegerValue new_x_size_max = kMinIntegerValue; + IntegerValue new_y_size_min = kMaxIntegerValue; + IntegerValue new_y_size_max = kMinIntegerValue; + IntegerValue new_energy_min = kMaxIntegerValue; + int new_size = 0; + for (const auto [lit, fixed_size, fixed_demand] : decomposed_energy) { + // Filter out false literals and out of bounds values. 
+ if (assignment.LiteralIsFalse(lit) || fixed_size < x_size_min || + fixed_size > x_size_max || fixed_demand < y_size_min || + fixed_demand > y_size_max) { + continue; + } + + if (assignment.LiteralIsTrue(lit)) { + new_x_size_min = fixed_size; + new_x_size_max = fixed_size; + new_y_size_min = fixed_demand; + new_y_size_max = fixed_demand; + new_energy_min = fixed_size * fixed_demand; + decomposed_energy.clear(); + decomposed_energy.push_back({lit, fixed_size, fixed_demand}); + new_size = 1; + break; + } + + new_x_size_min = std::min(new_x_size_min, fixed_size); + new_x_size_max = std::max(new_x_size_max, fixed_size); + new_y_size_min = std::min(new_y_size_min, fixed_demand); + new_y_size_max = std::max(new_y_size_max, fixed_demand); + new_energy_min = std::min(new_energy_min, fixed_size * fixed_demand); + decomposed_energy[new_size++] = {lit, fixed_size, fixed_demand}; + } + decomposed_energy.resize(new_size); + CHECK(!decomposed_energy.empty()); + + // Update the event. + x_size_min = new_x_size_min; + x_size_max = new_x_size_max; + y_size_min = new_y_size_min; + y_size_max = new_y_size_max; + energy_min = new_energy_min; + use_energy = energy_min > x_size_min * y_size_min; +} struct EnergyEvent : BaseEvent { EnergyEvent(int t, SchedulingConstraintHelper* x_helper) @@ -604,6 +654,8 @@ CutGenerator CreateCumulativeEnergyCutGenerator( model](LinearConstraintManager* manager) { if (!helper->SynchronizeAndSetTimeDirection(true)) return false; if (!demands_helper->CacheAllEnergyValues()) return true; + const VariablesAssignment& assignment = + model->GetOrCreate()->Assignment(); const auto& lp_values = manager->LpValues(); std::vector events; @@ -617,8 +669,10 @@ CutGenerator CreateCumulativeEnergyCutGenerator( EnergyEvent e(i, helper); e.y_size = demands_helper->Demands()[i]; e.y_size_min = demands_helper->DemandMin(i); + e.y_size_max = demands_helper->DemandMax(i); e.decomposed_energy = demands_helper->DecomposedEnergies()[i]; e.energy_min = 
demands_helper->EnergyMin(i); + e.PropagateDecomposedEnergy(assignment); e.energy_is_quadratic = demands_helper->EnergyIsQuadratic(i); if (!helper->IsPresent(i)) { e.presence_literal_index = helper->PresenceLiteral(i).Index(); @@ -671,6 +725,7 @@ CutGenerator CreateNoOverlapEnergyCutGenerator( EnergyEvent e(i, helper); e.y_size = IntegerValue(1); e.y_size_min = IntegerValue(1); + e.y_size_max = IntegerValue(1); e.energy_min = e.x_size_min; if (!helper->IsPresent(i)) { e.presence_literal_index = helper->PresenceLiteral(i).Index(); @@ -762,16 +817,11 @@ CutGenerator CreateCumulativeTimeTableCutGenerator( // Sort events by time. // It is also important that all positive event with the same time as // negative events appear after for the correctness of the algo below. - std::sort(events.begin(), events.end(), - [](const TimeTableEvent& i, const TimeTableEvent& j) { - if (i.time == j.time) { - if (i.is_positive == j.is_positive) { - return i.interval_index < j.interval_index; - } - return !i.is_positive; - } - return i.time < j.time; - }); + std::stable_sort(events.begin(), events.end(), + [](const TimeTableEvent& i, const TimeTableEvent& j) { + return std::tie(i.time, i.is_positive) < + std::tie(j.time, j.is_positive); + }); double sum_of_demand_lp = 0.0; bool positive_event_added_since_last_check = false; @@ -863,11 +913,12 @@ void GenerateCutsBetweenPairOfNonOverlappingTasks( const int num_events = events.size(); if (num_events <= 1) return; - std::sort(events.begin(), events.end(), - [](const CachedIntervalData& e1, const CachedIntervalData& e2) { - return e1.start_min < e2.start_min || - (e1.start_min == e2.start_min && e1.end_max < e2.end_max); - }); + std::stable_sort( + events.begin(), events.end(), + [](const CachedIntervalData& e1, const CachedIntervalData& e2) { + return e1.start_min < e2.start_min || + (e1.start_min == e2.start_min && e1.end_max < e2.end_max); + }); // Balas disjunctive cuts on 2 tasks a and b: // start_1 * (duration_1 + start_min_1 - 
start_min_2) + @@ -1017,14 +1068,22 @@ CtEvent::CtEvent(int t, SchedulingConstraintHelper* x_helper) : BaseEvent(t, x_helper) {} std::string CtEvent::DebugString() const { - return absl::StrCat("CtEvent(x_end = ", x_end.DebugString(), - ", x_start_min = ", x_start_min.value(), - ", x_start_max = ", x_start_max.value(), - ", x_size_min = ", x_size_min.value(), - ", x_lp_end = ", x_lp_end, - ", y_size_min = ", y_size_min.value(), - ", energy_min = ", energy_min.value(), - ", use_energy = ", use_energy, ", lifted = ", lifted); + return absl::StrCat( + "CtEvent(x_end = ", x_end.DebugString(), + ", x_start_min = ", x_start_min.value(), + ", x_start_max = ", x_start_max.value(), + ", x_size_min = ", x_size_min.value(), + ", x_size_max = ", x_size_max.value(), + ", x_end_min = ", x_end_min.value(), ", x_end_max = ", x_end_max.value(), + ", x_lp_end = ", x_lp_end, ", y_size_min = ", y_size_min.value(), + ", y_size_max = ", y_size_max.value(), + ", energy_min = ", energy_min.value(), ", use_energy = ", use_energy, + ", lifted = ", lifted, ", decomposed_energy = [", + absl::StrJoin(decomposed_energy, ", ", + [](std::string* out, const LiteralValueValue& e) { + absl::StrAppend(out, e.left_value, " * ", e.right_value); + }), + "]"); } namespace { @@ -1152,11 +1211,11 @@ void GenerateShortCompletionTimeCutsWithExactBound( IntegerValue capacity_max, Model* model, LinearConstraintManager* manager) { TopNCuts top_n_cuts(5); // Sort by start min to bucketize by start_min. 
- std::sort(events.begin(), events.end(), - [](const CtEvent& e1, const CtEvent& e2) { - return std::tie(e1.x_start_min, e1.y_size_min, e1.x_lp_end) < - std::tie(e2.x_start_min, e2.y_size_min, e2.x_lp_end); - }); + std::stable_sort( + events.begin(), events.end(), [](const CtEvent& e1, const CtEvent& e2) { + return std::tie(e1.x_start_min, e1.y_size_min, e1.x_lp_end) < + std::tie(e2.x_start_min, e2.y_size_min, e2.x_lp_end); + }); std::vector permutable_events; for (int start = 0; start + 1 < events.size(); ++start) { // Skip to the next start_min value. @@ -1180,10 +1239,10 @@ void GenerateShortCompletionTimeCutsWithExactBound( } } - std::sort(residual_tasks.begin(), residual_tasks.end(), - [](const CtEvent& e1, const CtEvent& e2) { - return e1.x_lp_end < e2.x_lp_end; - }); + std::stable_sort(residual_tasks.begin(), residual_tasks.end(), + [](const CtEvent& e1, const CtEvent& e2) { + return e1.x_lp_end < e2.x_lp_end; + }); IntegerValue sum_of_durations(0); IntegerValue sum_of_energies(0); @@ -1242,7 +1301,8 @@ void GenerateShortCompletionTimeCutsWithExactBound( is_lifted |= event.lifted; cut.AddTerm(event.x_end, IntegerValue(1)); } - std::string full_name = cut_name; + std::string full_name = cut_name + "_unweighted"; + if (is_lifted) full_name.append("_lifted"); top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues()); } @@ -1270,21 +1330,28 @@ namespace { // Returns a copy of the event with the start time increased to time. // Energy (min and decomposed) are updated accordingly. -CtEvent TrimEventAfter(IntegerValue time, const CtEvent& old_event) { - DCHECK_GT(time, old_event.x_start_min); +CtEvent CopyAndTrimEventAfter(const CtEvent& old_event, IntegerValue time, + const VariablesAssignment& assignment) { + CHECK_GT(time, old_event.x_start_min); + CHECK_GT(old_event.x_start_min + old_event.x_size_min, time); CtEvent event = old_event; // Copy. event.lifted = true; - // Build the vector of energies as the vector of sizes. 
- event.energy_min = ComputeEnergyMinInWindow( - event.x_start_min, event.x_start_max, event.x_end_min, event.x_end_max, - event.x_size_min, event.y_size_min, event.decomposed_energy, time, - event.x_end_max); - event.x_size_min = event.x_size_min + event.x_start_min - time; - event.x_start_min = time; - if (event.energy_min > event.x_size_min * event.y_size_min) { - event.use_energy = true; + // Trim the decomposed energy and compute the energy min in the window. + + const IntegerValue shift = time - event.x_start_min; + CHECK_GT(shift, IntegerValue(0)); + event.x_size_min -= shift; + event.x_size_max -= shift; + event.energy_min = event.x_size_min * event.y_size_min; + if (!event.decomposed_energy.empty()) { + // Trim durations + for (auto& [literal, size, demand] : event.decomposed_energy) { + CHECK_GT(size, shift); + size -= shift; + } + event.PropagateDecomposedEnergy(assignment); } - DCHECK_GE(event.energy_min, event.x_size_min * event.y_size_min); + event.x_start_min = time; return event; } @@ -1295,7 +1362,7 @@ void AddEventDemandsToCapacitySubsetSum( IntegerValue capacity_max, std::vector& tmp_possible_demands, MaxBoundedSubsetSum& dp) { if (dp.CurrentMax() != capacity_max) { - if (event.y_size_is_fixed) { + if (event.y_size_is_fixed()) { dp.Add(event.y_size_min.value()); } else if (!event.decomposed_energy.empty()) { tmp_possible_demands.clear(); @@ -1373,11 +1440,11 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, std::vector tmp_possible_demands; // Sort by start min to bucketize by start_min. 
- std::sort(events.begin(), events.end(), - [](const CtEvent& e1, const CtEvent& e2) { - return std::tie(e1.x_start_min, e1.y_size_min, e1.x_lp_end) < - std::tie(e2.x_start_min, e2.y_size_min, e2.x_lp_end); - }); + std::stable_sort( + events.begin(), events.end(), [](const CtEvent& e1, const CtEvent& e2) { + return std::tie(e1.x_start_min, e1.y_size_min, e1.x_lp_end) < + std::tie(e2.x_start_min, e2.y_size_min, e2.x_lp_end); + }); // First loop: we loop on potential start times. for (int start = 0; start + 1 < events.size(); ++start) { @@ -1398,7 +1465,8 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, for (int before = 0; before < start; ++before) { if (events[before].x_start_min + events[before].x_size_min > sequence_start_min) { - CtEvent event = TrimEventAfter(sequence_start_min, events[before]); + CtEvent event = CopyAndTrimEventAfter(events[before], + sequence_start_min, assignment); if (event.energy_min <= 0) continue; residual_tasks.push_back(std::move(event)); } @@ -1408,7 +1476,8 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, int best_end = -1; double best_efficacy = 0.01; IntegerValue best_min_contrib = 0; - IntegerValue best_capacity = 0; + bool best_uses_subset_sum = false; + bool best_uses_shapes = false; // Used in the first term of the rhs of the equation. IntegerValue sum_event_contributions = 0; @@ -1416,6 +1485,8 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, IntegerValue sum_energy = 0; // For normalization. IntegerValue sum_square_energy = 0; + // Does the cut uses shapes when computing individual event contributions. + bool uses_shapes = false; double lp_contrib = 0.0; IntegerValue current_start_min(kMaxIntegerValue); @@ -1424,10 +1495,10 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, // We will add tasks one by one, sorted by end time, and evaluate the // potential cut at each step. 
- std::sort(residual_tasks.begin(), residual_tasks.end(), - [](const CtEvent& e1, const CtEvent& e2) { - return e1.x_lp_end < e2.x_lp_end; - }); + std::stable_sort(residual_tasks.begin(), residual_tasks.end(), + [](const CtEvent& e1, const CtEvent& e2) { + return e1.x_lp_end < e2.x_lp_end; + }); // Second loop: we add tasks one by one. for (int i = 0; i < residual_tasks.size(); ++i) { @@ -1443,9 +1514,42 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, // area = event.y_size_min * event.x_size_min * event.x_size_min // In the cumulative case, we can have energy_min > side_min * demand_min. // In that case, we use energy_min * size_min. - if (!AddProductTo(event.energy_min, event.x_size_min, - &sum_event_contributions)) { - break; + if (event.decomposed_energy.empty()) { + if (!AddProductTo(event.energy_min, event.x_size_min, + &sum_event_contributions)) { + break; + } + } else { + IntegerValue min_shape_area = kMaxIntegerValue; + for (const auto& [literal, size, demand] : event.decomposed_energy) { + IntegerValue shape_area = CapProdI(CapProdI(size, size), demand); + if (assignment.LiteralIsFalse(literal)) continue; + if (assignment.LiteralIsTrue(literal)) { + min_shape_area = shape_area; + break; + } else { + min_shape_area = std::min(min_shape_area, shape_area); + } + } + if (min_shape_area < event.energy_min * event.x_size_min) { + VLOG(2) << "min_shape_area: " << min_shape_area + << " energy_min: " << event.energy_min + << " x_size_min: " << event.x_size_min + << "simple_min_shape_area: " + << event.energy_min * event.x_size_min; + VLOG(2) << " event = " << event.DebugString(); + } + CHECK_GE(min_shape_area, event.energy_min * event.x_size_min); + + if (!AddTo(min_shape_area, &sum_event_contributions)) break; + if (min_shape_area > event.energy_min * event.x_size_min) { + VLOG(2) << "min_shape_area: " << min_shape_area + << " simple_min_shape_area: " + << event.energy_min * event.x_size_min; + VLOG(2) << " event = " << 
event.DebugString(); + + uses_shapes = true; + } } if (!AddSquareTo(event.energy_min, &sum_square_energy)) break; @@ -1492,7 +1596,8 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, best_efficacy = efficacy; best_end = i; best_min_contrib = min_contrib; - best_capacity = reachable_capacity; + best_uses_subset_sum = reachable_capacity < capacity_max; + best_uses_shapes = uses_shapes; } } @@ -1511,9 +1616,8 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, std::string full_name(cut_name); if (is_lifted) full_name.append("_lifted"); if (add_energy_to_name) full_name.append("_energy"); - if (best_capacity < capacity_max) { - full_name.append("_subsetsum"); - } + if (best_uses_subset_sum) full_name.append("_subsetsum"); + if (best_uses_shapes) full_name.append("_shapes"); top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues()); } } @@ -1543,6 +1647,7 @@ CutGenerator CreateNoOverlapCompletionTimeCutGenerator( event.x_end = end_expr; event.x_lp_end = end_expr.LpValue(lp_values); event.y_size_min = IntegerValue(1); + event.y_size_max = IntegerValue(1); event.energy_min = size_min; events.push_back(event); } @@ -1588,6 +1693,8 @@ CutGenerator CreateCumulativeCompletionTimeCutGenerator( capacity](bool mirror) { std::vector events; const auto& lp_values = manager->LpValues(); + const VariablesAssignment& assignment = + model->GetOrCreate()->Assignment(); for (int index = 0; index < helper->NumTasks(); ++index) { if (!helper->IsPresent(index)) continue; if (helper->SizeMin(index) > 0 && @@ -1596,9 +1703,12 @@ CutGenerator CreateCumulativeCompletionTimeCutGenerator( event.x_end = helper->Ends()[index]; event.x_lp_end = event.x_end.LpValue(lp_values); event.y_size_min = demands_helper->DemandMin(index); + event.y_size_max = demands_helper->DemandMax(index); event.energy_min = demands_helper->EnergyMin(index); + event.use_energy = + event.energy_min > event.x_size_min * event.y_size_min; event.decomposed_energy = 
demands_helper->DecomposedEnergies()[index]; - event.y_size_is_fixed = demands_helper->DemandIsFixed(index); + event.PropagateDecomposedEnergy(assignment); events.push_back(event); } } diff --git a/ortools/sat/scheduling_cuts.h b/ortools/sat/scheduling_cuts.h index 5f78d2ca98..e9a6a4be9b 100644 --- a/ortools/sat/scheduling_cuts.h +++ b/ortools/sat/scheduling_cuts.h @@ -22,6 +22,7 @@ #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" #include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" #include "ortools/sat/scheduling_helpers.h" namespace operations_research { @@ -109,9 +110,11 @@ struct BaseEvent { IntegerValue x_end_min; IntegerValue x_end_max; IntegerValue x_size_min; + IntegerValue x_size_max; // Cache of the bounds on the y direction. IntegerValue y_size_min; + IntegerValue y_size_max; // The energy min of this event. IntegerValue energy_min; @@ -119,6 +122,16 @@ struct BaseEvent { // If non empty, a decomposed view of the energy of this event. // First value in each pair is x_size, second is y_size. std::vector decomposed_energy; + + // Indicates if the events used the optional energy information from the + // model. + bool use_energy = false; + + // If we know that the size on y is fixed, we can use some heuristic to + // compute the maximum subset sums under the capacity and use that instead + // of the full capacity. + bool y_size_is_fixed() const { return y_size_min == y_size_max; } + void PropagateDecomposedEnergy(const VariablesAssignment& assignment); }; // Stores the event for a rectangle along the two axis x and y. @@ -132,19 +145,10 @@ struct CtEvent : BaseEvent { AffineExpression x_end; double x_lp_end; - // Indicates if the events used the optional energy information from the - // model. - bool use_energy = false; - // Indicates if the cut is lifted, that is if it includes tasks that are not // strictly contained in the current time window. 
bool lifted = false; - // If we know that the size on y is fixed, we can use some heuristic to - // compute the maximum subset sums under the capacity and use that instead - // of the full capacity. - bool y_size_is_fixed = false; - std::string DebugString() const; }; diff --git a/ortools/sat/scheduling_helpers.cc b/ortools/sat/scheduling_helpers.cc index dd80a995fa..bbca7a12f1 100644 --- a/ortools/sat/scheduling_helpers.cc +++ b/ortools/sat/scheduling_helpers.cc @@ -824,18 +824,28 @@ bool SchedulingDemandHelper::DemandIsFixed(int t) const { } bool SchedulingDemandHelper::DecreaseEnergyMax(int t, IntegerValue value) { - if (value < EnergyMin(t)) { - if (helper_->IsOptional(t)) { - return helper_->PushTaskAbsence(t); - } else { - return helper_->ReportConflict(); - } - } else if (!decomposed_energies_[t].empty()) { + if (helper_->IsAbsent(t)) return true; + if (value < EnergyMin(t)) return helper_->PushTaskAbsence(t); + + if (!decomposed_energies_[t].empty()) { for (const auto [lit, fixed_size, fixed_demand] : decomposed_energies_[t]) { if (fixed_size * fixed_demand > value) { - if (assignment_.LiteralIsTrue(lit)) return helper_->ReportConflict(); + // `lit` encodes that the energy is higher than value. So either lit + // must be false or the task must be absent. if (assignment_.LiteralIsFalse(lit)) continue; - if (!helper_->PushLiteral(lit.Negated())) return false; + if (assignment_.LiteralIsTrue(lit)) { + // Task must be absent. + if (helper_->PresenceLiteral(t) != lit) { + helper_->MutableLiteralReason()->push_back(lit.Negated()); + } + return helper_->PushTaskAbsence(t); + } + if (helper_->IsPresent(t)) { + // Task is present, `lit` must be false. 
+ DCHECK(!helper_->IsOptional(t) || helper_->PresenceLiteral(t) != lit); + helper_->AddPresenceReason(t); + if (!helper_->PushLiteral(lit.Negated())) return false; + } } } } else { diff --git a/ortools/sat/shaving_solver.cc b/ortools/sat/shaving_solver.cc index 7c0e611068..3ec00836bd 100644 --- a/ortools/sat/shaving_solver.cc +++ b/ortools/sat/shaving_solver.cc @@ -23,10 +23,10 @@ #include "absl/base/thread_annotations.h" #include "absl/flags/flag.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" #include "absl/synchronization/mutex.h" -#include "ortools/base/logging.h" #include "ortools/graph/connected_components.h" #include "ortools/sat/cp_model_copy.h" #include "ortools/sat/cp_model_lns.h" diff --git a/ortools/sat/simplification.cc b/ortools/sat/simplification.cc index e5e7276881..f8fe5b9411 100644 --- a/ortools/sat/simplification.cc +++ b/ortools/sat/simplification.cc @@ -22,6 +22,7 @@ #include "absl/container/btree_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/types/span.h" #include "ortools/algorithms/dynamic_partition.h" #include "ortools/base/adjustable_priority_queue-inl.h" diff --git a/ortools/sat/solution_crush.cc b/ortools/sat/solution_crush.cc index b3ceae15ef..f0ef4d1479 100644 --- a/ortools/sat/solution_crush.cc +++ b/ortools/sat/solution_crush.cc @@ -27,9 +27,9 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/types/span.h" #include "ortools/algorithms/sparse_permutation.h" -#include "ortools/base/logging.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_utils.h" #include "ortools/sat/diffn_util.h" diff --git a/ortools/sat/subsolver.cc b/ortools/sat/subsolver.cc index e4d059910a..b9f59b9019 100644 --- a/ortools/sat/subsolver.cc +++ b/ortools/sat/subsolver.cc @@ -23,6 +23,7 @@ #include "absl/flags/flag.h" #include 
"absl/log/check.h" +#include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" diff --git a/ortools/sat/synchronization.cc b/ortools/sat/synchronization.cc index 2ea2419e93..0c079ea6c0 100644 --- a/ortools/sat/synchronization.cc +++ b/ortools/sat/synchronization.cc @@ -31,6 +31,7 @@ #include #include "absl/hash/hash.h" +#include "absl/log/log.h" #include "absl/time/time.h" #include "ortools/base/logging.h" #include "ortools/base/timer.h" diff --git a/ortools/sat/work_assignment.cc b/ortools/sat/work_assignment.cc index a6cb787cd9..30fc977c0c 100644 --- a/ortools/sat/work_assignment.cc +++ b/ortools/sat/work_assignment.cc @@ -28,10 +28,10 @@ #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "absl/synchronization/mutex.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" #include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/cp_model_utils.h" #include "ortools/sat/integer.h" diff --git a/ortools/scheduling/BUILD.bazel b/ortools/scheduling/BUILD.bazel index 89968fe42d..b52047c8ac 100644 --- a/ortools/scheduling/BUILD.bazel +++ b/ortools/scheduling/BUILD.bazel @@ -13,8 +13,8 @@ load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") +load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") -load("@rules_python//python:proto.bzl", "py_proto_library") package(default_visibility = ["//visibility:public"]) diff --git a/ortools/service/v1/BUILD.bazel b/ortools/service/v1/BUILD.bazel index e8f10fdfa0..018e3dbcd8 100644 --- a/ortools/service/v1/BUILD.bazel +++ b/ortools/service/v1/BUILD.bazel @@ -13,7 +13,7 @@ load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@com_google_protobuf//bazel:proto_library.bzl", 
"proto_library") -load("@rules_python//python:proto.bzl", "py_proto_library") +load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") package(default_visibility = [ "//ortools/math_opt:__subpackages__", diff --git a/ortools/service/v1/mathopt/BUILD.bazel b/ortools/service/v1/mathopt/BUILD.bazel index 596433d643..7498077bb2 100644 --- a/ortools/service/v1/mathopt/BUILD.bazel +++ b/ortools/service/v1/mathopt/BUILD.bazel @@ -14,7 +14,7 @@ load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@com_google_protobuf//bazel:java_proto_library.bzl", "java_proto_library") load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@rules_python//python:proto.bzl", "py_proto_library") +load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") package(default_visibility = [ "//ortools/math_opt:__subpackages__", diff --git a/ortools/util/BUILD.bazel b/ortools/util/BUILD.bazel index 7afecd38b1..e478a5674b 100644 --- a/ortools/util/BUILD.bazel +++ b/ortools/util/BUILD.bazel @@ -13,8 +13,8 @@ load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") +load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") -load("@rules_python//python:proto.bzl", "py_proto_library") package(default_visibility = ["//visibility:public"]) @@ -79,7 +79,10 @@ cc_library( name = "bitset", srcs = ["bitset.cc"], hdrs = ["bitset.h"], - deps = ["//ortools/base"], + deps = [ + "//ortools/base", + "@com_google_absl//absl/base:log_severity", + ], ) cc_library( @@ -213,6 +216,7 @@ cc_library( deps = [ ":running_stat", "//ortools/base", + "//ortools/base:base_export", "//ortools/base:sysinfo", "//ortools/base:timer", "//ortools/base:types", diff --git a/ortools/util/bitset.cc b/ortools/util/bitset.cc index b0449ae4d0..4b000cf30c 100644 --- a/ortools/util/bitset.cc +++ 
b/ortools/util/bitset.cc @@ -13,8 +13,10 @@ #include "ortools/util/bitset.h" -#include "ortools/base/commandlineflags.h" -#include "ortools/base/logging.h" +#include + +#include "absl/flags/flag.h" +#include "absl/log/check.h" ABSL_FLAG(int, bitset_small_bitset_count, 8, "threshold to count bits with buckets"); diff --git a/ortools/util/bitset.h b/ortools/util/bitset.h index 9469bb8342..4122555eb7 100644 --- a/ortools/util/bitset.h +++ b/ortools/util/bitset.h @@ -911,7 +911,7 @@ class SparseBitset { // instance. This way, after the loop, a client can call this for efficiency. void NotifyAllClear() { #if !defined(NDEBUG) - for (IntegerType index : to_clear_) CHECK(!bitset_[index]); + for (IntegerType index : to_clear_) CHECK(!bitset_[index]); #endif to_clear_.clear(); } diff --git a/ortools/util/logging.h b/ortools/util/logging.h index 3770c50d56..407c428d8e 100644 --- a/ortools/util/logging.h +++ b/ortools/util/logging.h @@ -19,7 +19,7 @@ #include #include -#include "absl/strings/str_cat.h" +#include "absl/strings/str_cat.h" // IWYU pragma: export #include "ortools/base/timer.h" namespace operations_research { diff --git a/ortools/util/stats.cc b/ortools/util/stats.cc index d998a43dc5..a8e8073c34 100644 --- a/ortools/util/stats.cc +++ b/ortools/util/stats.cc @@ -15,11 +15,13 @@ #include #include +#include #include #include #include "absl/log/check.h" #include "absl/strings/str_format.h" +#include "absl/strings/string_view.h" #include "ortools/base/logging.h" #include "ortools/base/stl_util.h" #include "ortools/base/types.h" @@ -51,8 +53,6 @@ Stat::Stat(absl::string_view name, StatsGroup* group) : name_(name) { std::string Stat::StatString() const { return name_ + ": " + ValueAsString(); } -StatsGroup::~StatsGroup() { gtl::STLDeleteValues(&time_distributions_); } - void StatsGroup::Register(Stat* stat) { stats_.push_back(stat); } void StatsGroup::Reset() { @@ -117,13 +117,14 @@ std::string StatsGroup::StatString() const { return result; } -TimeDistribution* 
StatsGroup::LookupOrCreateTimeDistribution(std::string name) { - TimeDistribution*& ref = time_distributions_[name]; +TimeDistribution* StatsGroup::LookupOrCreateTimeDistribution( + absl::string_view name) { + std::unique_ptr& ref = time_distributions_[name]; if (ref == nullptr) { - ref = new TimeDistribution(name); - Register(ref); + ref = std::make_unique(name); + Register(ref.get()); } - return ref; + return ref.get(); } DistributionStat::DistributionStat(absl::string_view name) diff --git a/ortools/util/stats.h b/ortools/util/stats.h index 4a997a54e9..dc123d60a5 100644 --- a/ortools/util/stats.h +++ b/ortools/util/stats.h @@ -69,10 +69,13 @@ #define OR_TOOLS_UTIL_STATS_H_ #include +#include #include +#include #include #include +#include "absl/container/flat_hash_map.h" #include "absl/strings/string_view.h" #include "absl/time/time.h" #include "ortools/base/timer.h" @@ -138,7 +141,7 @@ class StatsGroup { // This type is neither copyable nor movable. StatsGroup(const StatsGroup&) = delete; StatsGroup& operator=(const StatsGroup&) = delete; - ~StatsGroup(); + ~StatsGroup() = default; // Registers a Stat, which will appear in the string returned by StatString(). // The Stat object must live as long as this StatsGroup. @@ -154,9 +157,9 @@ class StatsGroup { void SetPrintOrder(PrintOrder print_order) { print_order_ = print_order; } // Returns and if needed creates and registers a TimeDistribution with the - // given name. Note that this involve a map lookup and his thus slower than - // directly accessing a TimeDistribution variable. - TimeDistribution* LookupOrCreateTimeDistribution(std::string name); + // given name. Note that this involve a hash map lookup and is thus slower + // than directly accessing a TimeDistribution variable. + TimeDistribution* LookupOrCreateTimeDistribution(absl::string_view name); // Calls Reset() on all the statistics registered with this group. 
void Reset(); @@ -165,7 +168,8 @@ class StatsGroup { std::string name_; PrintOrder print_order_ = SORT_BY_PRIORITY_THEN_VALUE; std::vector stats_; - std::map time_distributions_; + absl::flat_hash_map> + time_distributions_; }; // Base class to track and compute statistics about the distribution of a @@ -348,38 +352,25 @@ class EnabledScopedTimeDistributionUpdater { class DisabledScopedTimeDistributionUpdater { public: - explicit DisabledScopedTimeDistributionUpdater(TimeDistribution* stat) {} + explicit DisabledScopedTimeDistributionUpdater(TimeDistribution*) {} // This type is neither copyable nor movable. DisabledScopedTimeDistributionUpdater( const DisabledScopedTimeDistributionUpdater&) = delete; DisabledScopedTimeDistributionUpdater& operator=( const DisabledScopedTimeDistributionUpdater&) = delete; - void AlsoUpdate(TimeDistribution* also_update) {} + void AlsoUpdate(TimeDistribution*) {} }; -class DisabledScopedInstructionCounter { +class DisabledScopedTimeStats { public: - explicit DisabledScopedInstructionCounter(absl::string_view) {} - DisabledScopedInstructionCounter(const DisabledScopedInstructionCounter&) = - delete; - DisabledScopedInstructionCounter& operator=( - const DisabledScopedInstructionCounter&) = delete; + explicit DisabledScopedTimeStats(StatsGroup*, const char*) {} + DisabledScopedTimeStats(const DisabledScopedTimeStats&) = delete; + DisabledScopedTimeStats& operator=(const DisabledScopedTimeStats&) = delete; + DisabledScopedTimeStats(DisabledScopedTimeStats&&) = delete; + DisabledScopedTimeStats& operator=(DisabledScopedTimeStats&&) = delete; }; -#ifdef OR_STATS - -using ScopedTimeDistributionUpdater = EnabledScopedTimeDistributionUpdater; -#ifdef HAS_PERF_SUBSYSTEM -using ScopedInstructionCounter = EnabledScopedInstructionCounter; -#else // HAS_PERF_SUBSYSTEM -using ScopedInstructionCounter = DisabledScopedInstructionCounter; -#endif // HAS_PERF_SUBSYSTEM - -// Simple macro to be used by a client that want to execute costly operations 
-// only if OR_STATS is defined. -#define IF_STATS_ENABLED(instructions) instructions - // Measures the time from this macro line to the end of the scope and adds it // to the distribution (from the given StatsGroup) with the same name as the // enclosing function. @@ -387,39 +378,51 @@ using ScopedInstructionCounter = DisabledScopedInstructionCounter; // Note(user): This adds more extra overhead around the measured code compared // to defining your own TimeDistribution stat in your StatsGroup. About 80ns // per measurement compared to about 20ns (as of 2012-06, on my workstation). -#define SCOPED_TIME_STAT(stats) \ - operations_research::ScopedTimeDistributionUpdater scoped_time_stat( \ - (stats)->LookupOrCreateTimeDistribution(__FUNCTION__)) +class EnabledScopedTimeStats { + public: + explicit EnabledScopedTimeStats(StatsGroup* stats, + absl::string_view function_name) + : scoped_time_stat_( + stats->LookupOrCreateTimeDistribution(function_name)) {} + EnabledScopedTimeStats(const EnabledScopedTimeStats&) = delete; + EnabledScopedTimeStats& operator=(const EnabledScopedTimeStats&) = delete; + EnabledScopedTimeStats(EnabledScopedTimeStats&&) = delete; + EnabledScopedTimeStats& operator=(EnabledScopedTimeStats&&) = delete; -#ifdef HAS_PERF_SUBSYSTEM + private: + operations_research::EnabledScopedTimeDistributionUpdater scoped_time_stat_; +}; -inline std::string RemoveOperationsResearchAndGlop( - const std::string& pretty_function) { - return strings::GlobalReplaceSubstrings( - pretty_function, {{"operations_research::", ""}, {"glop::", ""}}); -} +#ifdef OR_STATS -#define SCOPED_INSTRUCTION_COUNT(time_limit) \ - operations_research::ScopedInstructionCounter scoped_instruction_count( \ - RemoveOperationsResearchAndGlop(__PRETTY_FUNCTION__), time_limit) +using ScopedTimeDistributionUpdater = EnabledScopedTimeDistributionUpdater; +using ScopedTimeStats = EnabledScopedTimeStats; -#else // !HAS_PERF_SUBSYSTEM -#define SCOPED_INSTRUCTION_COUNT(time_limit) -#endif // 
HAS_PERF_SUBSYSTEM +// Simple macro to be used by a client that want to execute costly operations +// only if OR_STATS is defined. +#define IF_STATS_ENABLED(instructions) instructions #else // !OR_STATS // If OR_STATS is not defined, we remove some instructions that may be time // consuming. using ScopedTimeDistributionUpdater = DisabledScopedTimeDistributionUpdater; -using ScopedInstructionCounter = DisabledScopedInstructionCounter; +using ScopedTimeStats = DisabledScopedTimeStats; -#define IF_STATS_ENABLED(instructions) -#define SCOPED_TIME_STAT(stats) -#define SCOPED_INSTRUCTION_COUNT(time_limit) +// Defining it that way makes sure that the compiler still sees the code and +// checks that the syntax & types are valid. +#define IF_STATS_ENABLED(instructions) \ + if constexpr (false) { \ + instructions; \ + } #endif // OR_STATS +#define SCOPED_TIME_STAT(stats) \ + operations_research::ScopedTimeStats scoped_time_stat(stats, __FUNCTION__); + +#define SCOPED_INSTRUCTION_COUNT(time_limit) + } // namespace operations_research #endif // OR_TOOLS_UTIL_STATS_H_ diff --git a/ortools/util/time_limit.h b/ortools/util/time_limit.h index 4599724723..0ad1d8589d 100644 --- a/ortools/util/time_limit.h +++ b/ortools/util/time_limit.h @@ -30,6 +30,7 @@ #include "absl/synchronization/mutex.h" #include "absl/time/clock.h" #include "absl/time/time.h" +#include "ortools/base/base_export.h" #include "ortools/base/timer.h" #include "ortools/base/types.h" #include "ortools/util/running_stat.h" @@ -264,7 +265,7 @@ class OR_DLL TimeLimit { * If the passed limit contain an external Boolean, replace the current one * with it. Not that this does not change the secondary Boolean. */ - void MergeWithGlobalTimeLimit(TimeLimit* other); + void MergeWithGlobalTimeLimit(const TimeLimit* other); /** * Overwrites the deterministic time limit with the new value. 
@@ -484,7 +485,7 @@ inline void TimeLimit::ResetLimitFromParameters(const Parameters& parameters) { parameters.max_deterministic_time()); } -inline void TimeLimit::MergeWithGlobalTimeLimit(TimeLimit* other) { +inline void TimeLimit::MergeWithGlobalTimeLimit(const TimeLimit* other) { if (other == nullptr) return; ResetTimers( std::min(GetTimeLeft(), other->GetTimeLeft()), diff --git a/ortools/xpress/environment.h b/ortools/xpress/environment.h index e066ad94a3..326528754c 100644 --- a/ortools/xpress/environment.h +++ b/ortools/xpress/environment.h @@ -20,7 +20,7 @@ #include #include "absl/status/status.h" -#include "ortools/base/macros.h" +#include "ortools/base/base_export.h" extern "C" { typedef struct xo_prob_struct* XPRSprob;