From 14f1c7f6c88541352760a371006233d278817d7e Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 18 Sep 2024 17:28:14 +0200 Subject: [PATCH 001/105] sat: rework go_proto_library() to be in sync with google3 --- ortools/sat/BUILD.bazel | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 004f357360..6a8d73d039 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -62,12 +62,6 @@ cc_proto_library( deps = [":sat_parameters_proto"], ) -go_proto_library( - name = "sat_parameters_go_proto", - proto = ":sat_parameters_proto", - importpath = "github.com/google/or-tools/ortools/sat/proto/satparameters" -) - py_proto_library( name = "sat_parameters_py_pb2", deps = [":sat_parameters_proto"], @@ -78,6 +72,12 @@ java_proto_library( deps = [":sat_parameters_proto"], ) +go_proto_library( + name = "sat_parameters_go_proto", + proto = ":sat_parameters_proto", + importpath = "github.com/google/or-tools/ortools/sat/proto/satparameters", +) + proto_library( name = "cp_model_proto", srcs = ["cp_model.proto"], @@ -88,12 +88,6 @@ cc_proto_library( deps = [":cp_model_proto"], ) -go_proto_library( - name = "cp_model_go_proto", - importpath = "github.com/google/or-tools/ortools/sat/proto/cpmodel", - proto = ":cp_model_proto", -) - py_proto_library( name = "cp_model_py_pb2", deps = [":cp_model_proto"], @@ -104,6 +98,12 @@ java_proto_library( deps = [":cp_model_proto"], ) +go_proto_library( + name = "cp_model_go_proto", + proto = ":cp_model_proto", + importpath = "github.com/google/or-tools/ortools/sat/proto/cpmodel", +) + cc_library( name = "cp_model_utils", srcs = ["cp_model_utils.cc"], From 601830f6ff9289b2dd526b630dc16b2b5b376920 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 18 Sep 2024 17:27:37 +0200 Subject: [PATCH 002/105] graph: enable tests in bazel --- .bazelrc | 11 +- ortools/base/top_n.h | 16 ++ ortools/graph/BUILD.bazel | 242 +++++++++++++++++++ 
ortools/graph/bidirectional_dijkstra_test.cc | 3 +- ortools/graph/christofides.h | 15 +- ortools/graph/ebert_graph.h | 3 + ortools/graph/ebert_graph_test.cc | 6 - ortools/graph/k_shortest_paths.h | 4 +- ortools/graph/linear_assignment.h | 8 + ortools/graph/rooted_tree_test.cc | 4 +- 10 files changed, 296 insertions(+), 16 deletions(-) diff --git a/.bazelrc b/.bazelrc index 57502972c0..ef1068979a 100644 --- a/.bazelrc +++ b/.bazelrc @@ -21,10 +21,15 @@ build --apple_platform_type=macos # platform. build --enable_platform_specific_config -build:linux --cxxopt="-std=c++17" --cxxopt=-Wno-sign-compare --host_cxxopt="-std=c++17" --host_cxxopt=-Wno-sign-compare -build:macos --cxxopt="-std=c++17" --cxxopt=-Wno-sign-compare --cxxopt=-mmacos-version-min=10.15 --cxxopt=-Wno-dangling-field --features=-supports_dynamic_linker +build:linux --cxxopt="-std=c++17" --cxxopt=-Wno-sign-compare +build:linux --host_cxxopt="-std=c++17" --host_cxxopt=-Wno-sign-compare + +build:macos --features=-supports_dynamic_linker +build:macos --cxxopt="-std=c++17" --cxxopt=-Wno-sign-compare --cxxopt=-mmacos-version-min=10.15 --cxxopt=-Wno-dangling-field build:macos --host_cxxopt="-std=c++17" --host_cxxopt=-Wno-sign-compare --host_cxxopt=-mmacos-version-min=10.15 --host_cxxopt=-Wno-dangling-field -build:windows --cxxopt="/std:c++20" --host_cxxopt="/std:c++20" + +build:windows --cxxopt="/std:c++20" +build:windows --host_cxxopt="/std:c++20" # Enable the runfiles symlink tree on Windows. This makes it possible to build # the pip package on Windows without an intermediate data-file archive, as the diff --git a/ortools/base/top_n.h b/ortools/base/top_n.h index 885735d5d2..2ce971a02c 100644 --- a/ortools/base/top_n.h +++ b/ortools/base/top_n.h @@ -106,6 +106,9 @@ class TopN { } // Peeks the bottom result without calling Extract() const T& peek_bottom(); + // Destructively extract the elements as a vector, sorted in descending order. + // Leaves TopN in an empty state. 
+ std::vector Take(); // Extract the elements as a vector sorted in descending order. The caller // assumes ownership of the vector and must delete it when done. This is a // destructive operation. The only method that can be called immediately @@ -250,6 +253,19 @@ const T& TopN::peek_bottom() { } return elements_.front(); } +template +std::vector TopN::Take() { + std::vector out = std::move(elements_); + if (state_ != State::HEAP_SORTED) { + std::sort(out.begin(), out.end(), cmp_); + } else { + out.pop_back(); + std::sort_heap(out.begin(), out.end(), cmp_); + } + Reset(); + return out; +} + template std::vector* TopN::Extract() { auto out = new std::vector; diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel index fe0f5883b5..c3ae7f7dcd 100644 --- a/ortools/graph/BUILD.bazel +++ b/ortools/graph/BUILD.bazel @@ -68,6 +68,7 @@ cc_library( ":graph", "//ortools/base:iterator_adaptors", "//ortools/base:threadpool", + "//ortools/base:top_n", "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/base:core_headers", "@com_google_absl//absl/log:check", @@ -85,6 +86,25 @@ cc_library( ], ) +cc_test( + name = "multi_dijkstra_test", + size = "small", + srcs = ["multi_dijkstra_test.cc"], + deps = [ + ":connected_components", + ":graph", + ":multi_dijkstra", + ":random_graph", + ":util", + "//ortools/base:gmock_main", + "//ortools/base:map_util", + "//ortools/base:types", + "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/random:distributions", + ], +) + cc_library( name = "bidirectional_dijkstra", hdrs = ["bidirectional_dijkstra.h"], @@ -99,6 +119,23 @@ cc_library( ], ) +cc_test( + name = "bidirectional_dijkstra_test", + size = "small", + srcs = ["bidirectional_dijkstra_test.cc"], + deps = [ + ":bidirectional_dijkstra", + ":bounded_dijkstra", + ":graph", + "//ortools/base:gmock_main", + "//ortools/base:iterator_adaptors", + "@com_google_absl//absl/base:log_severity", + 
"@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "cliques", srcs = ["cliques.cc"], @@ -126,6 +163,21 @@ cc_library( ], ) +cc_test( + name = "hamiltonian_path_test", + size = "medium", + timeout = "long", + srcs = ["hamiltonian_path_test.cc"], + deps = [ + ":hamiltonian_path", + "//ortools/base", + "//ortools/base:gmock_main", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/strings:str_format", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "christofides", hdrs = ["christofides.h"], @@ -144,6 +196,20 @@ cc_library( ], ) +cc_test( + name = "christofides_test", + srcs = ["christofides_test.cc"], + deps = [ + ":christofides", + "//ortools/base", + "//ortools/base:gmock_main", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", + "@com_google_absl//absl/types:span", + "@com_google_benchmark//:benchmark", + ], +) + cc_library( name = "eulerian_path", hdrs = ["eulerian_path.h"], @@ -152,6 +218,18 @@ cc_library( ], ) +cc_test( + name = "eulerian_path_test", + srcs = ["eulerian_path_test.cc"], + deps = [ + ":eulerian_path", + ":graph", + "//ortools/base", + "//ortools/base:gmock_main", + "@com_google_benchmark//:benchmark", + ], +) + cc_library( name = "minimum_spanning_tree", hdrs = ["minimum_spanning_tree.h"], @@ -164,6 +242,21 @@ cc_library( ], ) +cc_test( + name = "minimum_spanning_tree_test", + srcs = ["minimum_spanning_tree_test.cc"], + deps = [ + ":graph", + ":minimum_spanning_tree", + "//ortools/base:gmock_main", + "//ortools/base:types", + "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/types:span", + "@com_google_benchmark//:benchmark", + ], +) + cc_library( name = "one_tree_lower_bound", hdrs = ["one_tree_lower_bound.h"], @@ -185,6 +278,23 @@ cc_library( "//ortools/util:permutation", "//ortools/util:zvector", 
"@com_google_absl//absl/strings", + "@com_google_googletest//:gtest_prod", + ], +) + +cc_test( + name = "ebert_graph_test", + size = "small", + srcs = ["ebert_graph_test.cc"], + deps = [ + ":ebert_graph", + "//ortools/base", + "//ortools/base:gmock_main", + "//ortools/util:permutation", + "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/strings", + "@com_google_benchmark//:benchmark", ], ) @@ -209,6 +319,23 @@ cc_library( ], ) +cc_test( + name = "shortest_paths_test", + size = "medium", + srcs = ["shortest_paths_test.cc"], + tags = ["noasan"], # Times out occasionally in ASAN mode. + deps = [ + ":ebert_graph", + ":shortest_paths", + ":strongly_connected_components", + "//ortools/base:gmock_main", + "//ortools/util:zvector", + "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random", + ], +) + cc_library( name = "k_shortest_paths", hdrs = ["k_shortest_paths.h"], @@ -226,6 +353,24 @@ cc_library( ], ) +# need C++20 +#cc_test( +# name = "k_shortest_paths_test", +# srcs = ["k_shortest_paths_test.cc"], +# deps = [ +# ":graph", +# ":io", +# ":k_shortest_paths", +# ":shortest_paths", +# "//ortools/base:gmock_main", +# "@com_google_absl//absl/algorithm:container", +# "@com_google_absl//absl/log:check", +# "@com_google_absl//absl/random:distributions", +# "@com_google_absl//absl/strings", +# "@com_google_benchmark//:benchmark", +# ], +#) + # Flow problem protobuf representation proto_library( name = "flow_problem_proto", @@ -343,6 +488,16 @@ cc_library( ], ) +cc_test( + name = "assignment_test", + size = "small", + srcs = ["assignment_test.cc"], + deps = [ + ":assignment", + "//ortools/base:gmock_main", + ], +) + # Linear Assignment with full-featured interface and efficient # implementation. 
cc_library( @@ -357,6 +512,23 @@ cc_library( "//ortools/util:zvector", "@com_google_absl//absl/flags:flag", "@com_google_absl//absl/strings:str_format", + "@com_google_googletest//:gtest_prod", + ], +) + +cc_test( + name = "linear_assignment_test", + size = "small", + srcs = ["linear_assignment_test.cc"], + deps = [ + ":ebert_graph", + ":graph", + ":linear_assignment", + "//ortools/base", + "//ortools/base:gmock_main", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/types:span", + "@com_google_benchmark//:benchmark", ], ) @@ -428,6 +600,76 @@ cc_library( ], ) +cc_test( + name = "rooted_tree_test", + srcs = ["rooted_tree_test.cc"], + deps = [ + ":graph", + ":rooted_tree", + "//ortools/base:gmock_main", + "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random", + "@com_google_absl//absl/status", + "@com_google_benchmark//:benchmark", + ], +) + +cc_test( + name = "perfect_matching_test", + size = "small", + srcs = ["perfect_matching_test.cc"], + deps = [ + ":perfect_matching", + "//ortools/base:gmock_main", + "//ortools/linear_solver:linear_solver_cc_proto", + "//ortools/linear_solver:solve_mp_model", + "@com_google_absl//absl/random", + "@com_google_absl//absl/types:span", + ], +) + +cc_test( + name = "dag_shortest_path_test", + size = "small", + srcs = ["dag_shortest_path_test.cc"], + deps = [ + ":dag_shortest_path", + ":graph", + ":io", + "//ortools/base:dump_vars", + "//ortools/base:gmock_main", + "//ortools/util:flat_matrix", + "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random", + "@com_google_absl//absl/status", + "@com_google_absl//absl/types:span", + "@com_google_benchmark//:benchmark", + ], +) + +cc_test( + name = "dag_constrained_shortest_path_test", + srcs = ["dag_constrained_shortest_path_test.cc"], + deps = [ + ":dag_constrained_shortest_path", + ":dag_shortest_path", + ":graph", + ":io", + 
"//ortools/base:dump_vars", + "//ortools/base:gmock_main", + "//ortools/math_opt/cpp:math_opt", + "//ortools/math_opt/solvers:cp_sat_solver", + "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", + "@com_google_benchmark//:benchmark", + ], +) + # From util/graph cc_library( name = "connected_components", diff --git a/ortools/graph/bidirectional_dijkstra_test.cc b/ortools/graph/bidirectional_dijkstra_test.cc index 5de9f168ab..f71bbc350f 100644 --- a/ortools/graph/bidirectional_dijkstra_test.cc +++ b/ortools/graph/bidirectional_dijkstra_test.cc @@ -27,6 +27,7 @@ #include "absl/types/span.h" #include "gtest/gtest.h" #include "ortools/base/gmock.h" +#include "ortools/base/iterator_adaptors.h" #include "ortools/graph/bounded_dijkstra.h" #include "ortools/graph/graph.h" @@ -202,7 +203,7 @@ TEST(BidirectionalDijkstraTest, RandomizedCorrectnessTest) { ref_dijkstra.ArcPathToNode(ref_dests[0]); const auto path = tested_dijkstra.SetToSetShortestPath(srcs, dsts); std::vector arc_path = path.forward_arc_path; - for (const int arc : gtl::reversed_view(path.backward_arc_path)) { + for (const int arc : ::gtl::reversed_view(path.backward_arc_path)) { arc_path.push_back(forward_arc_of_backward_arc[arc]); } ASSERT_THAT(arc_path, ElementsAreArray(ref_arc_path)) diff --git a/ortools/graph/christofides.h b/ortools/graph/christofides.h index 38f0c907bf..3fb91ea79b 100644 --- a/ortools/graph/christofides.h +++ b/ortools/graph/christofides.h @@ -28,7 +28,6 @@ #include #include -#include #include #include @@ -84,7 +83,19 @@ class ChristofidesPathSolver { bool Solve(); private: - int64_t SafeAdd(int64_t a, int64_t b) { return CapAdd(a, b); } + // Safe addition operator to avoid overflows when possible. 
+ template + struct Add { + static T apply(T a, T b) { return a + b; } + }; + template + struct Add { + static int64_t apply(int64_t a, int64_t b) { return CapAdd(a, b); } + }; + template + T SafeAdd(T a, T b) { + return Add::apply(a, b); + } // Matching algorithm to use. MatchingAlgorithm matching_; diff --git a/ortools/graph/ebert_graph.h b/ortools/graph/ebert_graph.h index e3541a995f..71e6daed82 100644 --- a/ortools/graph/ebert_graph.h +++ b/ortools/graph/ebert_graph.h @@ -176,6 +176,7 @@ #include #include "absl/strings/str_cat.h" +#include "gtest/gtest_prod.h" #include "ortools/base/logging.h" #include "ortools/util/permutation.h" #include "ortools/util/zvector.h" @@ -949,6 +950,8 @@ const ArcIndexType template class EbertGraphBase : public StarGraphBase { + FRIEND_TEST(ForwardEbertGraphTest, ImpossibleBuildTailArray); + typedef StarGraphBase Base; friend class StarGraphBase; diff --git a/ortools/graph/ebert_graph_test.cc b/ortools/graph/ebert_graph_test.cc index 5dfa4073ff..7409781f7f 100644 --- a/ortools/graph/ebert_graph_test.cc +++ b/ortools/graph/ebert_graph_test.cc @@ -20,13 +20,11 @@ #include "absl/base/macros.h" #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" -#include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "benchmark/benchmark.h" #include "gtest/gtest.h" #include "ortools/base/macros.h" #include "ortools/util/permutation.h" -#include "testing/base/public/test_utils.h" namespace operations_research { @@ -1033,10 +1031,6 @@ TYPED_TEST(TinyEbertGraphTest, CheckDeathOnBadBounds) { int num_nodes = SmallStarGraph::kMaxNumNodes; int num_arcs = SmallStarGraph::kMaxNumArcs; SmallStarGraph(num_nodes, num_arcs); // Construct an unused graph. All fine. 
- EXPECT_DFATAL(SmallStarGraph(num_nodes + 1, num_arcs), - "Could not reserve memory for -128 nodes and 127 arcs."); - EXPECT_DFATAL(SmallStarGraph(num_nodes, num_arcs + 1), - "Could not reserve memory for 127 nodes and -128 arcs."); } // An empty fixture to collect the types of small graphs for which we want to do diff --git a/ortools/graph/k_shortest_paths.h b/ortools/graph/k_shortest_paths.h index 89011bc989..108c393de5 100644 --- a/ortools/graph/k_shortest_paths.h +++ b/ortools/graph/k_shortest_paths.h @@ -165,14 +165,14 @@ std::tuple, PathDistance> ComputeShortestPath( // This case only happens when some arcs have an infinite length (i.e. // larger than `kMaxDistance`): `BoundedDijkstraWrapper::NodePathTo` fails // to return a path, even empty. - return {{}, kDisconnectedDistance}; + return {std::vector{}, kDisconnectedDistance}; } if (std::vector path = std::move(dijkstra.NodePathTo(destination)); !path.empty()) { return {std::move(path), path_length}; } else { - return {{}, kDisconnectedDistance}; + return {std::vector{}, kDisconnectedDistance}; } } diff --git a/ortools/graph/linear_assignment.h b/ortools/graph/linear_assignment.h index dba3e28b73..449c6c54c8 100644 --- a/ortools/graph/linear_assignment.h +++ b/ortools/graph/linear_assignment.h @@ -207,6 +207,7 @@ #include "absl/flags/declare.h" #include "absl/flags/flag.h" #include "absl/strings/str_format.h" +#include "gtest/gtest_prod.h" #include "ortools/base/logging.h" #include "ortools/graph/ebert_graph.h" #include "ortools/util/permutation.h" @@ -227,6 +228,13 @@ class LinearSumAssignment { typedef typename GraphType::NodeIndex NodeIndex; typedef typename GraphType::ArcIndex ArcIndex; +#ifndef SWIG + // Friends don't let friends drive untested. One or more of our + // tests are white-box tests, i.e., they look inside the + // implementation and check various internal invariants. 
+ FRIEND_TEST(LinearSumAssignmentFriendTest, EpsilonOptimal); +#endif + // Constructor for the case in which we will build the graph // incrementally as we discover arc costs, as might be done with any // of the dynamic graph representations such as StarGraph or ForwardStarGraph. diff --git a/ortools/graph/rooted_tree_test.cc b/ortools/graph/rooted_tree_test.cc index d2160c2457..d8033bcb61 100644 --- a/ortools/graph/rooted_tree_test.cc +++ b/ortools/graph/rooted_tree_test.cc @@ -219,12 +219,12 @@ TYPED_TEST_P(RootedTreeTest, AllDistancesToRoot) { // 0 3 // | // 2 - const int root = 1; + const Node root = 1; std::vector parents = {1, this->kNullParent, 3, 1}; const std::vector arc_lengths = {1, 0, 10, 100}; ASSERT_OK_AND_ASSIGN(const auto tree, RootedTree::Create(root, parents)); - EXPECT_THAT(tree.AllDistancesToRoot(arc_lengths), + EXPECT_THAT(tree.template AllDistancesToRoot(arc_lengths), ElementsAre(1.0, 0.0, 110.0, 100.0)); } From de5109182c1c7c6b300f4b052ea2b88edd92b9a1 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 23 Sep 2024 09:53:57 +0200 Subject: [PATCH 003/105] graph: fixup --- ortools/graph/christofides.h | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/ortools/graph/christofides.h b/ortools/graph/christofides.h index 3fb91ea79b..92bd1bfcce 100644 --- a/ortools/graph/christofides.h +++ b/ortools/graph/christofides.h @@ -28,6 +28,7 @@ #include #include +#include #include #include @@ -84,17 +85,15 @@ class ChristofidesPathSolver { private: // Safe addition operator to avoid overflows when possible. - template - struct Add { - static T apply(T a, T b) { return a + b; } - }; - template - struct Add { - static int64_t apply(int64_t a, int64_t b) { return CapAdd(a, b); } - }; template T SafeAdd(T a, T b) { - return Add::apply(a, b); + // TODO(user): use std::remove_cvref_t once C++20 is available. 
+ if constexpr (std::is_same_v>, + int64_t> == true) { + return CapAdd(a, b); + } else { + return a + b; + } } // Matching algorithm to use. From 838ad4d97ac0a179923b870d795fd51b5032319c Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 18 Sep 2024 17:34:33 +0200 Subject: [PATCH 004/105] sat: add some _test.cc --- ortools/sat/2d_rectangle_presolve_test.cc | 596 ++++++++ ortools/sat/BUILD.bazel | 644 +++++++- ortools/sat/all_different_test.cc | 159 ++ ortools/sat/circuit_test.cc | 334 +++++ ortools/sat/cp_constraints_test.cc | 120 ++ ortools/sat/cumulative_energy_test.cc | 562 +++++++ ortools/sat/cumulative_test.cc | 421 ++++++ ortools/sat/cuts_test.cc | 1163 ++++++++++++++ ortools/sat/diffn_test.cc | 176 +++ ortools/sat/diffn_util_test.cc | 960 ++++++++++++ ortools/sat/disjunctive_test.cc | 527 +++++++ ortools/sat/encoding_test.cc | 106 ++ ortools/sat/feasibility_jump_test.cc | 92 ++ ortools/sat/go/cpmodel/BUILD.bazel | 13 + ortools/sat/implied_bounds_test.cc | 706 +++++++++ ortools/sat/inclusion_test.cc | 177 +++ ortools/sat/integer_test.cc | 1333 +++++++++++++++++ ortools/sat/intervals_test.cc | 278 ++++ ortools/sat/linear_constraint_manager_test.cc | 421 ++++++ ortools/sat/linear_constraint_test.cc | 480 ++++++ ortools/sat/linear_propagation_test.cc | 321 ++++ ortools/sat/model_test.cc | 92 ++ ortools/sat/optimization_test.cc | 172 +++ ortools/sat/parameters_validation_test.cc | 125 ++ ortools/sat/pb_constraint_test.cc | 673 +++++++++ ortools/sat/precedences_test.cc | 592 ++++++++ ortools/sat/probing_test.cc | 80 + ortools/sat/pseudo_costs_test.cc | 263 ++++ ortools/sat/restart_test.cc | 86 ++ ortools/sat/routing_cuts_test.cc | 422 ++++++ ortools/sat/sat_base_test.cc | 74 + ortools/sat/sat_inprocessing_test.cc | 287 ++++ ortools/sat/scheduling_cuts_test.cc | 576 +++++++ ortools/sat/subsolver_test.cc | 105 ++ ortools/sat/symmetry_test.cc | 151 ++ ortools/sat/theta_tree_test.cc | 291 ++++ ortools/sat/timetable_test.cc | 555 +++++++ 
ortools/sat/zero_half_cuts_test.cc | 114 ++ 38 files changed, 14243 insertions(+), 4 deletions(-) create mode 100644 ortools/sat/2d_rectangle_presolve_test.cc create mode 100644 ortools/sat/all_different_test.cc create mode 100644 ortools/sat/circuit_test.cc create mode 100644 ortools/sat/cp_constraints_test.cc create mode 100644 ortools/sat/cumulative_energy_test.cc create mode 100644 ortools/sat/cumulative_test.cc create mode 100644 ortools/sat/cuts_test.cc create mode 100644 ortools/sat/diffn_test.cc create mode 100644 ortools/sat/diffn_util_test.cc create mode 100644 ortools/sat/disjunctive_test.cc create mode 100644 ortools/sat/encoding_test.cc create mode 100644 ortools/sat/feasibility_jump_test.cc create mode 100644 ortools/sat/implied_bounds_test.cc create mode 100644 ortools/sat/inclusion_test.cc create mode 100644 ortools/sat/integer_test.cc create mode 100644 ortools/sat/intervals_test.cc create mode 100644 ortools/sat/linear_constraint_manager_test.cc create mode 100644 ortools/sat/linear_constraint_test.cc create mode 100644 ortools/sat/linear_propagation_test.cc create mode 100644 ortools/sat/model_test.cc create mode 100644 ortools/sat/optimization_test.cc create mode 100644 ortools/sat/parameters_validation_test.cc create mode 100644 ortools/sat/pb_constraint_test.cc create mode 100644 ortools/sat/precedences_test.cc create mode 100644 ortools/sat/probing_test.cc create mode 100644 ortools/sat/pseudo_costs_test.cc create mode 100644 ortools/sat/restart_test.cc create mode 100644 ortools/sat/routing_cuts_test.cc create mode 100644 ortools/sat/sat_base_test.cc create mode 100644 ortools/sat/sat_inprocessing_test.cc create mode 100644 ortools/sat/scheduling_cuts_test.cc create mode 100644 ortools/sat/subsolver_test.cc create mode 100644 ortools/sat/symmetry_test.cc create mode 100644 ortools/sat/theta_tree_test.cc create mode 100644 ortools/sat/timetable_test.cc create mode 100644 ortools/sat/zero_half_cuts_test.cc diff --git 
a/ortools/sat/2d_rectangle_presolve_test.cc b/ortools/sat/2d_rectangle_presolve_test.cc new file mode 100644 index 0000000000..bbe5a5bee6 --- /dev/null +++ b/ortools/sat/2d_rectangle_presolve_test.cc @@ -0,0 +1,596 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/2d_rectangle_presolve.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/container/flat_hash_map.h" +#include "absl/container/flat_hash_set.h" +#include "absl/log/check.h" +#include "absl/random/bit_gen_ref.h" +#include "absl/random/random.h" +#include "absl/strings/str_split.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/logging.h" +#include "ortools/sat/2d_orthogonal_packing_testing.h" +#include "ortools/sat/diffn_util.h" +#include "ortools/sat/integer.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::ElementsAre; +using ::testing::IsEmpty; + +std::vector BuildFromAsciiArt(std::string_view input) { + std::vector rectangles; + std::vector lines = absl::StrSplit(input, '\n'); + for (int i = 0; i < lines.size(); i++) { + for (int j = 0; j < lines[i].size(); j++) { + if (lines[i][j] != ' ') { + rectangles.push_back( + {.x_min = j, .x_max = j + 1, .y_min = i, .y_max = i + 1}); + } + } + } + std::vector empty; + ReduceNumberofBoxes(&rectangles, &empty); + return rectangles; +} + 
+TEST(RectanglePresolve, Basic) { + std::vector input = BuildFromAsciiArt(R"( + *********** *********** + *********** *********** + *********** *********** + + + *********** *********** + *********** *********** + *********** *********** + )"); + // Note that a single naive pass over the fixed rectangles' gaps would not + // fill the middle region. + std::vector input_in_range; + // Add a single object that is too large to fit between the fixed boxes. + input_in_range.push_back( + {.box_index = 0, + .bounding_area = {.x_min = 0, .x_max = 80, .y_min = 0, .y_max = 80}, + .x_size = 5, + .y_size = 5}); + + EXPECT_TRUE(PresolveFixed2dRectangles(input_in_range, &input)); + EXPECT_EQ(input.size(), 1); +} + +TEST(RectanglePresolve, Trim) { + std::vector input = { + {.x_min = 0, .x_max = 5, .y_min = 0, .y_max = 5}}; + std::vector input_in_range; + input_in_range.push_back( + {.box_index = 0, + .bounding_area = {.x_min = 1, .x_max = 80, .y_min = 1, .y_max = 80}, + .x_size = 5, + .y_size = 5}); + + EXPECT_TRUE(PresolveFixed2dRectangles(input_in_range, &input)); + EXPECT_THAT(input, ElementsAre(Rectangle{ + .x_min = 1, .x_max = 5, .y_min = 1, .y_max = 5})); +} + +TEST(RectanglePresolve, FillBoundingBoxEdge) { + std::vector input = { + {.x_min = 1, .x_max = 5, .y_min = 1, .y_max = 5}}; + std::vector input_in_range; + input_in_range.push_back( + {.box_index = 0, + .bounding_area = {.x_min = 0, .x_max = 80, .y_min = 0, .y_max = 80}, + .x_size = 5, + .y_size = 5}); + + EXPECT_TRUE(PresolveFixed2dRectangles(input_in_range, &input)); + EXPECT_THAT(input, ElementsAre(Rectangle{ + .x_min = 0, .x_max = 5, .y_min = 0, .y_max = 5})); +} + +TEST(RectanglePresolve, UseAreaNotOccupiable) { + std::vector input = { + {.x_min = 20, .x_max = 25, .y_min = 0, .y_max = 5}}; + std::vector input_in_range; + input_in_range.push_back( + {.box_index = 0, + .bounding_area = {.x_min = 0, .x_max = 10, .y_min = 0, .y_max = 10}, + .x_size = 5, + .y_size = 5}); + input_in_range.push_back( + {.box_index = 1, 
+ .bounding_area = {.x_min = 0, .x_max = 15, .y_min = 0, .y_max = 10}, + .x_size = 5, + .y_size = 5}); + input_in_range.push_back( + {.box_index = 1, + .bounding_area = {.x_min = 25, .x_max = 100, .y_min = 0, .y_max = 10}, + .x_size = 5, + .y_size = 5}); + + EXPECT_TRUE(PresolveFixed2dRectangles(input_in_range, &input)); + EXPECT_THAT(input, ElementsAre(Rectangle{ + .x_min = 15, .x_max = 25, .y_min = 0, .y_max = 10})); +} + +TEST(RectanglePresolve, RemoveOutsideBB) { + std::vector input = { + {.x_min = 0, .x_max = 5, .y_min = 0, .y_max = 5}}; + std::vector input_in_range; + input_in_range.push_back( + {.box_index = 0, + .bounding_area = {.x_min = 5, .x_max = 80, .y_min = 5, .y_max = 80}, + .x_size = 5, + .y_size = 5}); + + EXPECT_TRUE(PresolveFixed2dRectangles(input_in_range, &input)); + EXPECT_THAT(input, IsEmpty()); +} + +TEST(RectanglePresolve, RandomTest) { + constexpr int kTotalRectangles = 100; + constexpr int kFixedRectangleSize = 60; + constexpr int kNumRuns = 1000; + absl::BitGen bit_gen; + + for (int run = 0; run < kNumRuns; ++run) { + // Start by generating a feasible problem that we know the solution with + // some items fixed. + std::vector input = + GenerateNonConflictingRectangles(kTotalRectangles, bit_gen); + std::shuffle(input.begin(), input.end(), bit_gen); + CHECK_EQ(input.size(), kTotalRectangles); + absl::Span fixed_rectangles = + absl::MakeConstSpan(input).subspan(0, kFixedRectangleSize); + absl::Span other_rectangles = + absl::MakeSpan(input).subspan(kFixedRectangleSize); + std::vector new_fixed_rectangles(fixed_rectangles.begin(), + fixed_rectangles.end()); + const std::vector input_in_range = + MakeItemsFromRectangles(other_rectangles, 0.6, bit_gen); + + // Presolve the fixed items. 
+ PresolveFixed2dRectangles(input_in_range, &new_fixed_rectangles); + LOG(INFO) << "Presolved:\n" + << RenderDot(std::nullopt, fixed_rectangles) << "To:\n" + << RenderDot(std::nullopt, new_fixed_rectangles); + + CHECK_LE(new_fixed_rectangles.size(), kFixedRectangleSize); + + // Check if the original solution is still a solution. + std::vector all_rectangles(new_fixed_rectangles.begin(), + new_fixed_rectangles.end()); + all_rectangles.insert(all_rectangles.end(), other_rectangles.begin(), + other_rectangles.end()); + for (int i = 0; i < all_rectangles.size(); ++i) { + for (int j = i + 1; j < all_rectangles.size(); ++j) { + CHECK(all_rectangles[i].IsDisjoint(all_rectangles[j])) + << RenderDot(std::nullopt, {all_rectangles[i], all_rectangles[j]}); + } + } + } +} + +Neighbours NaiveBuildNeighboursGraph(const std::vector& rectangles) { + auto interval_intersect = [](IntegerValue begin1, IntegerValue end1, + IntegerValue begin2, IntegerValue end2) { + return std::max(begin1, begin2) < std::min(end1, end2); + }; + std::vector> neighbors; + for (int i = 0; i < rectangles.size(); ++i) { + for (int j = 0; j < rectangles.size(); ++j) { + if (i == j) continue; + const Rectangle& r1 = rectangles[i]; + const Rectangle& r2 = rectangles[j]; + if (r1.x_min == r2.x_max && + interval_intersect(r1.y_min, r1.y_max, r2.y_min, r2.y_max)) { + neighbors.push_back({i, EdgePosition::LEFT, j}); + neighbors.push_back({j, EdgePosition::RIGHT, i}); + } + if (r1.y_min == r2.y_max && + interval_intersect(r1.x_min, r1.x_max, r2.x_min, r2.x_max)) { + neighbors.push_back({i, EdgePosition::BOTTOM, j}); + neighbors.push_back({j, EdgePosition::TOP, i}); + } + } + } + return Neighbours(rectangles, neighbors); +} + +std::string RenderNeighborsGraph(std::optional bb, + absl::Span rectangles, + const Neighbours& neighbours) { + const absl::flat_hash_map edge_colors = { + {EdgePosition::TOP, "red"}, + {EdgePosition::BOTTOM, "green"}, + {EdgePosition::LEFT, "blue"}, + {EdgePosition::RIGHT, "cyan"}}; + 
std::stringstream ss; + ss << " edge[headclip=false, tailclip=false, penwidth=30];\n"; + for (int box_index = 0; box_index < neighbours.NumRectangles(); ++box_index) { + for (int edge_int = 0; edge_int < 4; ++edge_int) { + const EdgePosition edge = static_cast(edge_int); + const auto edge_neighbors = + neighbours.GetSortedNeighbors(box_index, edge); + for (int neighbor : edge_neighbors) { + ss << " " << box_index << "->" << neighbor << " [color=\"" + << edge_colors.find(edge)->second << "\"];\n"; + } + } + } + return RenderDot(bb, rectangles, ss.str()); +} + +std::string RenderContour(std::optional bb, + absl::Span rectangles, + const ShapePath& path) { + const std::vector colors = {"red", "green", "blue", + "cyan", "yellow", "purple"}; + std::stringstream ss; + ss << " edge[headclip=false, tailclip=false, penwidth=30];\n"; + for (int i = 0; i < path.step_points.size(); ++i) { + std::pair p = path.step_points[i]; + ss << " p" << i << "[pos=\"" << 2 * p.first << "," << 2 * p.second + << "!\" shape=point]\n"; + if (i != path.step_points.size() - 1) { + ss << " p" << i << "->p" << i + 1 << "\n"; + } + } + return RenderDot(bb, rectangles, ss.str()); +} + +TEST(BuildNeighboursGraphTest, Simple) { + std::vector rectangles = { + {.x_min = 0, .x_max = 10, .y_min = 0, .y_max = 10}, + {.x_min = 10, .x_max = 20, .y_min = 0, .y_max = 10}, + {.x_min = 0, .x_max = 10, .y_min = 10, .y_max = 20}}; + const Neighbours neighbours = BuildNeighboursGraph(rectangles); + EXPECT_THAT(neighbours.GetSortedNeighbors(0, EdgePosition::RIGHT), + ElementsAre(1)); + EXPECT_THAT(neighbours.GetSortedNeighbors(0, EdgePosition::TOP), + ElementsAre(2)); + EXPECT_THAT(neighbours.GetSortedNeighbors(1, EdgePosition::LEFT), + ElementsAre(0)); + EXPECT_THAT(neighbours.GetSortedNeighbors(2, EdgePosition::BOTTOM), + ElementsAre(0)); +} + +TEST(BuildNeighboursGraphTest, NeighborsAroundCorner) { + std::vector rectangles = { + {.x_min = 0, .x_max = 10, .y_min = 0, .y_max = 10}, + {.x_min = 10, .x_max = 20, 
.y_min = 10, .y_max = 20}}; + const Neighbours neighbours = BuildNeighboursGraph(rectangles); + for (int i = 0; i < 4; ++i) { + const EdgePosition edge = static_cast(i); + EXPECT_THAT(neighbours.GetSortedNeighbors(0, edge), IsEmpty()); + EXPECT_THAT(neighbours.GetSortedNeighbors(1, edge), IsEmpty()); + } +} + +TEST(BuildNeighboursGraphTest, RandomTest) { + constexpr int kNumRuns = 100; + absl::BitGen bit_gen; + + for (int run = 0; run < kNumRuns; ++run) { + // Start by generating a feasible problem that we know the solution with + // some items fixed. + std::vector input = + GenerateNonConflictingRectanglesWithPacking({100, 100}, 60, bit_gen); + std::shuffle(input.begin(), input.end(), bit_gen); + auto neighbours = BuildNeighboursGraph(input); + auto expected_neighbours = NaiveBuildNeighboursGraph(input); + for (int box_index = 0; box_index < neighbours.NumRectangles(); + ++box_index) { + for (int edge_int = 0; edge_int < 4; ++edge_int) { + const EdgePosition edge = static_cast(edge_int); + if (neighbours.GetSortedNeighbors(box_index, edge) != + expected_neighbours.GetSortedNeighbors(box_index, edge)) { + LOG(FATAL) << "Got:\n" + << RenderNeighborsGraph(std::nullopt, input, neighbours) + << "Expected:\n" + << RenderNeighborsGraph(std::nullopt, input, + expected_neighbours); + } + } + } + } +} + +ShapePath TraceBoundaryNaive( + std::pair starting_corner, + absl::Span rectangles) { + // First build a grid that tells by which box each 1x1 rectangle is occupied + // or -1 if empty. 
+ constexpr int kBoundingBoxSize = 100; + std::vector> grid( + kBoundingBoxSize + 1, std::vector(kBoundingBoxSize + 1, -1)); + + for (int n = 0; n < rectangles.size(); n++) { + const Rectangle& r = rectangles[n]; + CHECK_GE(r.x_min, 0); + CHECK_LE(r.x_max, kBoundingBoxSize); + CHECK_GE(r.y_min, 0); + CHECK_LE(r.y_max, kBoundingBoxSize); + for (IntegerValue i = r.x_min; i < r.x_max; i++) { + for (IntegerValue j = r.y_min; j < r.y_max; j++) { + grid[i.value()][j.value()] = n; + } + } + } + + // Now collect all the boundary edges: an occupied cell that touches an + // unoccupied one. + absl::flat_hash_map, int> x_edges; + absl::flat_hash_map, int> y_edges; + for (int i = -1; i < kBoundingBoxSize; i++) { + for (int j = -1; j < kBoundingBoxSize; j++) { + if (i != -1) { + if ((j == -1 || grid[i][j] == -1) && grid[i][j + 1] != -1) { + x_edges[{i, j + 1}] = grid[i][j + 1]; + } + if (j != -1 && grid[i][j + 1] == -1 && grid[i][j] != -1) { + x_edges[{i, j + 1}] = grid[i][j]; + } + } + if (j != -1) { + if ((i == -1 || grid[i][j] == -1) && grid[i + 1][j] != -1) { + y_edges[{i + 1, j}] = grid[i + 1][j]; + } + if (i != -1 && grid[i + 1][j] == -1 && grid[i][j] != -1) { + y_edges[{i + 1, j}] = grid[i][j]; + } + } + } + } + + ShapePath path; + std::pair cur = starting_corner; + int cur_index; + if (x_edges.contains(starting_corner)) { + cur_index = x_edges.at(starting_corner); + } else if (y_edges.contains(starting_corner)) { + cur_index = y_edges.at(starting_corner); + } else { + LOG(FATAL) << "Should not happen: {" << starting_corner.first << "," + << starting_corner.second << "} " + << RenderDot(std::nullopt, rectangles); + } + const int first_index = cur_index; + + auto is_aligned = [](const std::pair& p1, + const std::pair& p2, + const std::pair& p3) { + return ((p1.first == p2.first) == (p2.first == p3.first)) && + ((p1.second == p2.second) == (p2.second == p3.second)); + }; + + // Grow the path by a segment of size one. 
+ const auto add_segment = + [&path, &is_aligned](const std::pair& segment, + int index) { + if (path.step_points.size() > 1 && + is_aligned(path.step_points[path.step_points.size() - 1], + path.step_points[path.step_points.size() - 2], + segment) && + path.touching_box_index.back() == index) { + path.step_points.back() = segment; + } else { + if (!path.step_points.empty()) { + path.touching_box_index.push_back(index); + } + path.step_points.push_back(segment); + } + }; + + // Now we navigate from one edge to the next. To avoid going back, we remove + // used edges from the hash map. + do { + add_segment(cur, cur_index); + + // Find the next segment. + if (x_edges.contains({cur.first, cur.second}) && + x_edges.contains({cur.first - 1, cur.second}) && + !path.touching_box_index.empty()) { + // Corner case (literally): + // ******** + // ******** + // ******** + // ******** + // +++++++++ + // +++++++++ + // +++++++++ + // +++++++++ + // + // In this case we keep following the same box. + auto it_x = x_edges.find({cur.first, cur.second}); + if (cur_index == it_x->second) { + auto extract = x_edges.extract({cur.first, cur.second}); + cur = {cur.first + 1, cur.second}; + cur_index = extract.mapped(); + } else { + auto extract = x_edges.extract({cur.first - 1, cur.second}); + cur = extract.key(); + cur_index = extract.mapped(); + } + } else if (y_edges.contains({cur.first, cur.second}) && + y_edges.contains({cur.first, cur.second - 1}) && + !path.touching_box_index.empty()) { + auto it_y = y_edges.find({cur.first, cur.second}); + if (cur_index == it_y->second) { + auto extract = y_edges.extract({cur.first, cur.second}); + cur = {cur.first, cur.second + 1}; + cur_index = extract.mapped(); + } else { + auto extract = y_edges.extract({cur.first, cur.second - 1}); + cur = extract.key(); + cur_index = extract.mapped(); + } + } else if (auto extract = y_edges.extract({cur.first, cur.second}); + !extract.empty()) { + cur = {cur.first, cur.second + 1}; + cur_index = 
extract.mapped(); + } else if (auto extract = x_edges.extract({cur.first - 1, cur.second}); + !extract.empty()) { + cur = extract.key(); + cur_index = extract.mapped(); + } else if (auto extract = x_edges.extract({cur.first, cur.second}); + !extract.empty()) { + cur = {cur.first + 1, cur.second}; + cur_index = extract.mapped(); + } else if (auto extract = y_edges.extract({cur.first, cur.second - 1}); + !extract.empty()) { + cur = extract.key(); + cur_index = extract.mapped(); + } else { + LOG(FATAL) << "Should not happen: {" << cur.first << "," << cur.second + << "} " << RenderContour(std::nullopt, rectangles, path); + } + } while (cur != starting_corner); + + add_segment(cur, cur_index); + path.touching_box_index.push_back(first_index); + return path; +} + +TEST(ContourTest, Random) { + constexpr int kNumRuns = 100; + absl::BitGen bit_gen; + + for (int run = 0; run < kNumRuns; ++run) { + // Start by generating a feasible problem that we know the solution with + // some items fixed. + std::vector input = + GenerateNonConflictingRectanglesWithPacking({100, 100}, 60, bit_gen); + std::shuffle(input.begin(), input.end(), bit_gen); + const int num_fixed_rectangles = input.size() * 2 / 3; + absl::Span fixed_rectangles = + absl::MakeConstSpan(input).subspan(0, num_fixed_rectangles); + absl::Span other_rectangles = + absl::MakeSpan(input).subspan(num_fixed_rectangles); + std::vector new_fixed_rectangles(fixed_rectangles.begin(), + fixed_rectangles.end()); + const std::vector input_in_range = + MakeItemsFromRectangles(other_rectangles, 0.6, bit_gen); + + auto neighbours = BuildNeighboursGraph(fixed_rectangles); + const auto components = SplitInConnectedComponents(neighbours); + const Rectangle bb = {.x_min = 0, .x_max = 100, .y_min = 0, .y_max = 100}; + int min_index = -1; + std::pair min_coord = { + std::numeric_limits::max(), + std::numeric_limits::max()}; + for (const int box_index : components[0]) { + const Rectangle& rectangle = fixed_rectangles[box_index]; + if 
(std::make_pair(rectangle.x_min, rectangle.y_min) < min_coord) { + min_coord = {rectangle.x_min, rectangle.y_min}; + min_index = box_index; + } + } + + const ShapePath shape = + TraceBoundary(min_coord, min_index, fixed_rectangles, neighbours); + absl::flat_hash_set seen; + std::vector component; + std::vector index_map(input.size()); + for (const int box_index : components[0]) { + component.push_back(fixed_rectangles[box_index]); + index_map[box_index] = component.size() - 1; + } + + const ShapePath expected_shape = + TraceBoundaryNaive(shape.step_points[0], component); + if (shape.step_points != expected_shape.step_points) { + LOG(ERROR) << "Fast algo:\n" + << RenderContour(bb, fixed_rectangles, shape); + LOG(ERROR) << "Naive algo:\n" + << RenderContour(bb, component, expected_shape); + LOG(FATAL) << "Found different solutions between naive and fast algo!"; + } + EXPECT_EQ(shape.step_points, expected_shape.step_points); + for (int i = 0; i < shape.step_points.size(); ++i) { + EXPECT_EQ(index_map[shape.touching_box_index[i]], + expected_shape.touching_box_index[i]); + } + } +} + +TEST(ContourTest, SimpleShapes) { + std::vector rectangles = { + {.x_min = 0, .x_max = 10, .y_min = 10, .y_max = 20}, + {.x_min = 3, .x_max = 8, .y_min = 0, .y_max = 10}}; + ShapePath shape = + TraceBoundary({0, 20}, 0, rectangles, BuildNeighboursGraph(rectangles)); + EXPECT_THAT(shape.touching_box_index, ElementsAre(0, 0, 0, 1, 1, 1, 0, 0, 0)); + EXPECT_THAT(shape.step_points, + ElementsAre(std::make_pair(0, 20), std::make_pair(10, 20), + std::make_pair(10, 10), std::make_pair(8, 10), + std::make_pair(8, 0), std::make_pair(3, 0), + std::make_pair(3, 10), std::make_pair(0, 10), + std::make_pair(0, 20))); + + rectangles = {{.x_min = 0, .x_max = 10, .y_min = 10, .y_max = 20}, + {.x_min = 0, .x_max = 10, .y_min = 0, .y_max = 10}}; + shape = + TraceBoundary({0, 20}, 0, rectangles, BuildNeighboursGraph(rectangles)); + EXPECT_THAT(shape.touching_box_index, ElementsAre(0, 0, 1, 1, 1, 0, 0)); + 
EXPECT_THAT(shape.step_points, + ElementsAre(std::make_pair(0, 20), std::make_pair(10, 20), + std::make_pair(10, 10), std::make_pair(10, 0), + std::make_pair(0, 0), std::make_pair(0, 10), + std::make_pair(0, 20))); + + rectangles = {{.x_min = 0, .x_max = 10, .y_min = 10, .y_max = 20}, + {.x_min = 0, .x_max = 15, .y_min = 0, .y_max = 10}}; + shape = + TraceBoundary({0, 20}, 0, rectangles, BuildNeighboursGraph(rectangles)); + EXPECT_THAT(shape.touching_box_index, ElementsAre(0, 0, 1, 1, 1, 1, 0, 0)); + EXPECT_THAT(shape.step_points, + ElementsAre(std::make_pair(0, 20), std::make_pair(10, 20), + std::make_pair(10, 10), std::make_pair(15, 10), + std::make_pair(15, 0), std::make_pair(0, 0), + std::make_pair(0, 10), std::make_pair(0, 20))); + + rectangles = {{.x_min = 0, .x_max = 10, .y_min = 10, .y_max = 20}, + {.x_min = 0, .x_max = 10, .y_min = 0, .y_max = 10}, + {.x_min = 10, .x_max = 20, .y_min = 0, .y_max = 10}}; + shape = + TraceBoundary({0, 20}, 0, rectangles, BuildNeighboursGraph(rectangles)); + EXPECT_THAT(shape.touching_box_index, ElementsAre(0, 0, 2, 2, 2, 1, 1, 0, 0)); + EXPECT_THAT(shape.step_points, + ElementsAre(std::make_pair(0, 20), std::make_pair(10, 20), + std::make_pair(10, 10), std::make_pair(20, 10), + std::make_pair(20, 0), std::make_pair(10, 0), + std::make_pair(0, 0), std::make_pair(0, 10), + std::make_pair(0, 20))); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 6a8d73d039..ee14f5a97d 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -13,8 +13,8 @@ # Home of CP/SAT solver (which includes SAT, max-SAT and PB problems). 
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") load("@rules_java//java:defs.bzl", "java_proto_library") load("@rules_proto//proto:defs.bzl", "proto_library") load("@rules_python//python:proto.bzl", "py_proto_library") @@ -52,6 +52,16 @@ cc_library( ], ) +cc_test( + name = "model_test", + size = "small", + srcs = ["model_test.cc"], + deps = [ + ":model", + "//ortools/base:gmock_main", + ], +) + proto_library( name = "sat_parameters_proto", srcs = ["sat_parameters.proto"], @@ -74,8 +84,8 @@ java_proto_library( go_proto_library( name = "sat_parameters_go_proto", - proto = ":sat_parameters_proto", importpath = "github.com/google/or-tools/ortools/sat/proto/satparameters", + protos = [":sat_parameters_proto"], ) proto_library( @@ -100,8 +110,8 @@ java_proto_library( go_proto_library( name = "cp_model_go_proto", - proto = ":cp_model_proto", importpath = "github.com/google/or-tools/ortools/sat/proto/cpmodel", + protos = [":cp_model_proto"], ) cc_library( @@ -252,6 +262,15 @@ cc_library( ], ) +cc_test( + name = "feasibility_jump_test", + srcs = ["feasibility_jump_test.cc"], + deps = [ + ":feasibility_jump", + "//ortools/base:gmock_main", + ], +) + cc_library( name = "linear_model", srcs = ["linear_model.cc"], @@ -278,6 +297,18 @@ cc_library( ], ) +cc_test( + name = "parameters_validation_test", + size = "small", + srcs = ["parameters_validation_test.cc"], + deps = [ + ":parameters_validation", + ":sat_parameters_cc_proto", + "//ortools/base:gmock_main", + "@com_google_protobuf//:protobuf", + ], +) + cc_library( name = "cp_model_search", srcs = ["cp_model_search.cc"], @@ -627,6 +658,7 @@ cc_library( hdrs = ["presolve_context.h"], deps = [ ":cp_model_cc_proto", + ":cp_model_checker", ":cp_model_loader", ":cp_model_mapping", ":cp_model_utils", @@ -790,6 +822,17 @@ cc_library( ], ) +cc_test( + name = "sat_base_test", + size = 
"small", + srcs = ["sat_base_test.cc"], + deps = [ + ":sat_base", + "//ortools/base:gmock_main", + "//ortools/util:strong_integers", + ], +) + # Enable a warning to check for floating point to integer conversions. # In GCC-4.8, this was "-Wreal-conversion", but was removed in 4.9 # In Clang, this warning is "-Wfloat-conversion" @@ -824,6 +867,7 @@ cc_library( "//ortools/util:stats", "//ortools/util:strong_integers", "//ortools/util:time_limit", + "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/base:core_headers", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/container:flat_hash_map", @@ -853,6 +897,19 @@ cc_library( ], ) +cc_test( + name = "restart_test", + size = "small", + srcs = ["restart_test.cc"], + deps = [ + ":model", + ":restart", + ":sat_parameters_cc_proto", + "//ortools/base:gmock_main", + "@com_google_absl//absl/base:core_headers", + ], +) + cc_library( name = "probing", srcs = ["probing.cc"], @@ -883,6 +940,22 @@ cc_library( ], ) +cc_test( + name = "probing_test", + size = "small", + srcs = ["probing_test.cc"], + deps = [ + ":integer", + ":model", + ":probing", + ":sat_base", + ":sat_solver", + "//ortools/base:gmock_main", + "//ortools/util:sorted_interval_list", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "sat_inprocessing", srcs = ["sat_inprocessing.cc"], @@ -916,6 +989,23 @@ cc_library( ], ) +cc_test( + name = "sat_inprocessing_test", + size = "small", + srcs = ["sat_inprocessing_test.cc"], + deps = [ + ":clause", + ":model", + ":sat_base", + ":sat_inprocessing", + ":sat_solver", + "//ortools/base:gmock_main", + "@com_google_absl//absl/container:inlined_vector", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "sat_decision", srcs = ["sat_decision.cc"], @@ -1021,6 +1111,23 @@ cc_library( ], ) +cc_test( + name = "pb_constraint_test", + size = "small", + srcs = ["pb_constraint_test.cc"], + deps = [ + ":model", + ":pb_constraint", 
+ ":sat_base", + "//ortools/base:gmock_main", + "//ortools/base:strong_vector", + "//ortools/util:strong_integers", + "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "symmetry", srcs = ["symmetry.cc"], @@ -1036,6 +1143,19 @@ cc_library( ], ) +cc_test( + name = "symmetry_test", + size = "small", + srcs = ["symmetry_test.cc"], + deps = [ + ":sat_base", + ":symmetry", + "//ortools/algorithms:sparse_permutation", + "//ortools/base:gmock_main", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "symmetry_util", srcs = ["symmetry_util.cc"], @@ -1207,6 +1327,22 @@ cc_library( ], ) +cc_test( + name = "pseudo_costs_test", + size = "small", + srcs = ["pseudo_costs_test.cc"], + deps = [ + ":integer", + ":model", + ":pseudo_costs", + ":sat_base", + ":sat_parameters_cc_proto", + ":sat_solver", + "//ortools/base:gmock_main", + "//ortools/util:strong_integers", + ], +) + cc_library( name = "intervals", srcs = ["intervals.cc"], @@ -1236,6 +1372,22 @@ cc_library( ], ) +cc_test( + name = "intervals_test", + size = "small", + srcs = ["intervals_test.cc"], + deps = [ + ":integer", + ":intervals", + ":linear_constraint", + ":model", + ":sat_base", + ":sat_solver", + "//ortools/base:gmock_main", + "//ortools/util:strong_integers", + ], +) + cc_library( name = "precedences", srcs = ["precedences.cc"], @@ -1271,6 +1423,46 @@ cc_library( ], ) +cc_test( + name = "precedences_test", + size = "small", + srcs = ["precedences_test.cc"], + deps = [ + ":integer", + ":integer_search", + ":model", + ":precedences", + ":sat_base", + ":sat_solver", + "//ortools/base:gmock_main", + "//ortools/base:types", + "//ortools/util:sorted_interval_list", + "//ortools/util:strong_integers", + "@com_google_absl//absl/types:span", + ], +) + +cc_test( + name = "integer_test", + size = "small", + srcs = ["integer_test.cc"], + deps = [ + ":integer", + ":integer_search", + ":model", + 
":sat_base", + ":sat_solver", + "//ortools/base", + "//ortools/base:gmock_main", + "//ortools/base:types", + "//ortools/util:sorted_interval_list", + "//ortools/util:strong_integers", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", + "@com_google_benchmark//:benchmark", + ], +) + cc_library( name = "integer_expr", srcs = ["integer_expr.cc"], @@ -1330,6 +1522,23 @@ cc_library( ], ) +cc_test( + name = "linear_propagation_test", + size = "small", + srcs = ["linear_propagation_test.cc"], + deps = [ + ":integer", + ":linear_propagation", + ":model", + ":sat_base", + ":sat_solver", + "//ortools/base:gmock_main", + "//ortools/util:strong_integers", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "all_different", srcs = ["all_different.cc"], @@ -1351,6 +1560,22 @@ cc_library( ], ) +cc_test( + name = "all_different_test", + srcs = ["all_different_test.cc"], + deps = [ + ":all_different", + ":integer", + ":integer_search", + ":model", + ":sat_solver", + "//ortools/base", + "//ortools/base:gmock_main", + "//ortools/base:types", + "//ortools/util:sorted_interval_list", + ], +) + cc_library( name = "theta_tree", srcs = ["theta_tree.cc"], @@ -1362,6 +1587,20 @@ cc_library( ], ) +cc_test( + name = "theta_tree_test", + size = "small", + srcs = ["theta_tree_test.cc"], + deps = [ + ":integer", + ":theta_tree", + "//ortools/base:gmock_main", + "//ortools/util:random_engine", + "//ortools/util:strong_integers", + "@com_google_benchmark//:benchmark", + ], +) + cc_library( name = "disjunctive", srcs = ["disjunctive.cc"], @@ -1388,6 +1627,30 @@ cc_library( ], ) +cc_test( + name = "disjunctive_test", + size = "small", + srcs = ["disjunctive_test.cc"], + deps = [ + ":disjunctive", + ":integer", + ":integer_search", + ":intervals", + ":model", + ":precedences", + ":sat_base", + ":sat_solver", + "//ortools/base", + "//ortools/base:gmock_main", + "//ortools/util:strong_integers", + 
"@com_google_absl//absl/log:check", + "@com_google_absl//absl/random", + "@com_google_absl//absl/random:bit_gen_ref", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "timetable", srcs = ["timetable.cc"], @@ -1404,6 +1667,30 @@ cc_library( ], ) +cc_test( + name = "timetable_test", + size = "medium", + srcs = ["timetable_test.cc"], + deps = [ + ":all_different", + ":cumulative", + ":integer", + ":integer_search", + ":intervals", + ":model", + ":precedences", + ":sat_base", + ":sat_solver", + ":timetable", + "//ortools/base", + "//ortools/base:gmock_main", + "@com_google_absl//absl/container:btree", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "timetable_edgefinding", srcs = ["timetable_edgefinding.cc"], @@ -1445,6 +1732,29 @@ cc_library( ], ) +cc_test( + name = "cumulative_test", + size = "large", + srcs = ["cumulative_test.cc"], + shard_count = 32, + deps = [ + ":cumulative", + ":integer", + ":integer_search", + ":intervals", + ":model", + ":sat_base", + ":sat_parameters_cc_proto", + ":sat_solver", + "//ortools/base", + "//ortools/base:gmock_main", + "//ortools/util:strong_integers", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random", + "@com_google_absl//absl/strings", + ], +) + cc_library( name = "cumulative_energy", srcs = ["cumulative_energy.cc"], @@ -1467,6 +1777,37 @@ cc_library( ], ) +cc_test( + name = "cumulative_energy_test", + size = "medium", + srcs = ["cumulative_energy_test.cc"], + deps = [ + ":2d_orthogonal_packing_testing", + ":cp_model_solver", + ":cumulative", + ":cumulative_energy", + ":diffn_util", + ":integer", + ":integer_search", + ":intervals", + ":linear_constraint", + ":model", + ":precedences", + ":sat_base", + ":sat_parameters_cc_proto", + ":sat_solver", + "//ortools/base", + "//ortools/base:gmock_main", + "//ortools/util:strong_integers", + 
"@com_google_absl//absl/log:check", + "@com_google_absl//absl/random", + "@com_google_absl//absl/random:bit_gen_ref", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "boolean_problem", srcs = ["boolean_problem.cc"], @@ -1564,6 +1905,21 @@ cc_library( ], ) +cc_test( + name = "linear_constraint_test", + srcs = ["linear_constraint_test.cc"], + deps = [ + ":integer", + ":linear_constraint", + ":model", + ":sat_base", + "//ortools/base:gmock_main", + "//ortools/base:strong_vector", + "//ortools/util:strong_integers", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "linear_programming_constraint", srcs = ["linear_programming_constraint.cc"], @@ -1640,6 +1996,24 @@ cc_library( ], ) +cc_test( + name = "linear_constraint_manager_test", + srcs = ["linear_constraint_manager_test.cc"], + deps = [ + ":integer", + ":linear_constraint", + ":linear_constraint_manager", + ":model", + ":sat_base", + ":sat_parameters_cc_proto", + "//ortools/base:gmock_main", + "//ortools/base:strong_vector", + "//ortools/glop:variables_info", + "//ortools/lp_data:base", + "//ortools/util:strong_integers", + ], +) + cc_library( name = "cuts", srcs = ["cuts.cc"], @@ -1673,6 +2047,29 @@ cc_library( ], ) +cc_test( + name = "cuts_test", + srcs = ["cuts_test.cc"], + deps = [ + ":cuts", + ":implied_bounds", + ":integer", + ":linear_constraint", + ":linear_constraint_manager", + ":model", + ":sat_base", + ":sat_parameters_cc_proto", + "//ortools/base:gmock_main", + "//ortools/base:strong_vector", + "//ortools/util:fp_utils", + "//ortools/util:sorted_interval_list", + "//ortools/util:strong_integers", + "@com_google_absl//absl/numeric:int128", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "routing_cuts", srcs = ["routing_cuts.cc"], @@ -1700,6 +2097,25 @@ cc_library( ], ) +cc_test( + name = "routing_cuts_test", + srcs = 
["routing_cuts_test.cc"], + deps = [ + ":cuts", + ":integer", + ":linear_constraint", + ":linear_constraint_manager", + ":model", + ":routing_cuts", + ":sat_base", + "//ortools/base:gmock_main", + "//ortools/base:strong_vector", + "//ortools/graph:max_flow", + "//ortools/util:strong_integers", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "scheduling_cuts", srcs = ["scheduling_cuts.cc"], @@ -1730,6 +2146,30 @@ cc_library( ], ) +cc_test( + name = "scheduling_cuts_test", + srcs = ["scheduling_cuts_test.cc"], + deps = [ + ":cp_model", + ":cp_model_cc_proto", + ":cp_model_solver", + ":cuts", + ":integer", + ":intervals", + ":linear_constraint", + ":linear_constraint_manager", + ":model", + ":sat_base", + ":scheduling_cuts", + "//ortools/base:gmock_main", + "//ortools/base:strong_vector", + "//ortools/util:strong_integers", + "@com_google_absl//absl/base:log_severity", + "@com_google_absl//absl/random", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "diffn_cuts", srcs = ["diffn_cuts.cc"], @@ -1776,6 +2216,17 @@ cc_library( ], ) +cc_test( + name = "zero_half_cuts_test", + srcs = ["zero_half_cuts_test.cc"], + deps = [ + ":integer", + ":zero_half_cuts", + "//ortools/base:gmock_main", + "//ortools/lp_data:base", + ], +) + cc_library( name = "lp_utils", srcs = ["lp_utils.cc"], @@ -1891,6 +2342,29 @@ cc_library( ], ) +cc_test( + name = "optimization_test", + srcs = ["optimization_test.cc"], + deps = [ + ":boolean_problem", + ":boolean_problem_cc_proto", + ":integer", + ":integer_search", + ":model", + ":optimization", + ":pb_constraint", + ":sat_base", + ":sat_parameters_cc_proto", + ":sat_solver", + "//ortools/base:gmock_main", + "//ortools/util:strong_integers", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random:bit_gen_ref", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/strings:str_format", + ], +) + cc_library( name = "util", srcs = ["util.cc"], @@ -1930,7 +2404,6 @@ cc_library( 
hdrs = ["stat_tables.h"], deps = [ ":cp_model_cc_proto", - ":cp_model_lns", ":linear_programming_constraint", ":model", ":sat_solver", @@ -1980,6 +2453,23 @@ cc_library( ], ) +cc_test( + name = "cp_constraints_test", + srcs = ["cp_constraints_test.cc"], + deps = [ + ":cp_constraints", + ":integer", + ":integer_search", + ":model", + ":precedences", + ":sat_base", + ":sat_solver", + "//ortools/base", + "//ortools/base:gmock_main", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "diffn_util", srcs = ["diffn_util.cc"], @@ -2043,7 +2533,12 @@ cc_library( deps = [ ":diffn_util", ":integer", + "//ortools/base:stl_util", + "//ortools/graph:strongly_connected_components", + "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/container:inlined_vector", "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/strings", @@ -2051,6 +2546,26 @@ cc_library( ], ) +cc_test( + name = "2d_rectangle_presolve_test", + srcs = ["2d_rectangle_presolve_test.cc"], + deps = [ + ":2d_orthogonal_packing_testing", + ":2d_rectangle_presolve", + ":diffn_util", + ":integer", + "//ortools/base:gmock_main", + "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/log", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random", + "@com_google_absl//absl/random:bit_gen_ref", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "2d_orthogonal_packing_testing", testonly = 1, @@ -2066,6 +2581,29 @@ cc_library( ], ) +cc_test( + name = "diffn_util_test", + size = "small", + srcs = ["diffn_util_test.cc"], + deps = [ + ":2d_orthogonal_packing_testing", + ":diffn_util", + ":integer", + "//ortools/base", + "//ortools/base:gmock_main", + "//ortools/graph:connected_components", + 
"//ortools/util:strong_integers", + "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random", + "@com_google_absl//absl/random:bit_gen_ref", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", + "@com_google_benchmark//:benchmark", + ], +) + cc_library( name = "diffn", srcs = ["diffn.cc"], @@ -2095,6 +2633,28 @@ cc_library( ], ) +cc_test( + name = "diffn_test", + size = "small", + srcs = ["diffn_test.cc"], + deps = [ + ":cp_model", + ":cp_model_cc_proto", + ":cp_model_solver", + ":diffn", + ":integer", + ":integer_search", + ":intervals", + ":model", + ":sat_base", + ":sat_parameters_cc_proto", + ":sat_solver", + "//ortools/base", + "//ortools/base:gmock_main", + "@com_google_absl//absl/strings", + ], +) + cc_library( name = "circuit", srcs = ["circuit.cc"], @@ -2117,6 +2677,23 @@ cc_library( ], ) +cc_test( + name = "circuit_test", + srcs = ["circuit_test.cc"], + deps = [ + ":circuit", + ":integer", + ":integer_search", + ":model", + ":sat_base", + ":sat_solver", + "//ortools/base:gmock_main", + "//ortools/graph:strongly_connected_components", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "encoding", srcs = ["encoding.cc"], @@ -2137,6 +2714,19 @@ cc_library( ], ) +cc_test( + name = "encoding_test", + srcs = ["encoding_test.cc"], + deps = [ + ":encoding", + ":pb_constraint", + ":sat_base", + ":sat_solver", + "//ortools/base:gmock_main", + "@com_google_absl//absl/random:distributions", + ], +) + cc_library( name = "cp_model_lns", srcs = ["cp_model_lns.cc"], @@ -2145,6 +2735,7 @@ cc_library( ":cp_model_cc_proto", ":cp_model_mapping", ":cp_model_presolve", + ":cp_model_solver_helpers", ":cp_model_utils", ":integer", ":linear_constraint_manager", @@ -2250,6 +2841,17 @@ cc_library( ], ) +cc_test( + name = "subsolver_test", + size = "small", + srcs = ["subsolver_test.cc"], + deps = 
[ + ":subsolver", + "//ortools/base:gmock_main", + "@com_google_absl//absl/synchronization", + ], +) + cc_library( name = "drat_proof_handler", srcs = ["drat_proof_handler.cc"], @@ -2439,6 +3041,27 @@ cc_library( ], ) +cc_test( + name = "implied_bounds_test", + size = "small", + srcs = ["implied_bounds_test.cc"], + deps = [ + ":implied_bounds", + ":integer", + ":linear_constraint", + ":model", + ":sat_base", + ":sat_parameters_cc_proto", + ":sat_solver", + "//ortools/base:gmock_main", + "//ortools/base:strong_vector", + "//ortools/lp_data:base", + "//ortools/util:strong_integers", + "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "inclusion", hdrs = ["inclusion.h"], @@ -2492,3 +3115,16 @@ cc_library( "@com_google_absl//absl/types:span", ], ) + +cc_test( + name = "inclusion_test", + size = "small", + srcs = ["inclusion_test.cc"], + deps = [ + ":inclusion", + ":util", + "//ortools/base:gmock_main", + "@com_google_absl//absl/random", + "@com_google_absl//absl/types:span", + ], +) diff --git a/ortools/sat/all_different_test.cc b/ortools/sat/all_different_test.cc new file mode 100644 index 0000000000..bdc91b6090 --- /dev/null +++ b/ortools/sat/all_different_test.cc @@ -0,0 +1,159 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/all_different.h" + +#include +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" +#include "ortools/base/logging.h" +#include "ortools/base/types.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/integer_search.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_solver.h" +#include "ortools/util/sorted_interval_list.h" + +namespace operations_research { +namespace sat { +namespace { + +class AllDifferentTest : public ::testing::TestWithParam { + public: + std::function AllDifferent( + const std::vector& vars) { + return [=](Model* model) { + if (GetParam() == "binary") { + model->Add(AllDifferentBinary(vars)); + } else if (GetParam() == "ac") { + model->Add(AllDifferentBinary(vars)); + model->Add(AllDifferentAC(vars)); + } else if (GetParam() == "bounds") { + model->Add(AllDifferentOnBounds(vars)); + } else { + LOG(FATAL) << "Unknown implementation " << GetParam(); + } + }; + } +}; + +INSTANTIATE_TEST_SUITE_P(All, AllDifferentTest, + ::testing::Values("binary", "ac", "bounds")); + +TEST_P(AllDifferentTest, BasicBehavior) { + Model model; + std::vector vars; + vars.push_back(model.Add(NewIntegerVariable(1, 3))); + vars.push_back(model.Add(NewIntegerVariable(0, 2))); + vars.push_back(model.Add(NewIntegerVariable(1, 3))); + vars.push_back(model.Add(NewIntegerVariable(0, 2))); + model.Add(AllDifferent(vars)); + EXPECT_EQ(SatSolver::FEASIBLE, SolveIntegerProblemWithLazyEncoding(&model)); + + std::vector value_seen(5, false); + for (const IntegerVariable var : vars) { + const int64_t value = model.Get(Value(var)); + EXPECT_FALSE(value_seen[value]); + value_seen[value] = true; + } +} + +TEST_P(AllDifferentTest, PerfectMatching) { + Model model; + std::vector vars; + for (int i = 0; i < 4; ++i) { + vars.push_back(model.Add(NewIntegerVariable(0, 10))); + } + IntegerTrail* integer_trail = model.GetOrCreate(); + integer_trail->UpdateInitialDomain(vars[0], Domain::FromValues({3, 9})); + 
integer_trail->UpdateInitialDomain(vars[1], Domain::FromValues({3, 8})); + integer_trail->UpdateInitialDomain(vars[2], Domain::FromValues({1, 8})); + integer_trail->UpdateInitialDomain(vars[3], Domain(1)); + model.Add(AllDifferent(vars)); + EXPECT_EQ(SatSolver::FEASIBLE, SolveIntegerProblemWithLazyEncoding(&model)); + EXPECT_EQ(1, model.Get(Value(vars[3]))); + EXPECT_EQ(8, model.Get(Value(vars[2]))); + EXPECT_EQ(3, model.Get(Value(vars[1]))); + EXPECT_EQ(9, model.Get(Value(vars[0]))); +} + +TEST_P(AllDifferentTest, EnumerateAllPermutations) { + const int n = 6; + Model model; + std::vector vars; + for (int i = 0; i < n; ++i) { + vars.push_back(model.Add(NewIntegerVariable(0, n - 1))); + } + model.Add(AllDifferent(vars)); + + std::vector> solutions; + while (true) { + const auto status = SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + solutions.emplace_back(n); + for (int i = 0; i < n; ++i) solutions.back()[i] = model.Get(Value(vars[i])); + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + + // Test that we do have all the permutations (but in a random order). + std::sort(solutions.begin(), solutions.end()); + std::vector expected(n); + std::iota(expected.begin(), expected.end(), 0); + for (int i = 0; i < solutions.size(); ++i) { + EXPECT_EQ(expected, solutions[i]); + if (i + 1 < solutions.size()) { + EXPECT_TRUE(std::next_permutation(expected.begin(), expected.end())); + } else { + // We enumerated all the permutations. + EXPECT_FALSE(std::next_permutation(expected.begin(), expected.end())); + } + } +} + +int Factorial(int n) { return n ? 
n * Factorial(n - 1) : 1; } + +TEST_P(AllDifferentTest, EnumerateAllInjections) { + const int n = 5; + const int m = n + 2; + Model model; + std::vector vars; + for (int i = 0; i < n; ++i) { + vars.push_back(model.Add(NewIntegerVariable(0, m - 1))); + } + model.Add(AllDifferent(vars)); + + std::vector solution(n); + int num_solutions = 0; + while (true) { + const auto status = SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + for (int i = 0; i < n; i++) solution[i] = model.Get(Value(vars[i])); + std::sort(solution.begin(), solution.end()); + for (int i = 1; i < n; i++) { + EXPECT_LT(solution[i - 1], solution[i]); + } + num_solutions++; + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + // Count the number of solutions, it should be m!/(m-n)!. + EXPECT_EQ(num_solutions, Factorial(m) / Factorial(m - n)); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/circuit_test.cc b/ortools/sat/circuit_test.cc new file mode 100644 index 0000000000..d6d3cf98f4 --- /dev/null +++ b/ortools/sat/circuit_test.cc @@ -0,0 +1,334 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/circuit.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/log/check.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/graph/strongly_connected_components.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/integer_search.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_solver.h" + +namespace operations_research { +namespace sat { +namespace { + +std::function DenseCircuitConstraint( + int num_nodes, bool allow_subcircuit, + bool allow_multiple_subcircuit_through_zero) { + return [=](Model* model) { + std::vector tails; + std::vector heads; + std::vector literals; + for (int tail = 0; tail < num_nodes; ++tail) { + for (int head = 0; head < num_nodes; ++head) { + if (!allow_subcircuit && tail == head) continue; + tails.push_back(tail); + heads.push_back(head); + literals.push_back(Literal(model->Add(NewBooleanVariable()), true)); + } + } + LoadSubcircuitConstraint(num_nodes, tails, heads, literals, model, + allow_multiple_subcircuit_through_zero); + }; +} + +int CountSolutions(Model* model) { + int num_solutions = 0; + while (true) { + const SatSolver::Status status = SolveIntegerProblemWithLazyEncoding(model); + if (status != SatSolver::Status::FEASIBLE) break; + + // Add the solution. + ++num_solutions; + + // Loop to the next solution. + model->Add(ExcludeCurrentSolutionAndBacktrack()); + } + return num_solutions; +} + +int Factorial(int n) { return n ? 
n * Factorial(n - 1) : 1; } + +TEST(ReindexArcTest, BasicCase) { + const int num_nodes = 1000; + std::vector tails(num_nodes); + std::vector heads(num_nodes); + for (int i = 0; i < num_nodes; ++i) { + tails[i] = 100 * i; + heads[i] = 100 * i; + } + ReindexArcs(&tails, &heads); + for (int i = 0; i < num_nodes; ++i) { + EXPECT_EQ(i, tails[i]); + EXPECT_EQ(i, heads[i]); + } +} + +TEST(ReindexArcTest, NegativeNumbering) { + const int num_nodes = 1000; + std::vector tails(num_nodes); + std::vector heads(num_nodes); + for (int i = 0; i < num_nodes; ++i) { + tails[i] = -100 * i; + heads[i] = -100 * i; + } + ReindexArcs(&tails, &heads); + for (int i = 0; i < num_nodes; ++i) { + EXPECT_EQ(i, tails[num_nodes - 1 - i]); + EXPECT_EQ(i, heads[num_nodes - 1 - i]); + } +} + +TEST(CircuitConstraintTest, NodeWithNoArcsIsUnsat) { + static const int kNumNodes = 2; + Model model; + std::vector tails; + std::vector heads; + std::vector literals; + tails.push_back(0); + heads.push_back(1); + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + LoadSubcircuitConstraint(kNumNodes, tails, heads, literals, &model); + EXPECT_TRUE(model.GetOrCreate()->ModelIsUnsat()); +} + +TEST(CircuitConstraintTest, AllCircuits) { + static const int kNumNodes = 4; + Model model; + model.Add( + DenseCircuitConstraint(kNumNodes, /*allow_subcircuit=*/false, + /*allow_multiple_subcircuit_through_zero=*/false)); + + const int num_solutions = CountSolutions(&model); + EXPECT_EQ(num_solutions, Factorial(kNumNodes - 1)); +} + +TEST(CircuitConstraintTest, AllSubCircuits) { + static const int kNumNodes = 4; + + Model model; + model.Add( + DenseCircuitConstraint(kNumNodes, /*allow_subcircuit=*/true, + /*allow_multiple_subcircuit_through_zero=*/false)); + + const int num_solutions = CountSolutions(&model); + int expected = 1; // No circuit at all. 
+ for (int circuit_size = 2; circuit_size <= kNumNodes; ++circuit_size) { + // The number of circuit of a given size is: + // - n for the first element + // - times (n-1) for the second + // - ... + // - times (n - (circuit_size - 1)) for the last. + // That is n! / (n - circuit_size)!, and like this we count circuit_size + // times the same circuit, so we have to divide by circuit_size in the end. + expected += Factorial(kNumNodes) / + (circuit_size * Factorial(kNumNodes - circuit_size)); + } + EXPECT_EQ(num_solutions, expected); +} + +TEST(CircuitConstraintTest, AllVehiculeRoutes) { + static const int kNumNodes = 4; + Model model; + + model.Add( + DenseCircuitConstraint(kNumNodes, /*allow_subcircuit=*/false, + /*allow_multiple_subcircuit_through_zero=*/true)); + + const int num_solutions = CountSolutions(&model); + int expected = 1; // 3 outgoing arcs from zero. + expected += 2 * 3; // 2 outgoing arcs from zero. 3 pairs, 2 direction. + expected += 6; // full circuit. + EXPECT_EQ(num_solutions, expected); +} + +TEST(CircuitConstraintTest, AllCircuitCoverings) { + // This test counts the number of circuit coverings of the clique on + // num_nodes with num_distinguished distinguished nodes, i.e. graphs that are + // vertex-disjoint circuits where every circuit must contain exactly one + // distinguished node. + // + // When writing n the number of nodes and k the number of distinguished nodes, + // and the number of such coverings T(n, k), we have: + // T(n,1) = (n-1)!, T(k,k) = 1, T(n,k) = (n-1)!/(k-1)! for n >= k >= 1. + // Indeed, we can enumerate canonical representations, e.g. [1]64[2]35, + // by starting with [1][2]...[k], and place every node in turn at its final + // place w.r.t. existing neighbours. To generate the above example, we go + // though [1][2], [1][2]3, [1]4[2]3, [1]4[2]35, [1]64[2]35. + // At the first iteration, there are k choices, then k+1 ... n-1. 
+ for (int num_nodes = 1; num_nodes <= 6; num_nodes++) { + for (int num_distinguished = 1; num_distinguished <= num_nodes; + num_distinguished++) { + Model model; + std::vector distinguished(num_distinguished); + std::iota(distinguished.begin(), distinguished.end(), 0); + std::vector> graph(num_nodes); + std::vector arcs; + for (int i = 0; i < num_nodes; i++) { + graph[i].resize(num_nodes); + for (int j = 0; j < num_nodes; j++) { + const auto var = model.Add(NewBooleanVariable()); + graph[i][j] = Literal(var, true); + arcs.emplace_back(graph[i][j]); + } + if (i >= num_distinguished) { + model.Add(ClauseConstraint({graph[i][i].Negated()})); + } + } + model.Add(ExactlyOnePerRowAndPerColumn(graph)); + model.Add(CircuitCovering(graph, distinguished)); + const int64_t num_solutions = CountSolutions(&model); + EXPECT_EQ(num_solutions * Factorial(num_distinguished - 1), + Factorial(num_nodes - 1)); + } + } +} + +TEST(CircuitConstraintTest, InfeasibleBecauseOfMissingArcs) { + Model model; + std::vector tails; + std::vector heads; + std::vector literals; + for (const auto arcs : + std::vector>{{0, 1}, {1, 1}, {0, 2}, {2, 2}}) { + tails.push_back(arcs.first); + heads.push_back(arcs.second); + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + } + LoadSubcircuitConstraint(3, tails, heads, literals, &model, false); + const SatSolver::Status status = SolveIntegerProblemWithLazyEncoding(&model); + EXPECT_EQ(status, SatSolver::Status::INFEASIBLE); +} + +// The graph look like this with a self-loop at 2. If 2 is not selected +// (self-loop) then there is one solution (0,1,3,0) and (0,3,5,0). Otherwise, +// there is 2 more solutions with 2 inserteed in one of the two routes. 
+// +// 0 ---> 1 ---> 4 ------------- +// | | ^ | +// | -----> 2* --> 5 ---> 0 +// | ^ ^ +// | | | +// -------------> 3 ------ +// +TEST(CircuitConstraintTest, RouteConstraint) { + Model model; + std::vector tails; + std::vector heads; + std::vector literals; + for (const auto arcs : std::vector>{{0, 1}, + {0, 3}, + {1, 2}, + {1, 4}, + {2, 2}, + {2, 4}, + {2, 5}, + {3, 2}, + {3, 5}, + {4, 0}, + {5, 0}}) { + tails.push_back(arcs.first); + heads.push_back(arcs.second); + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + } + LoadSubcircuitConstraint(6, tails, heads, literals, &model, true); + const int64_t num_solutions = CountSolutions(&model); + EXPECT_EQ(num_solutions, 3); +} + +TEST(NoCyclePropagatorTest, CountAllSolutions) { + // We create a 2 * 2 grid with diagonal arcs. + Model model; + int num_nodes = 0; + const int num_x = 2; + const int num_y = 2; + const auto get_index = [&num_nodes](int x, int y) { + const int index = x * num_y + y; + num_nodes = std::max(num_nodes, index + 1); + return index; + }; + + std::vector tails; + std::vector heads; + std::vector literals; + for (int x = 0; x < num_x; ++x) { + for (int y = 0; y < num_y; ++y) { + for (const int x_dir : {-1, 0, 1}) { + for (const int y_dir : {-1, 0, 1}) { + const int head_x = x + x_dir; + const int head_y = y + y_dir; + if (x_dir == 0 && y_dir == 0) continue; + if (head_x < 0 || head_x >= num_x) continue; + if (head_y < 0 || head_y >= num_y) continue; + tails.push_back(get_index(x, y)); + heads.push_back(get_index(head_x, head_y)); + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + } + } + } + } + model.TakeOwnership( + new NoCyclePropagator(num_nodes, tails, heads, literals, &model)); + + // Graph is small enough. + CHECK_EQ(num_nodes, 4); + CHECK_EQ(tails.size(), 12); + + // Counts solution with brute-force algo. 
+ int num_expected_solutions = 0; + std::vector> subgraph(num_nodes); + std::vector> components; + const int num_cases = 1 << tails.size(); + for (int mask = 0; mask < num_cases; ++mask) { + for (int n = 0; n < num_nodes; ++n) { + subgraph[n].clear(); + } + for (int a = 0; a < tails.size(); ++a) { + if ((1 << a) & mask) { + subgraph[tails[a]].push_back(heads[a]); + } + } + components.clear(); + FindStronglyConnectedComponents(num_nodes, subgraph, &components); + bool has_cycle = false; + for (const std::vector compo : components) { + if (compo.size() > 1) { + has_cycle = true; + break; + } + } + if (!has_cycle) ++num_expected_solutions; + } + EXPECT_EQ(num_expected_solutions, 543); + + // There is 12 arcs. + // So out of 2^12 solution, we have to exclude all the one with cycles. + EXPECT_EQ(CountSolutions(&model), 543); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/cp_constraints_test.cc b/ortools/sat/cp_constraints_test.cc new file mode 100644 index 0000000000..bceee075ad --- /dev/null +++ b/ortools/sat/cp_constraints_test.cc @@ -0,0 +1,120 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/cp_constraints.h" + +#include + +#include +#include + +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/logging.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/integer_search.h" +#include "ortools/sat/model.h" +#include "ortools/sat/precedences.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_solver.h" + +namespace operations_research { +namespace sat { +namespace { + +TEST(LiteralXorIsTest, OneVariable) { + Model model; + const BooleanVariable a = model.Add(NewBooleanVariable()); + const BooleanVariable b = model.Add(NewBooleanVariable()); + model.Add(LiteralXorIs({Literal(a, true)}, true)); + model.Add(LiteralXorIs({Literal(b, true)}, false)); + SatSolver* solver = model.GetOrCreate(); + EXPECT_TRUE(solver->Propagate()); + EXPECT_TRUE(solver->Assignment().LiteralIsTrue(Literal(a, true))); + EXPECT_TRUE(solver->Assignment().LiteralIsFalse(Literal(b, true))); +} + +// A simple macro to make the code more readable. 
+#define EXPECT_BOUNDS_EQ(var, lb, ub) \ + EXPECT_EQ(model.Get(LowerBound(var)), lb); \ + EXPECT_EQ(model.Get(UpperBound(var)), ub) + +TEST(PartialIsOneOfVarTest, MinMaxPropagation) { + Model model; + const IntegerVariable target_var = model.Add(NewIntegerVariable(-10, 20)); + std::vector vars; + std::vector selectors; + for (int i = 0; i < 10; ++i) { + vars.push_back(model.Add(ConstantIntegerVariable(i))); + selectors.push_back(Literal(model.Add(NewBooleanVariable()), true)); + } + model.Add(PartialIsOneOfVar(target_var, vars, selectors)); + + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_BOUNDS_EQ(target_var, 0, 9); + + model.Add(ClauseConstraint({selectors[0].Negated()})); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_BOUNDS_EQ(target_var, 1, 9); + + model.Add(ClauseConstraint({selectors[8].Negated()})); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_BOUNDS_EQ(target_var, 1, 9); + + model.Add(ClauseConstraint({selectors[9].Negated()})); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_BOUNDS_EQ(target_var, 1, 7); +} + +TEST(GreaterThanAtLeastOneOfPropagatorTest, BasicTest) { + for (int i = 0; i < 2; ++i) { + Model model; + + // We create a simple model with 3 variables and 2 conditional precedences. + // We only add the GreaterThanAtLeastOneOfPropagator() for i == 1. 
+ const IntegerVariable a = model.Add(NewIntegerVariable(0, 3)); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 3)); + const IntegerVariable c = model.Add(NewIntegerVariable(0, 3)); + const Literal ac = Literal(model.Add(NewBooleanVariable()), true); + const Literal bc = Literal(model.Add(NewBooleanVariable()), true); + model.Add(ConditionalLowerOrEqualWithOffset(a, c, 3, ac)); + model.Add(ConditionalLowerOrEqualWithOffset(b, c, 2, bc)); + model.Add(ClauseConstraint({ac, bc})); + if (i == 1) { + model.Add(GreaterThanAtLeastOneOf( + c, {a, b}, {IntegerValue(3), IntegerValue(2)}, {ac, bc}, {})); + } + + // Test that we do propagate more with the extra propagator. + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_EQ(model.Get(LowerBound(c)), i == 0 ? 0 : 2); + + // Test that we find all solutions. + int num_solutions = 0; + while (true) { + const auto status = SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + ++num_solutions; + VLOG(1) << model.Get(Value(a)) << " " << model.Get(Value(b)) << " " + << model.Get(Value(c)); + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + EXPECT_EQ(num_solutions, 18); + } +} + +#undef EXPECT_BOUNDS_EQ + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/cumulative_energy_test.cc b/ortools/sat/cumulative_energy_test.cc new file mode 100644 index 0000000000..8e58b53a28 --- /dev/null +++ b/ortools/sat/cumulative_energy_test.cc @@ -0,0 +1,562 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/cumulative_energy.h" + +#include + +#include +#include +#include +#include +#include + +#include "absl/log/check.h" +#include "absl/random/bit_gen_ref.h" +#include "absl/random/distributions.h" +#include "absl/random/random.h" +#include "absl/strings/str_cat.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/logging.h" +#include "ortools/sat/2d_orthogonal_packing_testing.h" +#include "ortools/sat/cp_model_solver.h" +#include "ortools/sat/cumulative.h" +#include "ortools/sat/diffn_util.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/integer_search.h" +#include "ortools/sat/intervals.h" +#include "ortools/sat/linear_constraint.h" +#include "ortools/sat/model.h" +#include "ortools/sat/precedences.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/sat_solver.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +// An instance is a set of energy tasks and a capacity. 
+struct EnergyTask { + int start_min; + int end_max; + int energy_min; + int energy_max; + int duration_min; + int duration_max; + bool is_optional; +}; + +struct EnergyInstance { + std::vector tasks; + int capacity; +}; + +std::string InstanceDebugString(const EnergyInstance& instance) { + std::string result; + absl::StrAppend(&result, "Instance capacity:", instance.capacity, "\n"); + for (const EnergyTask& task : instance.tasks) { + absl::StrAppend(&result, "[", task.start_min, ", ", task.end_max, + "] duration:", task.duration_min, "..", task.duration_max, + " energy:", task.energy_min, "..", task.energy_max, + " is_optional:", task.is_optional, "\n"); + } + return result; +} + +// Satisfiability using the constraint. +bool SolveUsingConstraint(const EnergyInstance& instance) { + Model model; + std::vector intervals; + std::vector energies; + for (const auto& task : instance.tasks) { + LinearExpression energy; + energy.vars.push_back( + model.Add(NewIntegerVariable(task.energy_min, task.energy_max))); + energy.coeffs.push_back(IntegerValue(1)); + energies.push_back(energy); + if (task.is_optional) { + const Literal is_present = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable start = + model.Add(NewIntegerVariable(task.start_min, task.end_max)); + const IntegerVariable end = + model.Add(NewIntegerVariable(task.start_min, task.end_max)); + const IntegerVariable duration = + model.Add(NewIntegerVariable(task.duration_min, task.duration_max)); + intervals.push_back( + model.Add(NewOptionalInterval(start, end, duration, is_present))); + } else { + intervals.push_back(model.Add(NewIntervalWithVariableSize( + task.start_min, task.end_max, task.duration_min, task.duration_max))); + } + } + + const AffineExpression capacity( + model.Add(ConstantIntegerVariable(instance.capacity))); + + SchedulingConstraintHelper* helper = + new SchedulingConstraintHelper(intervals, &model); + model.TakeOwnership(helper); + SchedulingDemandHelper* demands_helper = 
+ new SchedulingDemandHelper({}, helper, &model); + demands_helper->OverrideLinearizedEnergies(energies); + model.TakeOwnership(demands_helper); + + AddCumulativeOverloadChecker(capacity, helper, demands_helper, &model); + + return SolveIntegerProblemWithLazyEncoding(&model) == + SatSolver::Status::FEASIBLE; +} + +// One task by itself is infeasible. +TEST(CumulativeEnergyTest, UnfeasibleFixedCharacteristics) { + EnergyInstance instance = {{{0, 100, 11, 11, 2, 2, false}}, 5}; + EXPECT_FALSE(SolveUsingConstraint(instance)) << InstanceDebugString(instance); +} + +// Tasks are feasible iff all are at energy min. +TEST(CumulativeEnergyTest, FeasibleEnergyMin) { + EnergyInstance instance = {{ + {-10, 10, 10, 15, 0, 20, false}, + {-10, 10, 15, 20, 0, 20, false}, + {-10, 10, 5, 10, 0, 20, false}, + }, + 3}; + EXPECT_TRUE(SolveUsingConstraint(instance)) << InstanceDebugString(instance); +} + +// Tasks are feasible iff optionals tasks are removed. +TEST(CumulativeEnergyTest, FeasibleRemoveOptionals) { + EnergyInstance instance = {{ + {-10, 10, 1, 1, 1, 1, true}, + {-10, 10, 5, 10, 7, 7, true}, + {-10, 10, 10, 15, 0, 20, false}, + {-10, 10, 15, 20, 0, 20, false}, + {-10, 10, 5, 10, 0, 20, false}, + }, + 3}; + EXPECT_TRUE(SolveUsingConstraint(instance)) << InstanceDebugString(instance); +} + +// This instance was problematic. +TEST(CumulativeEnergyTest, Problematic1) { + EnergyInstance instance = {{ + {2, 18, 6, 7, 5, 10, false}, + {2, 25, 6, 9, 14, 17, false}, + {-4, 19, 6, 9, 10, 20, false}, + {-9, 7, 6, 15, 9, 16, false}, + {-1, 19, 6, 12, 6, 14, false}, + }, + 1}; + EXPECT_TRUE(SolveUsingConstraint(instance)) << InstanceDebugString(instance); +} + +// Satisfiability using a naive model: one task per unit of energy. +// Force energy-based reasoning in Cumulative() and add symmetry breaking, +// or the solver has a much harder time. 
+bool SolveUsingNaiveModel(const EnergyInstance& instance) { + Model model; + std::vector intervals; + std::vector consumptions; + IntegerVariable one = model.Add(ConstantIntegerVariable(1)); + IntervalsRepository* intervals_repository = + model.GetOrCreate(); + + for (const auto& task : instance.tasks) { + if (task.is_optional) { + const Literal is_present = Literal(model.Add(NewBooleanVariable()), true); + for (int i = 0; i < task.energy_min; i++) { + const IntegerVariable start = + model.Add(NewIntegerVariable(task.start_min, task.end_max)); + const IntegerVariable end = + model.Add(NewIntegerVariable(task.start_min, task.end_max)); + + intervals.push_back( + model.Add(NewOptionalInterval(start, end, one, is_present))); + consumptions.push_back(AffineExpression(IntegerValue(1))); + } + } else { + IntegerVariable first_start = kNoIntegerVariable; + IntegerVariable previous_start = kNoIntegerVariable; + for (int i = 0; i < task.energy_min; i++) { + IntervalVariable interval = + model.Add(NewInterval(task.start_min, task.end_max, 1)); + intervals.push_back(interval); + consumptions.push_back(AffineExpression(IntegerValue(1))); + const AffineExpression start_expr = + intervals_repository->Start(interval); + CHECK_EQ(start_expr.coeff, 1); + CHECK_EQ(start_expr.constant, 0); + CHECK_NE(start_expr.var, kNoIntegerVariable); + const IntegerVariable start = start_expr.var; + if (previous_start != kNoIntegerVariable) { + model.Add(LowerOrEqual(previous_start, start)); + } else { + first_start = start; + } + previous_start = start; + } + // start[last] <= start[0] + duration_max - 1 + if (previous_start != kNoIntegerVariable) { + model.Add(LowerOrEqualWithOffset(previous_start, first_start, + -task.duration_max + 1)); + } + } + } + + SatParameters params = + model.Add(NewSatParameters("use_overload_checker_in_cumulative:true")); + model.Add(Cumulative(intervals, consumptions, + AffineExpression(IntegerValue(instance.capacity)))); + + return 
SolveIntegerProblemWithLazyEncoding(&model) == + SatSolver::Status::FEASIBLE; +} + +// Generates random instances, fill the schedule to try and make a tricky case. +EnergyInstance GenerateRandomInstance(int num_tasks, + absl::BitGenRef randomizer) { + const int capacity = absl::Uniform(randomizer, 1, 12); + std::vector tasks; + for (int i = 0; i < num_tasks; i++) { + int start_min = absl::Uniform(randomizer, -10, 10); + int duration_min = absl::Uniform(randomizer, 1, 21); + int duration_max = absl::Uniform(randomizer, 1, 21); + if (duration_min > duration_max) std::swap(duration_min, duration_max); + int end_max = start_min + duration_max + absl::Uniform(randomizer, 0, 10); + int energy_min = (capacity * 30) / num_tasks; + int energy_max = energy_min + absl::Uniform(randomizer, 1, 10); + tasks.push_back({start_min, end_max, energy_min, energy_max, duration_min, + duration_max, false}); + } + + return {tasks, capacity}; +} + +// Compare constraint to naive model. +TEST(CumulativeEnergyTest, CompareToNaiveModel) { + const int num_tests = 10; + std::mt19937 randomizer(12345); + for (int test = 0; test < num_tests; test++) { + EnergyInstance instance = + GenerateRandomInstance(absl::Uniform(randomizer, 4, 7), randomizer); + bool result_constraint = SolveUsingConstraint(instance); + bool result_naive = SolveUsingNaiveModel(instance); + EXPECT_EQ(result_naive, result_constraint) << InstanceDebugString(instance); + LOG(INFO) << result_constraint; + } +} + +struct CumulativeTasks { + int64_t duration; + int64_t demand; + int64_t min_start; + int64_t max_end; +}; + +enum class PropagatorChoice { + OVERLOAD, + OVERLOAD_DFF, +}; +bool TestOverloadCheckerPropagation( + absl::Span tasks, int capacity_min_before, + int capacity_min_after, int capacity_max, + PropagatorChoice propagator_choice = PropagatorChoice::OVERLOAD) { + Model model; + IntegerTrail* integer_trail = model.GetOrCreate(); + PrecedencesPropagator* precedences = + model.GetOrCreate(); + + const int num_tasks = 
tasks.size(); + std::vector interval_vars(num_tasks); + std::vector demands(num_tasks); + const AffineExpression capacity = + AffineExpression(integer_trail->AddIntegerVariable( + IntegerValue(capacity_min_before), IntegerValue(capacity_max))); + + // Build the task variables. + for (int t = 0; t < num_tasks; ++t) { + interval_vars[t] = model.Add( + NewInterval(tasks[t].min_start, tasks[t].max_end, tasks[t].duration)); + demands[t] = AffineExpression(IntegerValue(tasks[t].demand)); + } + + // Propagate properly the other bounds of the intervals. + EXPECT_TRUE(precedences->Propagate()); + + // Propagator responsible for filtering the capacity variable. + SchedulingConstraintHelper* helper = + new SchedulingConstraintHelper(interval_vars, &model); + model.TakeOwnership(helper); + SchedulingDemandHelper* demands_helper = + new SchedulingDemandHelper(demands, helper, &model); + model.TakeOwnership(demands_helper); + + if (propagator_choice == PropagatorChoice::OVERLOAD) { + AddCumulativeOverloadChecker(capacity, helper, demands_helper, &model); + } else if (propagator_choice == PropagatorChoice::OVERLOAD_DFF) { + AddCumulativeOverloadCheckerDff(capacity, helper, demands_helper, &model); + } else { + LOG(FATAL) << "Unknown propagator choice!"; + } + + // Check initial satisfiability. + auto* sat_solver = model.GetOrCreate(); + if (!sat_solver->Propagate()) return false; + + // Check capacity. + EXPECT_EQ(capacity_min_after, integer_trail->LowerBound(capacity)); + return true; +} + +// This is a trivially infeasible instance. +TEST(OverloadCheckerTest, UNSAT1) { + EXPECT_FALSE( + TestOverloadCheckerPropagation({{4, 2, 0, 7}, {4, 2, 0, 7}}, 2, 2, 2)); +} + +// This is an infeasible instance on which timetabling finds nothing. The +// overload checker finds the contradiction. 
+TEST(OverloadCheckerTest, UNSAT2) { + EXPECT_FALSE(TestOverloadCheckerPropagation( + {{4, 2, 0, 8}, {4, 2, 0, 8}, {4, 2, 0, 8}}, 2, 2, 2)); +} + +// This is the same instance as in UNSAT1 but here the capacity can increase. +TEST(OverloadCheckerTest, IncreaseCapa1) { + EXPECT_TRUE( + TestOverloadCheckerPropagation({{4, 2, 2, 9}, {4, 2, 2, 9}}, 2, 3, 10)); +} + +// This is an instance in which tasks can be perfectly packed in a rectangle of +// size 5 to 6. OverloadChecker increases the capacity from 3 to 5. +TEST(OverloadCheckerTest, IncreaseCapa2) { + EXPECT_TRUE(TestOverloadCheckerPropagation({{5, 2, 2, 8}, + {2, 3, 2, 8}, + {2, 1, 2, 8}, + {1, 3, 2, 8}, + {1, 3, 2, 8}, + {3, 2, 2, 8}}, + 3, 5, 10)); +} + +// This is an instance in which OverloadChecker increases the capacity. +TEST(OverloadCheckerTest, IncreaseCapa3) { + EXPECT_TRUE(TestOverloadCheckerPropagation( + {{1, 3, 3, 6}, {1, 3, 3, 6}, {1, 1, 3, 8}}, 0, 2, 10)); +} + +// This is a trivially infeasible instance with negative times. +TEST(OverloadCheckerTest, UNSATNeg1) { + EXPECT_FALSE( + TestOverloadCheckerPropagation({{4, 2, -7, 0}, {4, 2, -7, 0}}, 2, 2, 2)); +} + +// This is an infeasible instance with negative times on which timetabling finds +// nothing. The overload checker finds the contradiction. +TEST(OverloadCheckerTest, UNSATNeg2) { + EXPECT_FALSE(TestOverloadCheckerPropagation( + {{4, 2, -4, 4}, {4, 2, -4, 4}, {4, 2, -4, 4}}, 2, 2, 2)); +} + +// This is the same instance as in UNSATNeg1 but here the capacity can increase. +TEST(OverloadCheckerTest, IncreaseCapaNeg1) { + EXPECT_TRUE(TestOverloadCheckerPropagation({{4, 2, -10, -3}, {4, 2, -10, -3}}, + 2, 3, 10)); +} + +// This is an instance with negative times in which tasks can be perfectly +// packed in a rectangle of size 5 to 6. OverloadChecker increases the capacity +// from 3 to 5. 
+TEST(OverloadCheckerTest, IncreaseCapaNeg2) { + EXPECT_TRUE(TestOverloadCheckerPropagation({{5, 2, -2, 4}, + {2, 3, -2, 4}, + {2, 1, -2, 4}, + {1, 3, -2, 4}, + {1, 3, -2, 4}, + {3, 2, -2, 4}}, + 3, 5, 10)); +} + +// This is an instance with negative times in which OverloadChecker increases +// the capacity. +TEST(OverloadCheckerTest, IncreaseCapaNeg3) { + EXPECT_TRUE(TestOverloadCheckerPropagation( + {{1, 3, -3, 0}, {1, 3, -3, 0}, {1, 1, -3, 2}}, 0, 2, 10)); +} + +TEST(OverloadCheckerTest, OptionalTaskPropagatedToAbsent) { + Model model; + const Literal is_present = Literal(model.Add(NewBooleanVariable()), true); + + // TODO(user): Fix the code! the propagation is dependent on the order of + // tasks. If we use the proper theta-lambda tree, this will be fixed. + const IntervalVariable i2 = model.Add(NewInterval(0, 10, /*size=*/8)); + const IntervalVariable i1 = + model.Add(NewOptionalInterval(0, 10, /*size=*/8, is_present)); + + SchedulingConstraintHelper* helper = + new SchedulingConstraintHelper({i1, i2}, &model); + model.TakeOwnership(helper); + const AffineExpression cte(IntegerValue(2)); + SchedulingDemandHelper* demands_helper = + new SchedulingDemandHelper({cte, cte}, helper, &model); + model.TakeOwnership(demands_helper); + + AddCumulativeOverloadChecker(cte, helper, demands_helper, &model); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_FALSE(model.Get(Value(is_present))); +} + +TEST(OverloadCheckerTest, OptionalTaskMissedPropagationCase) { + Model model; + const Literal is_present = Literal(model.Add(NewBooleanVariable()), true); + const IntervalVariable i1 = + model.Add(NewOptionalInterval(0, 10, /*size=*/8, is_present)); + const IntervalVariable i2 = + model.Add(NewOptionalInterval(0, 10, /*size=*/8, is_present)); + + SchedulingConstraintHelper* helper = + new SchedulingConstraintHelper({i1, i2}, &model); + model.TakeOwnership(helper); + const AffineExpression cte(IntegerValue(2)); + SchedulingDemandHelper* demands_helper = + new 
SchedulingDemandHelper({cte, cte}, helper, &model); + model.TakeOwnership(demands_helper); + + AddCumulativeOverloadChecker(cte, helper, demands_helper, &model); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_FALSE(model.GetOrCreate()->Assignment().VariableIsAssigned( + is_present.Variable())); +} + +TEST(OverloadCheckerDffTest, DffIsNeeded) { + const std::vector tasks = { + {.duration = 10, .demand = 5, .min_start = 0, .max_end = 22}, + {.duration = 10, .demand = 5, .min_start = 0, .max_end = 22}, + {.duration = 10, .demand = 5, .min_start = 0, .max_end = 22}, + {.duration = 10, .demand = 5, .min_start = 0, .max_end = 22}, + }; + EXPECT_FALSE(TestOverloadCheckerPropagation(tasks, /*capacity_min_before=*/9, + /*capacity_min_after=*/9, + /*capacity_max=*/9, + PropagatorChoice::OVERLOAD_DFF)); +} + +TEST(OverloadCheckerDffTest, NoConflictRandomFeasibleProblem) { + absl::BitGen random; + for (int i = 0; i < 100; ++i) { + const std::vector rectangles = GenerateNonConflictingRectangles( + absl::Uniform(random, 6, 20), random); + Rectangle bounding_box; + for (const auto& item : rectangles) { + bounding_box.x_min = std::min(bounding_box.x_min, item.x_min); + bounding_box.x_max = std::max(bounding_box.x_max, item.x_max); + bounding_box.y_min = std::min(bounding_box.y_min, item.y_min); + bounding_box.y_max = std::max(bounding_box.y_max, item.y_max); + } + const std::vector range_items = + MakeItemsFromRectangles(rectangles, 0.3, random); + std::vector tasks(range_items.size()); + + for (int i = 0; i < range_items.size(); ++i) { + tasks[i] = {.duration = range_items[i].x_size.value(), + .demand = range_items[i].y_size.value(), + .min_start = range_items[i].bounding_area.x_min.value(), + .max_end = range_items[i].bounding_area.x_max.value()}; + } + EXPECT_TRUE(TestOverloadCheckerPropagation( + tasks, /*capacity_min_before=*/bounding_box.SizeY().value(), + /*capacity_min_after=*/bounding_box.SizeY().value(), + /*capacity_max=*/bounding_box.SizeY().value(), + 
PropagatorChoice::OVERLOAD_DFF)); + } +} + +bool TestIsAfterCumulative(absl::Span tasks, + int capacity_max, int expected_end_min) { + Model model; + IntegerTrail* integer_trail = model.GetOrCreate(); + PrecedencesPropagator* precedences = + model.GetOrCreate(); + + const int num_tasks = tasks.size(); + std::vector interval_vars(num_tasks); + std::vector demands(num_tasks); + const AffineExpression capacity = + AffineExpression(integer_trail->AddIntegerVariable( + IntegerValue(capacity_max), IntegerValue(capacity_max))); + + // Build the task variables. + std::vector subtasks; + for (int t = 0; t < num_tasks; ++t) { + interval_vars[t] = model.Add( + NewInterval(tasks[t].min_start, tasks[t].max_end, tasks[t].duration)); + demands[t] = AffineExpression(IntegerValue(tasks[t].demand)); + subtasks.push_back(t); + } + + // Propagate properly the other bounds of the intervals. + EXPECT_TRUE(precedences->Propagate()); + + // Propagator responsible for filtering the capacity variable. + SchedulingConstraintHelper* helper = + new SchedulingConstraintHelper(interval_vars, &model); + model.TakeOwnership(helper); + SchedulingDemandHelper* demands_helper = + new SchedulingDemandHelper(demands, helper, &model); + model.TakeOwnership(demands_helper); + + const IntegerVariable var = + integer_trail->AddIntegerVariable(IntegerValue(0), IntegerValue(100)); + + std::vector offsets(subtasks.size(), IntegerValue(0)); + CumulativeIsAfterSubsetConstraint* propag = + new CumulativeIsAfterSubsetConstraint(var, capacity, subtasks, offsets, + helper, demands_helper, &model); + propag->RegisterWith(model.GetOrCreate()); + model.TakeOwnership(propag); + + // Check initial satisfiability. + auto* sat_solver = model.GetOrCreate(); + if (!sat_solver->Propagate()) return false; + + // Check bound + EXPECT_EQ(expected_end_min, integer_trail->LowerBound(var)); + return true; +} + +// We detect that the interval cannot overlap. 
+TEST(IsAfterCumulativeTest, BasicCase1) { + // duration, demand, start_min, end_max + EXPECT_TRUE(TestIsAfterCumulative({{4, 2, 0, 8}, {4, 2, 0, 10}}, + /*capacity_max=*/3, + /*expected_end_min=*/8)); +} + +// Now, one interval can overlap. It is also after the other, so the best bound +// we get is not that great: energy = 2 + 8 + 8 = 18, with capa = 3, we get 6. +// +// TODO(user): Maybe we can do more advanced reasoning to recover the 8 here. +TEST(IsAfterCumulativeTest, BasicCase2) { + // duration, demand, start_min, end_max. + EXPECT_TRUE(TestIsAfterCumulative({{2, 1, 3, 8}, {4, 2, 0, 8}, {4, 2, 0, 10}}, + /*capacity_max=*/3, + /*expected_end_min=*/6)); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/cumulative_test.cc b/ortools/sat/cumulative_test.cc new file mode 100644 index 0000000000..e14f2c63ce --- /dev/null +++ b/ortools/sat/cumulative_test.cc @@ -0,0 +1,421 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#include "ortools/sat/cumulative.h"
+
+#include
+#include
+#include
+#include
+#include
+
+#include "absl/log/check.h"
+#include "absl/random/random.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_join.h"
+#include "gtest/gtest.h"
+#include "ortools/base/logging.h"
+#include "ortools/sat/integer.h"
+#include "ortools/sat/integer_search.h"
+#include "ortools/sat/intervals.h"
+#include "ortools/sat/model.h"
+#include "ortools/sat/sat_base.h"
+#include "ortools/sat/sat_parameters.pb.h"
+#include "ortools/sat/sat_solver.h"
+#include "ortools/util/strong_integers.h"
+
+namespace operations_research {
+namespace sat {
+namespace {
+
+// RcpspInstance contains the data to define an instance of the Resource
+// Constrained Project Scheduling Problem (RCPSP). We only consider a restricted
+// variant of the RCPSP which is the problem of scheduling a set of
+// non-preemptive tasks that consume a given quantity of a resource without
+// exceeding the resource's capacity. We assume that the duration of a task, its
+// demand, and the resource capacity are fixed.
+struct RcpspInstance { + RcpspInstance() : capacity(0), min_start(0), max_end(0) {} + std::vector durations; + std::vector optional; + std::vector demands; + int64_t capacity; + int64_t min_start; + int64_t max_end; + std::string DebugString() const { + std::string result = "RcpspInstance {\n"; + result += " demands: {" + absl::StrJoin(demands, ", ") + "}\n"; + result += " durations: {" + absl::StrJoin(durations, ", ") + "}\n"; + result += " optional: {" + absl::StrJoin(optional, ", ") + "}\n"; + result += " min_start: " + absl::StrCat(min_start) + "\n"; + result += " max_end: " + absl::StrCat(max_end) + "\n"; + result += " capacity: " + absl::StrCat(capacity) + "\n}"; + return result; + } +}; + +// Generates a random RcpspInstance with num_tasks tasks such that: +// - the duration of a task is a fixed random number in +// [min_duration, max_durations]; +// - tasks can be optional if enable_optional is true; +// - the demand of a task is a fixed random number in [min_demand, max_demand]; +// - the resource capacity is a fixed random number in +// [max_demand - 1, max_capacity]. This allows the capacity to be lower than +// the highest demand to generate trivially unfeasible instances. +// - the energy (i.e. surface) of the resource is 120% of the total energy of +// the tasks. This allows the generation of infeasible instances. +RcpspInstance GenerateRandomInstance(int num_tasks, int min_duration, + int max_duration, int min_demand, + int max_demand, int max_capacity, + int min_start, bool enable_optional) { + absl::BitGen random; + RcpspInstance instance; + int energy = 0; + + // Generate task demands and durations. 
+ int max_of_all_durations = 0; + for (int t = 0; t < num_tasks; ++t) { + const int duration = absl::Uniform(random, min_duration, max_duration + 1); + const int demand = absl::Uniform(random, min_demand, max_demand + 1); + energy += duration * demand; + max_of_all_durations = std::max(max_of_all_durations, duration); + instance.durations.push_back(duration); + instance.demands.push_back(demand); + instance.optional.push_back(enable_optional && + absl::Bernoulli(random, 0.5)); + } + + // Generate the resource capacity. + instance.capacity = absl::Uniform(random, max_demand, max_capacity + 1); + + // Generate the time window. + instance.min_start = min_start; + instance.max_end = + min_start + + std::max(static_cast(std::round(energy * 1.2 / instance.capacity)), + max_of_all_durations); + return instance; +} + +template +int CountAllSolutions(const RcpspInstance& instance, SatParameters parameters, + const Cumulative& cumulative) { + Model model; + parameters.set_use_disjunctive_constraint_in_cumulative(false); + model.GetOrCreate()->SetParameters(parameters); + + DCHECK_EQ(instance.demands.size(), instance.durations.size()); + DCHECK_LE(instance.min_start, instance.max_end); + + const int num_tasks = instance.demands.size(); + std::vector intervals(num_tasks); + std::vector demands(num_tasks); + const AffineExpression capacity = IntegerValue(instance.capacity); + + for (int t = 0; t < num_tasks; ++t) { + if (instance.optional[t]) { + const Literal is_present = Literal(model.Add(NewBooleanVariable()), true); + intervals[t] = + model.Add(NewOptionalInterval(instance.min_start, instance.max_end, + instance.durations[t], is_present)); + } else { + intervals[t] = model.Add(NewInterval(instance.min_start, instance.max_end, + instance.durations[t])); + } + demands[t] = IntegerValue(instance.demands[t]); + } + + model.Add(cumulative(intervals, demands, capacity, nullptr)); + + // Make sure that every Boolean variable is considered as a decision variable + // to be fixed. 
+ if (parameters.search_branching() == SatParameters::FIXED_SEARCH) { + SatSolver* sat_solver = model.GetOrCreate(); + for (int i = 0; i < sat_solver->NumVariables(); ++i) { + model.Add( + NewIntegerVariableFromLiteral(Literal(BooleanVariable(i), true))); + } + } + + int num_solutions_found = 0; + // Loop until there is no remaining solution to find. + while (true) { + // Try to find a solution. + const SatSolver::Status status = + SolveIntegerProblemWithLazyEncoding(&model); + // Leave the loop if there is no solution left. + if (status != SatSolver::Status::FEASIBLE) break; + num_solutions_found++; + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + + return num_solutions_found; +} + +TEST(CumulativeTimeDecompositionTest, AllPermutations) { + RcpspInstance instance; + instance.demands = {1, 1, 1, 1, 1}; + instance.durations = {1, 1, 1, 1, 1}; + instance.optional = {false, false, false, false, false}; + instance.capacity = 1; + instance.min_start = 0; + instance.max_end = 5; + ASSERT_EQ(120, CountAllSolutions(instance, {}, CumulativeTimeDecomposition)); +} + +TEST(CumulativeTimeDecompositionTest, FindAll) { + RcpspInstance instance; + instance.demands = {1, 1, 1, 1, 4, 4}; + instance.durations = {1, 2, 3, 3, 3, 3}; + instance.optional = {false, false, false, false, false, false}; + instance.capacity = 4; + instance.min_start = 0; + instance.max_end = 11; + ASSERT_EQ(2040, CountAllSolutions(instance, {}, CumulativeTimeDecomposition)); + ASSERT_EQ(2040, CountAllSolutions(instance, {}, CumulativeUsingReservoir)); +} + +TEST(CumulativeTimeDecompositionTest, OptionalTasks1) { + RcpspInstance instance; + instance.demands = {3, 3, 3}; + instance.durations = {1, 1, 1}; + instance.optional = {true, true, true}; + instance.capacity = 7; + instance.min_start = 0; + instance.max_end = 2; + ASSERT_EQ(25, CountAllSolutions(instance, {}, Cumulative)); + ASSERT_EQ(25, CountAllSolutions(instance, {}, CumulativeUsingReservoir)); +} + +// Up to two tasks can be scheduled at the 
same time. +TEST(CumulativeTimeDecompositionTest, OptionalTasks2) { + RcpspInstance instance; + instance.demands = {3, 3, 3}; + instance.durations = {3, 3, 3}; + instance.optional = {true, true, true}; + instance.capacity = 7; + instance.min_start = 0; + instance.max_end = 3; + ASSERT_EQ(7, CountAllSolutions(instance, {}, CumulativeTimeDecomposition)); + ASSERT_EQ(7, CountAllSolutions(instance, {}, CumulativeUsingReservoir)); +} + +TEST(CumulativeTimeDecompositionTest, RegressionTest1) { + RcpspInstance instance; + instance.demands = {5, 4, 1}; + instance.durations = {1, 1, 2}; + instance.optional = {false, false, false}; + instance.capacity = 5; + instance.min_start = 0; + instance.max_end = 2; + ASSERT_EQ(0, CountAllSolutions(instance, {}, CumulativeTimeDecomposition)); +} + +// Cumulative was pruning too many solutions on that instance. +TEST(CumulativeTimeDecompositionTest, RegressionTest2) { + SatParameters parameters; + parameters.set_use_overload_checker_in_cumulative(false); + parameters.set_use_timetable_edge_finding_in_cumulative(false); + RcpspInstance instance; + instance.demands = {4, 4, 3}; + instance.durations = {2, 2, 3}; + instance.optional = {true, true, true}; + instance.capacity = 6; + instance.min_start = 0; + instance.max_end = 5; + ASSERT_EQ( + 22, CountAllSolutions(instance, parameters, CumulativeTimeDecomposition)); +} + +bool CheckCumulative(const SatParameters& parameters, + const RcpspInstance& instance) { + const int64_t num_solutions_ref = + CountAllSolutions(instance, parameters, CumulativeTimeDecomposition); + const int64_t num_solutions_test = + CountAllSolutions(instance, parameters, Cumulative); + if (num_solutions_ref != num_solutions_test) { + LOG(INFO) << "Want: " << num_solutions_ref + << " solutions, got: " << num_solutions_test << " solutions."; + LOG(INFO) << instance.DebugString(); + return false; + } + const int64_t num_solutions_reservoir = + CountAllSolutions(instance, parameters, CumulativeUsingReservoir); + if 
(num_solutions_ref != num_solutions_reservoir) {
+    LOG(INFO) << "Want: " << num_solutions_ref
+              << " solutions, got: " << num_solutions_reservoir
+              << " solutions.";
+    LOG(INFO) << instance.DebugString();
+    return false;
+  }
+  return true;
+}
+
+// Checks that the cumulative constraint performs trivial propagation by
+// updating the capacity and demand variables.
+TEST(CumulativeTest, CapacityAndDemand) {
+  Model model;
+  SatSolver* sat_solver = model.GetOrCreate();
+  const IntervalVariable interval = model.Add(NewInterval(-1000, 1000, 1));
+  const IntegerVariable demand = model.Add(NewIntegerVariable(5, 15));
+  const IntegerVariable capacity = model.Add(NewIntegerVariable(0, 10));
+  const IntegerTrail* integer_trail = model.GetOrCreate();
+  model.Add(Cumulative({interval}, {AffineExpression(demand)},
+                       AffineExpression(capacity)));
+  ASSERT_TRUE(sat_solver->Propagate());
+  ASSERT_EQ(integer_trail->LowerBound(capacity), 5);
+  ASSERT_EQ(integer_trail->UpperBound(capacity), 10);
+  ASSERT_EQ(integer_trail->LowerBound(demand), 5);
+  ASSERT_EQ(integer_trail->UpperBound(demand), 10);
+}
+
+// Checks that the cumulative constraint adapts the demand of the task to
+// prevent the capacity overload.
+TEST(CumulativeTest, CapacityAndZeroDemand) { + Model model; + SatSolver* sat_solver = model.GetOrCreate(); + const IntegerVariable start = model.Add(NewIntegerVariable(-1000, 1000)); + const IntegerVariable size = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable end = model.Add(NewIntegerVariable(-1000, 1000)); + const IntervalVariable interval = model.Add(NewInterval(start, end, size)); + const IntegerVariable demand = model.Add(NewIntegerVariable(11, 15)); + const IntegerVariable capacity = model.Add(NewIntegerVariable(0, 10)); + const IntegerTrail* integer_trail = model.GetOrCreate(); + model.Add(Cumulative({interval}, {AffineExpression(demand)}, + AffineExpression(capacity))); + ASSERT_TRUE(sat_solver->Propagate()); + ASSERT_EQ(integer_trail->LowerBound(capacity), 0); + ASSERT_EQ(integer_trail->UpperBound(capacity), 10); + ASSERT_EQ(integer_trail->LowerBound(demand), 11); + ASSERT_EQ(integer_trail->UpperBound(demand), 15); + ASSERT_EQ(integer_trail->UpperBound(size), 0); +} + +// Checks that the cumulative constraint removes the task to prevent the +// capacity overload. +TEST(CumulativeTest, CapacityAndOptionalTask) { + Model model; + SatSolver* sat_solver = model.GetOrCreate(); + const Literal l = Literal(model.Add(NewBooleanVariable()), true); + const IntervalVariable interval = + model.Add(NewOptionalInterval(-1000, 1000, 1, l)); + const IntegerVariable demand = model.Add(ConstantIntegerVariable(15)); + const IntegerVariable capacity = model.Add(ConstantIntegerVariable(10)); + model.Add(Cumulative({interval}, {AffineExpression(demand)}, + AffineExpression(capacity))); + ASSERT_TRUE(sat_solver->Propagate()); + ASSERT_FALSE(model.Get(Value(l))); +} + +// Cumulative was pruning too many solutions on that instance. 
+TEST(CumulativeTest, RegressionTest1) {
+  SatParameters parameters;
+  parameters.set_use_overload_checker_in_cumulative(false);
+  parameters.set_use_timetable_edge_finding_in_cumulative(false);
+  RcpspInstance instance;
+  instance.demands = {4, 4, 3};
+  instance.durations = {2, 2, 3};
+  instance.optional = {true, true, true};
+  instance.capacity = 6;
+  instance.min_start = 0;
+  instance.max_end = 5;
+  ASSERT_EQ(22, CountAllSolutions(instance, parameters, Cumulative));
+}
+
+// Cumulative was pruning too many solutions on that instance.
+TEST(CumulativeTest, RegressionTest2) {
+  SatParameters parameters;
+  parameters.set_use_overload_checker_in_cumulative(false);
+  parameters.set_use_timetable_edge_finding_in_cumulative(false);
+  RcpspInstance instance;
+  instance.demands = {5, 4};
+  instance.durations = {4, 4};
+  instance.optional = {true, true};
+  instance.capacity = 6;
+  instance.min_start = 0;
+  instance.max_end = 7;
+  ASSERT_EQ(9, CountAllSolutions(instance, parameters, Cumulative));
+}
+
+// ========================================================================
+// All the tests below check that the cumulative propagator finds the exact
+// same number of solutions as its time point decomposition.
+// ========================================================================
+
+// Param1: Number of tasks.
+// Param2: Enable overload checking.
+// Param3: Enable timetable edge finding.
+typedef ::testing::tuple CumulativeTestParams; + +class RandomCumulativeTest + : public ::testing::TestWithParam { + protected: + int GetNumTasks() { return ::testing::get<0>(GetParam()); } + + SatParameters GetSatParameters() { + SatParameters parameters; + parameters.set_use_disjunctive_constraint_in_cumulative(false); + parameters.set_use_overload_checker_in_cumulative( + ::testing::get<1>(GetParam())); + parameters.set_use_timetable_edge_finding_in_cumulative( + ::testing::get<2>(GetParam())); + return parameters; + } +}; + +class FastRandomCumulativeTest : public RandomCumulativeTest {}; +class SlowRandomCumulativeTest : public RandomCumulativeTest {}; + +TEST_P(FastRandomCumulativeTest, FindAll) { + ASSERT_TRUE(CheckCumulative( + GetSatParameters(), + GenerateRandomInstance(GetNumTasks(), 1, 4, 1, 5, 7, 0, false))); +} + +TEST_P(FastRandomCumulativeTest, FindAllNegativeTime) { + ASSERT_TRUE(CheckCumulative( + GetSatParameters(), + GenerateRandomInstance(GetNumTasks(), 1, 4, 1, 5, 7, -100, false))); +} + +TEST_P(SlowRandomCumulativeTest, FindAllZeroDuration) { + ASSERT_TRUE(CheckCumulative( + GetSatParameters(), + GenerateRandomInstance(GetNumTasks(), 0, 4, 1, 5, 7, 0, false))); +} + +TEST_P(SlowRandomCumulativeTest, FindAllZeroDemand) { + ASSERT_TRUE(CheckCumulative( + GetSatParameters(), + GenerateRandomInstance(GetNumTasks(), 1, 4, 0, 5, 7, 0, false))); +} + +TEST_P(SlowRandomCumulativeTest, FindAllOptionalTasks) { + ASSERT_TRUE(CheckCumulative( + GetSatParameters(), + GenerateRandomInstance(GetNumTasks(), 1, 4, 0, 5, 7, 0, true))); +} + +INSTANTIATE_TEST_SUITE_P( + All, FastRandomCumulativeTest, + ::testing::Combine(::testing::Range(3, DEBUG_MODE ? 4 : 6), + ::testing::Bool(), ::testing::Bool())); + +INSTANTIATE_TEST_SUITE_P( + All, SlowRandomCumulativeTest, + ::testing::Combine(::testing::Range(3, DEBUG_MODE ? 
4 : 5), + ::testing::Bool(), ::testing::Bool())); + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/cuts_test.cc b/ortools/sat/cuts_test.cc new file mode 100644 index 0000000000..130db4669f --- /dev/null +++ b/ortools/sat/cuts_test.cc @@ -0,0 +1,1163 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/cuts.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/numeric/int128.h" +#include "absl/strings/str_cat.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/strong_vector.h" +#include "ortools/sat/implied_bounds.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/linear_constraint.h" +#include "ortools/sat/linear_constraint_manager.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/util/fp_utils.h" +#include "ortools/util/sorted_interval_list.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::EndsWith; +using ::testing::StartsWith; + +std::vector IntegerValueVector(absl::Span values) { + std::vector result; + for (const int v : values) result.push_back(IntegerValue(v)); + return result; +} + +TEST(GetSuperAdditiveRoundingFunctionTest, AllSmallValues) { + const int max_divisor = 25; + 
for (IntegerValue max_t(1); max_t <= 9; ++max_t) { + for (IntegerValue max_scaling(1); max_scaling <= 9; max_scaling++) { + for (IntegerValue divisor(1); divisor <= max_divisor; ++divisor) { + for (IntegerValue rhs_remainder(1); rhs_remainder < divisor; + ++rhs_remainder) { + const std::string info = absl::StrCat( + " rhs_remainder = ", rhs_remainder.value(), + " divisor = ", divisor.value(), " max_t = ", max_t.value(), + " max_scaling = ", max_scaling.value()); + const auto f = GetSuperAdditiveRoundingFunction( + rhs_remainder, divisor, + std::min(max_t, + GetFactorT(rhs_remainder, divisor, IntegerValue(100))), + max_scaling); + ASSERT_EQ(f(IntegerValue(0)), 0) << info; + ASSERT_GE(f(divisor), 1) << info; + ASSERT_LE(f(divisor), max_scaling * max_t) << info; + for (IntegerValue a(0); a < divisor; ++a) { + IntegerValue min_diff = kMaxIntegerValue; + for (IntegerValue b(1); b < divisor; ++b) { + min_diff = std::min(min_diff, f(a + b) - f(a) - f(b)); + ASSERT_GE(min_diff, 0) + << info << ", f(" << a << ")=" << f(a) << " + f(" << b + << ")=" << f(b) << " <= f(" << a + b << ")=" << f(a + b); + } + + // TODO(user): Our discretized "mir" function is not always + // maximal. Try to fix it? + if (a <= rhs_remainder || max_scaling != 2) continue; + if (rhs_remainder * max_t < divisor / 2) continue; + + // min_diff > 0 shows that our function is dominated (i.e. not + // maximal) since f(a) could be increased by 1/2. 
+ ASSERT_EQ(min_diff, 0) + << "Not maximal at " << info << " f(" << a << ") = " << f(a) + << " min_diff:" << min_diff; + } + } + } + } + } +} + +TEST(GetSuperAdditiveStrengtheningFunction, AllSmallValues) { + for (const int64_t rhs : {13, 14}) { // test odd/even + for (int64_t min_magnitude = 1; min_magnitude <= rhs; ++min_magnitude) { + const auto f = GetSuperAdditiveStrengtheningFunction(rhs, min_magnitude); + + // Check super additivity in -[50, 50] + for (int a = -50; a <= 50; ++a) { + for (int b = -50; b <= 50; ++b) { + ASSERT_LE(f(a) + f(b), f(a + b)) + << " a=" << a << " b=" << b << " min=" << min_magnitude + << " rhs=" << rhs; + } + } + } + } +} + +TEST(GetSuperAdditiveStrengtheningMirFunction, AllSmallValues) { + for (const int64_t rhs : {13, 14}) { // test odd/even + for (int64_t scaling = 1; scaling <= rhs; ++scaling) { + const auto f = GetSuperAdditiveStrengtheningMirFunction(rhs, scaling); + + // Check super additivity in -[50, 50] + for (int a = -50; a <= 50; ++a) { + for (int b = -50; b <= 50; ++b) { + ASSERT_LE(f(a) + f(b), f(a + b)) + << " a=" << a << " b=" << b << " scaling=" << scaling + << " rhs=" << rhs; + } + } + } + } +} + +TEST(CutDataTest, ComputeViolation) { + CutData cut; + cut.rhs = 2; + cut.terms.push_back({.lp_value = 1.2, .coeff = 1}); + cut.terms.push_back({.lp_value = 0.5, .coeff = 2}); + EXPECT_COMPARABLE(cut.ComputeViolation(), 0.2, 1e-10); +} + +template +std::string GetCutString(const Helper& helper) { + LinearConstraint ct; + CutDataBuilder builder; + EXPECT_TRUE(builder.ConvertToLinearConstraint(helper.cut(), &ct)); + return ct.DebugString(); +} + +TEST(CoverCutHelperTest, SimpleExample) { + // 6x0 + 4x1 + 10x2 <= 9. + std::vector vars = {IntegerVariable(0), IntegerVariable(2), + IntegerVariable(4)}; + std::vector coeffs = IntegerValueVector({6, 4, 10}); + std::vector lbs = IntegerValueVector({0, 0, 0}); + std::vector lp_values{1.0, 0.5, 0.1}; // Tight. + + // Note(user): the ub of the last variable is not used. 
But the first two + // are even though only the second one is required for the validity of the + // cut. + std::vector ubs = IntegerValueVector({1, 1, 10}); + + CutData data; + data.FillFromParallelVectors(IntegerValue(9), vars, coeffs, lp_values, lbs, + ubs); + data.ComplementForPositiveCoefficients(); + CoverCutHelper helper; + EXPECT_TRUE(helper.TrySimpleKnapsack(data)); + EXPECT_EQ(GetCutString(helper), "1*X0 1*X1 1*X2 <= 1"); + EXPECT_EQ(helper.Info(), "lift=1"); +} + +// I tried to reproduce bug 169094958, but if the base constraint is tight, +// the bug was triggered only due to numerical imprecision. A simple way to +// trigger it is like with this test if the given LP value just violate the +// initial constraint. +TEST(CoverCutHelperTest, WeirdExampleWithViolatedConstraint) { + // x0 + x1 <= 9. + std::vector vars = {IntegerVariable(0), IntegerVariable(2)}; + std::vector coeffs = IntegerValueVector({1, 1}); + std::vector lbs = IntegerValueVector({ + 0, + 0, + }); + std::vector ubs = IntegerValueVector({10, 13}); + std::vector lp_values{0.0, 12.6}; // violated. 
+ + CutData data; + data.FillFromParallelVectors(IntegerValue(9), vars, coeffs, lp_values, lbs, + ubs); + data.ComplementForPositiveCoefficients(); + CoverCutHelper helper; + EXPECT_TRUE(helper.TrySimpleKnapsack(data)); + EXPECT_EQ(GetCutString(helper), "1*X0 1*X1 <= 9"); + EXPECT_EQ(helper.Info(), "lift=1"); +} + +TEST(CoverCutHelperTest, LetchfordSouliLifting) { + const int n = 10; + const IntegerValue rhs = IntegerValue(16); + std::vector vars; + std::vector coeffs = + IntegerValueVector({5, 5, 5, 5, 15, 13, 9, 8, 8, 8}); + for (int i = 0; i < n; ++i) { + vars.push_back(IntegerVariable(2 * i)); + } + std::vector lbs(n, IntegerValue(0)); + std::vector ubs(n, IntegerValue(1)); + std::vector lps(n, 0.0); + for (int i = 0; i < 4; ++i) { + lps[i] = 0.9; + } + + CutData data; + data.FillFromParallelVectors(rhs, vars, coeffs, lps, lbs, ubs); + data.ComplementForPositiveCoefficients(); + + CoverCutHelper helper; + EXPECT_TRUE(helper.TryWithLetchfordSouliLifting(data)); + EXPECT_EQ(GetCutString(helper), + "1*X0 1*X1 1*X2 1*X3 3*X4 3*X5 2*X6 1*X7 1*X8 1*X9 <= 3"); + + // For now, we only support Booleans in the cover. + // Note that we don't care for variable not in the cover though. 
+ data.terms[3].bound_diff = IntegerValue(2); + EXPECT_FALSE(helper.TryWithLetchfordSouliLifting(data)); +} + +LinearConstraint IntegerRoundingCutWithBoundsFromTrail( + const RoundingOptions& options, IntegerValue rhs, + absl::Span vars, + absl::Span coeffs, absl::Span lp_values, + const Model& model) { + std::vector lbs; + std::vector ubs; + auto* integer_trail = model.Get(); + for (int i = 0; i < vars.size(); ++i) { + lbs.push_back(integer_trail->LowerBound(vars[i])); + ubs.push_back(integer_trail->UpperBound(vars[i])); + } + + CutData data; + data.FillFromParallelVectors(rhs, vars, coeffs, lp_values, lbs, ubs); + data.ComplementForSmallerLpValues(); + + IntegerRoundingCutHelper helper; + EXPECT_TRUE(helper.ComputeCut(options, data, nullptr)); + + CutDataBuilder builder; + LinearConstraint constraint; + EXPECT_TRUE(builder.ConvertToLinearConstraint(helper.cut(), &constraint)); + return constraint; +} + +TEST(IntegerRoundingCutTest, LetchfordLodiExample1) { + Model model; + const IntegerVariable x0 = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable x1 = model.Add(NewIntegerVariable(0, 10)); + + // 6x0 + 4x1 <= 9. + const IntegerValue rhs = IntegerValue(9); + std::vector vars = {x0, x1}; + std::vector coeffs = {IntegerValue(6), IntegerValue(4)}; + + std::vector lp_values{1.5, 0.0}; + RoundingOptions options; + options.max_scaling = 2; + LinearConstraint constraint = IntegerRoundingCutWithBoundsFromTrail( + options, rhs, vars, coeffs, lp_values, model); + EXPECT_EQ(constraint.DebugString(), "2*X0 1*X1 <= 2"); +} + +TEST(IntegerRoundingCutTest, LetchfordLodiExample1Modified) { + Model model; + const IntegerVariable x0 = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable x1 = model.Add(NewIntegerVariable(0, 1)); + + // 6x0 + 4x1 <= 9. + const IntegerValue rhs = IntegerValue(9); + + std::vector vars = {x0, x1}; + std::vector coeffs = {IntegerValue(6), IntegerValue(4)}; + + // x1 is at its upper bound here. 
+ std::vector lp_values{5.0 / 6.0, 1.0}; + + // Note that the cut is only valid because the bound of x1 is one here. + LinearConstraint constraint = IntegerRoundingCutWithBoundsFromTrail( + RoundingOptions(), rhs, vars, coeffs, lp_values, model); + EXPECT_EQ(constraint.DebugString(), "1*X0 1*X1 <= 1"); +} + +TEST(IntegerRoundingCutTest, LetchfordLodiExample2) { + Model model; + const IntegerVariable x0 = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable x1 = model.Add(NewIntegerVariable(0, 10)); + + // 6x0 + 4x1 <= 9. + const IntegerValue rhs = IntegerValue(9); + std::vector vars = {x0, x1}; + std::vector coeffs = {IntegerValue(6), IntegerValue(4)}; + + std::vector lp_values{0.0, 2.25}; + LinearConstraint constraint = IntegerRoundingCutWithBoundsFromTrail( + RoundingOptions(), rhs, vars, coeffs, lp_values, model); + EXPECT_EQ(constraint.DebugString(), "3*X0 2*X1 <= 4"); +} + +TEST(IntegerRoundingCutTest, LetchfordLodiExample2WithNegatedCoeff) { + Model model; + const IntegerVariable x0 = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable x1 = model.Add(NewIntegerVariable(-3, 0)); + + // 6x0 - 4x1 <= 9. + const IntegerValue rhs = IntegerValue(9); + std::vector vars = {x0, x1}; + std::vector coeffs = {IntegerValue(6), IntegerValue(-4)}; + + std::vector lp_values{0.0, -2.25}; + LinearConstraint constraint = IntegerRoundingCutWithBoundsFromTrail( + RoundingOptions(), rhs, vars, coeffs, lp_values, model); + + // We actually do not return like in the example "3*X0 -2*X1 <= 4" + // But the simpler X0 - X1 <= 2 which has the same violation (0.25) but a + // better norm. + EXPECT_EQ(constraint.DebugString(), "1*X0 -1*X1 <= 2"); +} + +// This used to trigger a failure with a wrong implied bound code path. +TEST(IntegerRoundingCutTest, TestCaseUsedForDebugging) { + Model model; + // Variable values are in comment. 
+  const IntegerVariable x0 = model.Add(NewIntegerVariable(0, 3));  // 1
+  const IntegerVariable x1 = model.Add(NewIntegerVariable(0, 4));  // 0
+  const IntegerVariable x2 = model.Add(NewIntegerVariable(0, 2));  // 1
+  const IntegerVariable x3 = model.Add(NewIntegerVariable(0, 1));  // 0
+  const IntegerVariable x4 = model.Add(NewIntegerVariable(0, 3));  // 1
+
+  // The constraint is tight with value above (-5 - 4 + 7 == -2).
+  const IntegerValue rhs = IntegerValue(-2);
+  std::vector vars = {x0, x1, x2, x3, x4};
+  std::vector coeffs = IntegerValueVector({-5, -1, -4, -7, 7});
+
+  // The constraint is tight under LP (-5 * 0.4 == -2).
+  std::vector lp_values{0.4, 0.0, -1e-16, 0.0, 0.0};
+  LinearConstraint constraint = IntegerRoundingCutWithBoundsFromTrail(
+      RoundingOptions(), rhs, vars, coeffs, lp_values, model);
+
+  EXPECT_EQ(constraint.DebugString(), "-2*X0 -1*X1 -2*X2 -2*X3 2*X4 <= -2");
+}
+
+// The algo should find a "divisor" 2 when it leads to a good cut.
+//
+// TODO(user): Double check that such divisor will always be found? Of course,
+// if the initial constraint coefficients are too high, then it will not, but
+// that is okay since such cut efficacy will be bad anyway.
+TEST(IntegerRoundingCutTest, ZeroHalfCut) {
+  Model model;
+  const IntegerVariable x0 = model.Add(NewIntegerVariable(0, 10));
+  const IntegerVariable x1 = model.Add(NewIntegerVariable(0, 10));
+  const IntegerVariable x2 = model.Add(NewIntegerVariable(0, 10));
+  const IntegerVariable x3 = model.Add(NewIntegerVariable(0, 10));
+
+  // 6x0 + 4x1 + 8x2 + 7x3 <= 9.
+ const IntegerValue rhs = IntegerValue(9);
+ std::vector vars = {x0, x1, x2, x3};
+ std::vector coeffs = {IntegerValue(6), IntegerValue(4),
+ IntegerValue(8), IntegerValue(7)};
+
+ std::vector lp_values{0.25, 1.25, 0.3125, 0.0};
+ LinearConstraint constraint = IntegerRoundingCutWithBoundsFromTrail(
+ RoundingOptions(), rhs, vars, coeffs, lp_values, model);
+ EXPECT_EQ(constraint.DebugString(), "3*X0 2*X1 4*X2 3*X3 <= 4");
+}
+
+TEST(IntegerRoundingCutTest, LargeCoeffWithSmallImprecision) {
+ Model model;
+ const IntegerVariable x0 = model.Add(NewIntegerVariable(0, 5));
+ const IntegerVariable x1 = model.Add(NewIntegerVariable(0, 5));
+
+ // 1e6 x0 - x1 <= 1.5e6.
+ const IntegerValue rhs = IntegerValue(1.5e6);
+ std::vector vars = {x0, x1};
+ std::vector coeffs = {IntegerValue(1e6), IntegerValue(-1)};
+
+ // Note that without adjustment, this returns 2 * X0 - X1 <= 2.
+ // TODO(user): expose parameters so this can be verified other than manually?
+ std::vector lp_values{1.5, 0.1};
+ LinearConstraint constraint = IntegerRoundingCutWithBoundsFromTrail(
+ RoundingOptions(), rhs, vars, coeffs, lp_values, model);
+ EXPECT_EQ(constraint.DebugString(), "1*X0 <= 1");
+}
+
+TEST(IntegerRoundingCutTest, LargeCoeffWithSmallImprecision2) {
+ Model model;
+ const IntegerVariable x0 = model.Add(NewIntegerVariable(0, 5));
+ const IntegerVariable x1 = model.Add(NewIntegerVariable(0, 5));
+
+ // 1e6 x0 + 999999 * x1 <= 1.5e6.
+ const IntegerValue rhs = IntegerValue(1.5e6);
+ std::vector vars = {x0, x1};
+ std::vector coeffs = {IntegerValue(1e6), IntegerValue(999999)};
+
+ // Note that without adjustment, this returns 2 * X0 + X1 <= 2.
+ // TODO(user): expose parameters so this can be verified other than manually?
+ std::vector lp_values{1.49, 0.1}; + LinearConstraint constraint = IntegerRoundingCutWithBoundsFromTrail( + RoundingOptions(), rhs, vars, coeffs, lp_values, model); + EXPECT_EQ(constraint.DebugString(), "1*X0 1*X1 <= 1"); +} + +TEST(IntegerRoundingCutTest, MirOnLargerConstraint) { + Model model; + std::vector vars(10); + for (int i = 0; i < 10; ++i) { + vars[i] = model.Add(NewIntegerVariable(0, 5)); + } + + // sum (i + 1) x_i <= 16. + const IntegerValue rhs = IntegerValue(16); + std::vector coeffs; + for (int i = 0; i < vars.size(); ++i) { + coeffs.push_back(IntegerValue(i + 1)); + } + + std::vector lp_values(vars.size(), 0.0); + lp_values[9] = 1.6; // 10 * 1.6 == 16 + + RoundingOptions options; + options.max_scaling = 4; + LinearConstraint constraint = IntegerRoundingCutWithBoundsFromTrail( + options, rhs, vars, coeffs, lp_values, model); + EXPECT_EQ(constraint.DebugString(), "1*X6 2*X7 3*X8 4*X9 <= 4"); +} + +TEST(IntegerRoundingCutTest, MirOnLargerConstraint2) { + Model model; + std::vector vars(10); + for (int i = 0; i < 10; ++i) vars[i] = model.Add(NewIntegerVariable(0, 5)); + + // sum (i + 1) x_i <= 16. 
+ const IntegerValue rhs = IntegerValue(16); + std::vector coeffs; + for (int i = 0; i < vars.size(); ++i) { + coeffs.push_back(IntegerValue(i + 1)); + } + + std::vector lp_values(vars.size(), 0.0); + lp_values[4] = 5.5 / 5.0; + lp_values[9] = 1.05; + + RoundingOptions options; + options.max_scaling = 4; + LinearConstraint constraint = IntegerRoundingCutWithBoundsFromTrail( + options, rhs, vars, coeffs, lp_values, model); + EXPECT_EQ(constraint.DebugString(), + "2*X1 3*X2 4*X3 6*X4 6*X5 8*X6 9*X7 10*X8 12*X9 <= 18"); +} + +std::vector ToIntegerValues(const std::vector input) { + std::vector output; + for (const int64_t v : input) output.push_back(IntegerValue(v)); + return output; +} + +std::vector ToIntegerVariables( + const std::vector input) { + std::vector output; + for (const int64_t v : input) output.push_back(IntegerVariable(v)); + return output; +} + +// This used to fail as I was coding the CL when I was trying to force t==1 +// in the GetSuperAdditiveRoundingFunction() code. +TEST(IntegerRoundingCutTest, RegressionTest) { + RoundingOptions options; + options.max_scaling = 4; + + const IntegerValue rhs = int64_t{7469520585651099083}; + std::vector vars = ToIntegerVariables( + {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, + 28, 30, 32, 34, 36, 38, 42, 44, 46, 48, 50, 52, 54, 56}); + std::vector coeffs = ToIntegerValues( + {22242929208935956LL, 128795791007031270LL, 64522773588815932LL, + 106805487542181976LL, 136903984044996548LL, 177476314670499137LL, + 364043443034395LL, 28002509947960647LL, 310965596097558939LL, + 103949088324014599LL, 41400520193055115LL, 50111468002532494LL, + 53821870865384327LL, 68690238549704032LL, 75189534851923882LL, + 136250652059774801LL, 169776580612315087LL, 172493907306536826LL, + 13772608007357656LL, 74052819842959090LL, 134400722410234077LL, + 5625133860678171LL, 299572729577293761LL, 81099235700461109LL, + 178989907222373586LL, 16642124499479353LL, 110378717916671350LL, + 41703587448036910LL}); + std::vector 
lp_values = { + 0, 0, 2.51046, 0.0741114, 0.380072, 5.17238, 0, + 0, 13.2214, 0, 0.635977, 0, 0, 3.39859, + 1.15936, 0.165207, 2.29673, 2.19505, 0, 0, 2.31191, + 0, 0.785149, 0.258119, 2.26978, 0, 0.970046, 0}; + std::vector lbs(28, IntegerValue(0)); + std::vector ubs(28, IntegerValue(99)); + ubs[8] = 17; + std::vector solution = + ToIntegerValues({0, 3, 0, 2, 2, 2, 0, 1, 5, 1, 1, 1, 1, 2, + 0, 2, 1, 3, 1, 1, 4, 1, 6, 2, 3, 0, 1, 1}); + + EXPECT_EQ(coeffs.size(), vars.size()); + EXPECT_EQ(lp_values.size(), vars.size()); + EXPECT_EQ(lbs.size(), vars.size()); + EXPECT_EQ(ubs.size(), vars.size()); + EXPECT_EQ(solution.size(), vars.size()); + + // The solution is a valid integer solution of the inequality. + { + IntegerValue activity(0); + for (int i = 0; i < vars.size(); ++i) { + activity += solution[i] * coeffs[i]; + } + EXPECT_LE(activity, rhs); + } + + CutData data; + data.FillFromParallelVectors(rhs, vars, coeffs, lp_values, lbs, ubs); + IntegerRoundingCutHelper helper; + + // TODO(user): Actually this fail, so we don't compute a cut here. 
+ EXPECT_FALSE(helper.ComputeCut(options, data, nullptr)); +} + +void InitializeLpValues(absl::Span values, Model* model) { + auto* lp_values = model->GetOrCreate(); + lp_values->resize(2 * values.size()); + for (int i = 0; i < values.size(); ++i) { + (*lp_values)[IntegerVariable(2 * i)] = values[i]; + (*lp_values)[IntegerVariable(2 * i + 1)] = -values[i]; + } +} + +TEST(SquareCutGeneratorTest, TestBelowCut) { + Model model; + IntegerVariable x = model.Add(NewIntegerVariable(0, 5)); + IntegerVariable y = model.Add(NewIntegerVariable(0, 25)); + InitializeLpValues({2.0, 12.0}, &model); + + CutGenerator square = CreateSquareCutGenerator(y, x, 1, &model); + auto* manager = model.GetOrCreate(); + square.generate_cuts(manager); + EXPECT_EQ(1, manager->num_cuts()); + EXPECT_THAT(manager->AllConstraints().front().constraint.DebugString(), + EndsWith("-5*X0 1*X1 <= 0")); +} + +TEST(SquareCutGeneratorTest, TestBelowCutWithOffset) { + Model model; + IntegerVariable x = model.Add(NewIntegerVariable(1, 5)); + IntegerVariable y = model.Add(NewIntegerVariable(1, 25)); + InitializeLpValues({2.0, 12.0}, &model); + + CutGenerator square = CreateSquareCutGenerator(y, x, 1, &model); + auto* manager = model.GetOrCreate(); + square.generate_cuts(manager); + ASSERT_EQ(1, manager->num_cuts()); + EXPECT_THAT(manager->AllConstraints().front().constraint.DebugString(), + EndsWith("-6*X0 1*X1 <= -5")); +} + +TEST(SquareCutGeneratorTest, TestNoBelowCut) { + Model model; + IntegerVariable x = model.Add(NewIntegerVariable(1, 5)); + IntegerVariable y = model.Add(NewIntegerVariable(1, 25)); + InitializeLpValues({2.0, 6.0}, &model); + + CutGenerator square = CreateSquareCutGenerator(y, x, 1, &model); + auto* manager = model.GetOrCreate(); + square.generate_cuts(manager); + ASSERT_EQ(0, manager->num_cuts()); +} + +TEST(SquareCutGeneratorTest, TestAboveCut) { + Model model; + IntegerVariable x = model.Add(NewIntegerVariable(1, 5)); + IntegerVariable y = model.Add(NewIntegerVariable(1, 25)); + 
InitializeLpValues({2.5, 6.25}, &model); + + CutGenerator square = CreateSquareCutGenerator(y, x, 1, &model); + auto* manager = model.GetOrCreate(); + square.generate_cuts(manager); + ASSERT_EQ(1, manager->num_cuts()); + EXPECT_THAT(manager->AllConstraints().front().constraint.DebugString(), + StartsWith("-6 <= -5*X0 1*X1")); +} + +TEST(SquareCutGeneratorTest, TestNearlyAboveCut) { + Model model; + IntegerVariable x = model.Add(NewIntegerVariable(1, 5)); + IntegerVariable y = model.Add(NewIntegerVariable(1, 25)); + InitializeLpValues({2.4, 5.99999}, &model); + + CutGenerator square = CreateSquareCutGenerator(y, x, 1, &model); + auto* manager = model.GetOrCreate(); + square.generate_cuts(manager); + ASSERT_EQ(0, manager->num_cuts()); +} + +TEST(MultiplicationCutGeneratorTest, TestCut1) { + Model model; + IntegerVariable x = model.Add(NewIntegerVariable(1, 5)); + IntegerVariable y = model.Add(NewIntegerVariable(2, 3)); + IntegerVariable z = model.Add(NewIntegerVariable(1, 15)); + InitializeLpValues({1.2, 2.1, 2.1}, &model); + + CutGenerator mult = + CreatePositiveMultiplicationCutGenerator(z, x, y, 1, &model); + auto* manager = model.GetOrCreate(); + mult.generate_cuts(manager); + ASSERT_EQ(1, manager->num_cuts()); + EXPECT_THAT(manager->AllConstraints().front().constraint.DebugString(), + EndsWith("2*X0 1*X1 -1*X2 <= 2")); +} + +TEST(MultiplicationCutGeneratorTest, TestCut2) { + Model model; + IntegerVariable x = model.Add(NewIntegerVariable(1, 5)); + IntegerVariable y = model.Add(NewIntegerVariable(2, 3)); + IntegerVariable z = model.Add(NewIntegerVariable(1, 15)); + InitializeLpValues({4.9, 2.8, 12.0}, &model); + + CutGenerator mult = + CreatePositiveMultiplicationCutGenerator(z, x, y, 1, &model); + auto* manager = model.GetOrCreate(); + mult.generate_cuts(manager); + ASSERT_EQ(1, manager->num_cuts()); + EXPECT_THAT(manager->AllConstraints().front().constraint.DebugString(), + EndsWith("3*X0 5*X1 -1*X2 <= 15")); +} + +TEST(MultiplicationCutGeneratorTest, TestCut3) 
{ + Model model; + IntegerVariable x = model.Add(NewIntegerVariable(1, 5)); + IntegerVariable y = model.Add(NewIntegerVariable(2, 3)); + IntegerVariable z = model.Add(NewIntegerVariable(1, 15)); + InitializeLpValues({1.2, 2.1, 4.4}, &model); + + CutGenerator mult = + CreatePositiveMultiplicationCutGenerator(z, x, y, 1, &model); + auto* manager = model.GetOrCreate(); + mult.generate_cuts(manager); + ASSERT_EQ(2, manager->num_cuts()); + EXPECT_THAT(manager->AllConstraints().front().constraint.DebugString(), + StartsWith("3 <= 3*X0 1*X1 -1*X2")); + EXPECT_THAT(manager->AllConstraints().back().constraint.DebugString(), + StartsWith("10 <= 2*X0 5*X1 -1*X2")); +} + +TEST(MultiplicationCutGeneratorTest, TestNoCut1) { + Model model; + IntegerVariable x = model.Add(NewIntegerVariable(1, 50)); + IntegerVariable y = model.Add(NewIntegerVariable(2, 30)); + IntegerVariable z = model.Add(NewIntegerVariable(1, 1500)); + InitializeLpValues({40.0, 20.0, 799.0}, &model); + + CutGenerator mult = + CreatePositiveMultiplicationCutGenerator(z, x, y, 1, &model); + auto* manager = model.GetOrCreate(); + mult.generate_cuts(manager); + ASSERT_EQ(0, manager->num_cuts()); +} + +TEST(MultiplicationCutGeneratorTest, TestNoCut2) { + Model model; + IntegerVariable x = model.Add(NewIntegerVariable(1, 50)); + IntegerVariable y = model.Add(NewIntegerVariable(2, 30)); + IntegerVariable z = model.Add(NewIntegerVariable(1, 1500)); + InitializeLpValues({40.0, 20.0, 801.0}, &model); + + CutGenerator mult = + CreatePositiveMultiplicationCutGenerator(z, x, y, 1, &model); + auto* manager = model.GetOrCreate(); + mult.generate_cuts(manager); + ASSERT_EQ(0, manager->num_cuts()); +} + +TEST(AllDiffCutGeneratorTest, TestCut) { + Model model; + Domain domain(10); + domain = domain.UnionWith(Domain(15)); + domain = domain.UnionWith(Domain(25)); + IntegerVariable x = model.Add(NewIntegerVariable(domain)); + IntegerVariable y = model.Add(NewIntegerVariable(domain)); + IntegerVariable z = 
model.Add(NewIntegerVariable(domain)); + InitializeLpValues({15.0, 15.0, 15.0}, &model); + + CutGenerator all_diff = CreateAllDifferentCutGenerator({x, y, z}, &model); + auto* manager = model.GetOrCreate(); + all_diff.generate_cuts(manager); + ASSERT_EQ(1, manager->num_cuts()); + EXPECT_EQ(manager->AllConstraints().front().constraint.DebugString(), + "50 <= 1*X0 1*X1 1*X2 <= 50"); +} + +TEST(AllDiffCutGeneratorTest, TestCut2) { + Model model; + Domain domain(10); + domain = domain.UnionWith(Domain(15)); + domain = domain.UnionWith(Domain(25)); + IntegerVariable x = model.Add(NewIntegerVariable(domain)); + IntegerVariable y = model.Add(NewIntegerVariable(domain)); + IntegerVariable z = model.Add(NewIntegerVariable(domain)); + InitializeLpValues({13.0, 10.0, 12.0}, &model); + + CutGenerator all_diff = CreateAllDifferentCutGenerator({x, y, z}, &model); + auto* manager = model.GetOrCreate(); + all_diff.generate_cuts(manager); + ASSERT_EQ(2, manager->num_cuts()); + EXPECT_EQ(manager->AllConstraints().front().constraint.DebugString(), + "25 <= 1*X1 1*X2 <= 40"); + EXPECT_EQ(manager->AllConstraints().back().constraint.DebugString(), + "50 <= 1*X0 1*X1 1*X2 <= 50"); +} + +// We model the maximum of 3 affine functions: +// f0(x) = 1 +// f1(x) = -x0 - 2x1 +// f2(x) = -x0 + x1 +// over the box domain -1 <= x0, x1 <= 1. For this data, there are 9 possible +// maximum corner cuts. 
I denote each by noting which function f^i each input +// variable x_j gets assigned: +// (1) x0 -> f0, x1 -> f0: y <= 0x0 + 0x1 + 1z_0 + 3z_1 + 2z_2 +// (2) x0 -> f0, x1 -> f1: y <= 0x0 - 2x1 + 3z_0 + 1z_1 + 4z_2 +// (3) x0 -> f0, x1 -> f2: y <= 0x0 + x1 + 2z_0 + 4z_1 + 1z_2 +// (4) x0 -> f1, x1 -> f0: y <= -x0 + 0x1 + 2z_0 + 2z_1 + 1z_2 +// (5) x0 -> f1, x1 -> f1: y <= -x0 - 2x1 + 4z_0 + 0z_1 + 3z_2 +// (6) x0 -> f1, x1 -> f2: y <= -x0 + x1 + 3z_0 + 3z_1 + 0z_2 +// (7) x0 -> f2, x1 -> f0: y <= -x0 + 0x1 + 2z_0 + 2z_1 + 1z_2 +// (8) x0 -> f2, x1 -> f1: y <= -x0 - 2x1 + 4z_0 + 0z_1 + 3z_2 +// (9) x0 -> f2, x1 -> f2: y <= -x0 + x1 + 3z_0 + 3z_1 + 0z_2 +TEST(LinMaxCutsTest, BasicCuts1) { + Model model; + IntegerVariable x0 = model.Add(NewIntegerVariable(-1, 1)); + IntegerVariable x1 = model.Add(NewIntegerVariable(-1, 1)); + IntegerVariable target = model.Add(NewIntegerVariable(-100, 100)); + LinearExpression f0; + f0.offset = IntegerValue(1); + LinearExpression f1; + f1.vars = {x0, x1}; + f1.coeffs = {IntegerValue(-1), IntegerValue(-2)}; + LinearExpression f2; + f2.vars = {x0, x1}; + f2.coeffs = {IntegerValue(-1), IntegerValue(1)}; + + std::vector exprs = {f0, f1, f2}; + std::vector z_vars; + for (int i = 0; i < exprs.size(); ++i) { + IntegerVariable z = model.Add(NewIntegerVariable(0, 1)); + z_vars.push_back(z); + } + + CutGenerator max_cuts = + CreateLinMaxCutGenerator(target, exprs, z_vars, &model); + + auto* manager = model.GetOrCreate(); + InitializeLpValues({-1.0, 1.0, 2.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0}, &model); + + max_cuts.generate_cuts(manager); + ASSERT_EQ(1, manager->num_cuts()); + + // x vars are X0,X1 respectively, target is X2, z_vars are X3,X4,X5 + // respectively. + // Most violated inequality is 2. 
+ EXPECT_THAT(manager->AllConstraints().front().constraint.DebugString(), + StartsWith("0 <= -2*X1 -1*X2 3*X3 1*X4 4*X5")); + + InitializeLpValues({-1.0, -1.0, 2.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0}, + &model); + max_cuts.generate_cuts(manager); + ASSERT_EQ(2, manager->num_cuts()); + // Most violated inequality is 3. + EXPECT_THAT(manager->AllConstraints().back().constraint.DebugString(), + StartsWith("0 <= 1*X1 -1*X2 2*X3 4*X4 1*X5")); +} + +// We model the maximum of 3 affine functions: +// f0(x) = 1 +// f1(x) = x +// f2(x) = -x +// target = max(f0, f1, f2) +// x in [-10, 10] +TEST(LinMaxCutsTest, AffineCuts1) { + Model model; + const IntegerValue zero(0); + const IntegerValue one(1); + IntegerVariable x = model.Add(NewIntegerVariable(-10, 10)); + IntegerVariable target = model.Add(NewIntegerVariable(1, 100)); + LinearExpression target_expr; + target_expr.vars.push_back(target); + target_expr.coeffs.push_back(one); + + std::vector> affines = { + {zero, one}, {one, zero}, {-one, zero}}; + + LinearConstraintBuilder builder(&model); + ASSERT_TRUE( + BuildMaxAffineUpConstraint(target_expr, x, affines, &model, &builder)); + + // Note, the cut is not normalized. 
+ EXPECT_EQ(builder.Build().DebugString(), "20*X1 <= 200"); +} + +// We model the maximum of 3 affine functions: +// f0(x) = 1 +// f1(x) = x +// f2(x) = -x +// target = max(f0, f1, f2) +// x in [-1, 10] +TEST(LinMaxCutsTest, AffineCuts2) { + Model model; + const IntegerValue zero(0); + const IntegerValue one(1); + IntegerVariable x = model.Add(NewIntegerVariable(-1, 10)); + IntegerVariable target = model.Add(NewIntegerVariable(1, 100)); + LinearExpression target_expr; + target_expr.vars.push_back(target); + target_expr.coeffs.push_back(one); + + std::vector> affines = { + {zero, one}, {one, zero}, {-one, zero}}; + + LinearConstraintBuilder builder(&model); + ASSERT_TRUE( + BuildMaxAffineUpConstraint(target_expr, x, affines, &model, &builder)); + + EXPECT_EQ(builder.Build().DebugString(), "-9*X0 11*X1 <= 20"); +} + +// We model the maximum of 3 affine functions: +// f0(x) = 1 +// f1(x) = x +// f2(x) = -x +// target = max(f0, f1, f2) +// x fixed +TEST(LinMaxCutsTest, AffineCutsFixedVar) { + Model model; + const IntegerValue zero(0); + const IntegerValue one(1); + IntegerVariable x = model.Add(NewIntegerVariable(2, 2)); + IntegerVariable target = model.Add(NewIntegerVariable(0, 100)); + LinearExpression target_expr; + target_expr.vars.push_back(target); + target_expr.coeffs.push_back(one); + + std::vector> affines = { + {zero, one}, {one, zero}, {-one, zero}}; + + CutGenerator max_cuts = + CreateMaxAffineCutGenerator(target_expr, x, affines, "test", &model); + + auto* manager = model.GetOrCreate(); + InitializeLpValues({2.0, 8.0}, &model); + max_cuts.generate_cuts(manager); + EXPECT_EQ(0, manager->num_cuts()); +} + +TEST(ImpliedBoundsProcessorTest, PositiveBasicTest) { + Model model; + model.GetOrCreate()->set_use_implied_bounds(true); + + const BooleanVariable b = model.Add(NewBooleanVariable()); + const IntegerVariable b_view = model.Add(NewIntegerVariable(0, 1)); + const IntegerVariable x = model.Add(NewIntegerVariable(2, 9)); + + auto* integer_encoder = 
model.GetOrCreate(); + auto* integer_trail = model.GetOrCreate(); + auto* implied_bounds = model.GetOrCreate(); + + integer_encoder->AssociateToIntegerEqualValue(Literal(b, true), b_view, + IntegerValue(1)); + implied_bounds->Add(Literal(b, true), + IntegerLiteral::GreaterOrEqual(x, IntegerValue(5))); + + // Lp solution. + ImpliedBoundsProcessor processor({x, b_view}, integer_trail, implied_bounds); + + util_intops::StrongVector lp_values(1000); + lp_values[x] = 4.0; + lp_values[b_view] = 2.0 / 3.0; // 2.0 + b_view_value * (5-2) == 4.0 + processor.RecomputeCacheAndSeparateSomeImpliedBoundCuts(lp_values); + + // Lets look at the term X. + CutData data; + CutDataBuilder builder; + + CutTerm X; + X.coeff = 1; + X.lp_value = 2.0; + X.bound_diff = 7; + X.expr_vars[0] = x; + X.expr_coeffs[0] = 1; + X.expr_coeffs[1] = 0; + X.expr_offset = -2; + data.terms.push_back(X); + + processor.CacheDataForCut(IntegerVariable(100), &data); + EXPECT_TRUE(processor.TryToExpandWithLowerImpliedbound(IntegerValue(1), 0, + /*complement=*/false, + &data, &builder)); + EXPECT_EQ(data.terms.size(), 2); + EXPECT_THAT(data.terms[0].DebugString(), + ::testing::StartsWith("coeff=1 lp=0 range=7")); + EXPECT_THAT(data.terms[1].DebugString(), + ::testing::StartsWith("coeff=3 lp=0.666667 range=1")); + EXPECT_EQ(data.terms[1].expr_offset, 0); +} + +// Same as above but with b.Negated() +TEST(ImpliedBoundsProcessorTest, NegativeBasicTest) { + Model model; + model.GetOrCreate()->set_use_implied_bounds(true); + + const BooleanVariable b = model.Add(NewBooleanVariable()); + const IntegerVariable b_view = model.Add(NewIntegerVariable(0, 1)); + const IntegerVariable x = model.Add(NewIntegerVariable(2, 9)); + + auto* integer_encoder = model.GetOrCreate(); + auto* integer_trail = model.GetOrCreate(); + auto* implied_bounds = model.GetOrCreate(); + + integer_encoder->AssociateToIntegerEqualValue(Literal(b, true), b_view, + IntegerValue(1)); + implied_bounds->Add(Literal(b, false), // False here. 
+ IntegerLiteral::GreaterOrEqual(x, IntegerValue(5))); + + // Lp solution. + ImpliedBoundsProcessor processor({x, b_view}, integer_trail, implied_bounds); + + util_intops::StrongVector lp_values(1000); + lp_values[x] = 4.0; + lp_values[b_view] = 1.0 - 2.0 / 3.0; // 1 - value above. + processor.RecomputeCacheAndSeparateSomeImpliedBoundCuts(lp_values); + + // Lets look at the term X. + CutData data; + CutDataBuilder builder; + + CutTerm X; + X.coeff = 1; + X.lp_value = 2.0; + X.bound_diff = 7; + X.expr_vars[0] = x; + X.expr_coeffs[0] = 1; + X.expr_coeffs[1] = 0; + X.expr_offset = -2; + data.terms.push_back(X); + + processor.CacheDataForCut(IntegerVariable(100), &data); + EXPECT_TRUE(processor.TryToExpandWithLowerImpliedbound(IntegerValue(1), 0, + /*complement=*/false, + &data, &builder)); + EXPECT_EQ(data.terms.size(), 2); + EXPECT_THAT(data.terms[0].DebugString(), + ::testing::StartsWith("coeff=1 lp=0 range=7")); + EXPECT_THAT(data.terms[1].DebugString(), + ::testing::StartsWith("coeff=3 lp=0.666667 range=1")); + + // This is the only change, we have 1 - bool there actually. 
+ EXPECT_EQ(data.terms[1].expr_offset, 1); + EXPECT_EQ(data.terms[1].expr_coeffs[0], -1); + EXPECT_EQ(data.terms[1].expr_vars[0], b_view); +} + +TEST(ImpliedBoundsProcessorTest, DecompositionTest) { + Model model; + model.GetOrCreate()->set_use_implied_bounds(true); + + const BooleanVariable b = model.Add(NewBooleanVariable()); + const IntegerVariable b_view = model.Add(NewIntegerVariable(0, 1)); + const BooleanVariable c = model.Add(NewBooleanVariable()); + const IntegerVariable c_view = model.Add(NewIntegerVariable(0, 1)); + const IntegerVariable x = model.Add(NewIntegerVariable(2, 9)); + + auto* integer_encoder = model.GetOrCreate(); + auto* integer_trail = model.GetOrCreate(); + auto* implied_bounds = model.GetOrCreate(); + + integer_encoder->AssociateToIntegerEqualValue(Literal(b, true), b_view, + IntegerValue(1)); + integer_encoder->AssociateToIntegerEqualValue(Literal(c, true), c_view, + IntegerValue(1)); + implied_bounds->Add(Literal(b, true), + IntegerLiteral::GreaterOrEqual(x, IntegerValue(5))); + implied_bounds->Add(Literal(c, true), + IntegerLiteral::LowerOrEqual(x, IntegerValue(2))); + + // Lp solution. + ImpliedBoundsProcessor processor({x, b_view, c_view}, integer_trail, + implied_bounds); + + util_intops::StrongVector lp_values(1000); + lp_values[x] = 4.0; + lp_values[NegationOf(x)] = -4.0; + lp_values[b_view] = 2.0 / 3.0; // 2.0 + b_view_value * (5-2) == 4.0 + lp_values[c_view] = 0.5; + processor.RecomputeCacheAndSeparateSomeImpliedBoundCuts(lp_values); + + // Lets look at the term X. 
+ CutTerm X; + X.coeff = 1; + X.lp_value = 2.0; + X.bound_diff = 7; + X.expr_vars[0] = x; + X.expr_coeffs[0] = 1; + X.expr_coeffs[1] = 0; + X.expr_offset = -2; + + CutData data; + data.terms.push_back(X); + processor.CacheDataForCut(IntegerVariable(100), &data); + X = data.terms[0]; + + // X - 2 = 3 * B + slack; + CutTerm bool_term; + CutTerm slack_term; + EXPECT_TRUE(processor.DecomposeWithImpliedLowerBound(X, IntegerValue(1), + bool_term, slack_term)); + EXPECT_THAT(bool_term.DebugString(), + ::testing::StartsWith("coeff=3 lp=0.666667 range=1")); + EXPECT_THAT(slack_term.DebugString(), + ::testing::StartsWith("coeff=1 lp=0 range=7")); + + // (9 - X) = 7 * C + slack; + CutTerm Y = X; + absl::int128 unused; + Y.Complement(&unused); + Y.coeff = -Y.coeff; + EXPECT_TRUE(processor.DecomposeWithImpliedLowerBound(Y, IntegerValue(1), + bool_term, slack_term)); + EXPECT_THAT(bool_term.DebugString(), + ::testing::StartsWith("coeff=7 lp=0.5 range=1")); + EXPECT_THAT(slack_term.DebugString(), + ::testing::StartsWith("coeff=1 lp=1.5 range=7")); + + // X - 2 = 7 * (1 - C) - slack; + EXPECT_TRUE(processor.DecomposeWithImpliedUpperBound(X, IntegerValue(1), + bool_term, slack_term)); + EXPECT_THAT(bool_term.DebugString(), + ::testing::StartsWith("coeff=7 lp=0.5 range=1")); + EXPECT_THAT(slack_term.DebugString(), + ::testing::StartsWith("coeff=-1 lp=1.5 range=7")); +} + +TEST(CutDataTest, SimpleExample) { + Model model; + const IntegerVariable x0 = model.Add(NewIntegerVariable(7, 10)); + const IntegerVariable x1 = model.Add(NewIntegerVariable(-3, 20)); + + // 6x0 - 4x1 <= 9. 
+ const IntegerValue rhs = IntegerValue(9); + std::vector vars = {x0, x1}; + std::vector coeffs = {IntegerValue(6), IntegerValue(-4)}; + std::vector lp_values = {7.5, 4.5}; + + CutData cut; + std::vector lbs; + std::vector ubs; + auto* integer_trail = model.Get(); + for (int i = 0; i < vars.size(); ++i) { + lbs.push_back(integer_trail->LowerBound(vars[i])); + ubs.push_back(integer_trail->UpperBound(vars[i])); + } + cut.FillFromParallelVectors(rhs, vars, coeffs, lp_values, lbs, ubs); + cut.ComplementForSmallerLpValues(); + + // 6 (X0' + 7) - 4 (X1' - 3) <= 9 + ASSERT_EQ(cut.terms.size(), 2); + EXPECT_EQ(cut.rhs, 9 - 4 * 3 - 6 * 7); + EXPECT_EQ(cut.terms[0].coeff, 6); + EXPECT_EQ(cut.terms[0].lp_value, 0.5); + EXPECT_EQ(cut.terms[0].bound_diff, 3); + EXPECT_EQ(cut.terms[1].coeff, -4); + EXPECT_EQ(cut.terms[1].lp_value, 7.5); + EXPECT_EQ(cut.terms[1].bound_diff, 23); + + // Lets complement. + const absl::int128 old_rhs = cut.rhs; + cut.terms[0].Complement(&cut.rhs); + EXPECT_EQ(cut.rhs, old_rhs - 3 * 6); + EXPECT_EQ(cut.terms[0].coeff, -6); + EXPECT_EQ(cut.terms[0].lp_value, 3 - 0.5); + EXPECT_EQ(cut.terms[0].bound_diff, 3); + + // Encode back. + LinearConstraint new_constraint; + CutDataBuilder builder; + EXPECT_TRUE(builder.ConvertToLinearConstraint(cut, &new_constraint)); + + // We have a division by GCD in there. 
+ const IntegerValue gcd = 2; + EXPECT_EQ(vars.size(), new_constraint.num_terms); + for (int i = 0; i < new_constraint.num_terms; ++i) { + EXPECT_EQ(vars[i], new_constraint.vars[i]); + EXPECT_EQ(coeffs[i] / gcd, new_constraint.coeffs[i]); + } +} + +TEST(SumOfAllDiffLowerBounderTest, ContinuousVariables) { + Model model; + IntegerTrail* integer_trail = model.GetOrCreate(); + IntegerVariable x1 = model.Add(NewIntegerVariable(1, 10)); + IntegerVariable x2 = model.Add(NewIntegerVariable(1, 10)); + IntegerVariable x3 = model.Add(NewIntegerVariable(1, 10)); + + SumOfAllDiffLowerBounder helper; + helper.Add(x1, 3, *integer_trail); + helper.Add(x2, 3, *integer_trail); + helper.Add(x3, 3, *integer_trail); + EXPECT_EQ(3, helper.size()); + EXPECT_EQ(6, helper.SumOfMinDomainValues()); + EXPECT_EQ(6, helper.SumOfDifferentMins()); + std::string suffix; + EXPECT_EQ(6, helper.GetBestLowerBound(suffix)); + EXPECT_EQ("e", suffix); + helper.Clear(); + EXPECT_EQ(0, helper.size()); +} + +TEST(SumOfAllDiffLowerBounderTest, DisjointVariables) { + Model model; + IntegerTrail* integer_trail = model.GetOrCreate(); + IntegerVariable x1 = model.Add(NewIntegerVariable(1, 10)); + IntegerVariable x2 = model.Add(NewIntegerVariable(1, 10)); + IntegerVariable x3 = model.Add(NewIntegerVariable(1, 10)); + + SumOfAllDiffLowerBounder helper; + helper.Add(x1, 3, *integer_trail); + helper.Add(x2, 3, *integer_trail); + helper.Add(AffineExpression(x3, 1, 10), 3, *integer_trail); + EXPECT_EQ(3, helper.size()); + EXPECT_EQ(6, helper.SumOfMinDomainValues()); + EXPECT_EQ(14, helper.SumOfDifferentMins()); + std::string suffix; + EXPECT_EQ(14, helper.GetBestLowerBound(suffix)); + EXPECT_EQ("a", suffix); +} + +TEST(SumOfAllDiffLowerBounderTest, DiscreteDomains) { + Model model; + IntegerTrail* integer_trail = model.GetOrCreate(); + IntegerVariable x1 = model.Add(NewIntegerVariable(1, 10)); + IntegerVariable x2 = model.Add(NewIntegerVariable(1, 10)); + IntegerVariable x3 = model.Add(NewIntegerVariable(1, 10)); + + 
SumOfAllDiffLowerBounder helper; + helper.Add(AffineExpression(x1, 3, 0), 3, *integer_trail); + helper.Add(AffineExpression(x2, 3, 0), 3, *integer_trail); + helper.Add(AffineExpression(x3, 3, 0), 3, *integer_trail); + EXPECT_EQ(3, helper.size()); + EXPECT_EQ(18, helper.SumOfMinDomainValues()); + EXPECT_EQ(12, helper.SumOfDifferentMins()); + std::string suffix; + EXPECT_EQ(18, helper.GetBestLowerBound(suffix)); + EXPECT_EQ("d", suffix); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/diffn_test.cc b/ortools/sat/diffn_test.cc new file mode 100644 index 0000000000..a46d78d29f --- /dev/null +++ b/ortools/sat/diffn_test.cc @@ -0,0 +1,176 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/diffn.h" + +#include + +#include + +#include "absl/strings/str_join.h" +#include "gtest/gtest.h" +#include "ortools/base/logging.h" +#include "ortools/sat/cp_model.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_solver.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/integer_search.h" +#include "ortools/sat/intervals.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/sat_solver.h" + +namespace operations_research { +namespace sat { +namespace { + +// Counts how many ways we can put two square of minimal size 1 in an n x n +// square. 
+//
+// For n = 1, infeasible.
+// For n = 2, should be 4 * 3.
+// For n = 3:
+// - 9 * 8 for two size 1.
+// - 4 * 5 for size 2 + size 1. Times 2 for the permutation.
+int CountAllTwoBoxesSolutions(int n) {
+ Model model;
+ std::vector x;
+ std::vector y;
+ for (int i = 0; i < 2; ++i) {
+ // Create a square shaped box of minimum size 1.
+ const IntegerVariable size = model.Add(NewIntegerVariable(1, n));
+ x.push_back(
+ model.Add(NewInterval(model.Add(NewIntegerVariable(0, n)),
+ model.Add(NewIntegerVariable(0, n)), size)));
+ y.push_back(
+ model.Add(NewInterval(model.Add(NewIntegerVariable(0, n)),
+ model.Add(NewIntegerVariable(0, n)), size)));
+ }
+
+ // The cumulative relaxation adds extra variables that are not completely
+ // fixed. So to not count too many solutions with our code here, we disable
+ // that. Note that alternatively, we could have used the cp_model.proto API
+ // to do the same, and that should work even with this on.
+ AddNonOverlappingRectangles(x, y, &model);
+
+ int num_solutions_found = 0;
+ auto* integer_trail = model.GetOrCreate();
+ auto* repository = model.GetOrCreate();
+ auto start_value = [repository, integer_trail](IntervalVariable i) {
+ return integer_trail->LowerBound(repository->Start(i)).value();
+ };
+ auto end_value = [repository, integer_trail](IntervalVariable i) {
+ return integer_trail->LowerBound(repository->End(i)).value();
+ };
+ while (true) {
+ const SatSolver::Status status =
+ SolveIntegerProblemWithLazyEncoding(&model);
+ if (status != SatSolver::Status::FEASIBLE) break;
+
+ // Display the first few solutions.
+ if (num_solutions_found < 30) {
+ LOG(INFO) << "R1: " << start_value(x[0]) << "," << start_value(y[0])
+ << " " << end_value(x[0]) << "," << end_value(y[0])
+ << " R2: " << start_value(x[1]) << "," << start_value(y[1])
+ << " " << end_value(x[1]) << "," << end_value(y[1]);
+ }
+
+ num_solutions_found++;
+ model.Add(ExcludeCurrentSolutionAndBacktrack());
+ }
+ return num_solutions_found;
+}
+
+TEST(NonOverlappingRectanglesTest, SimpleCounting) {
+ EXPECT_EQ(CountAllTwoBoxesSolutions(1), 0);
+ EXPECT_EQ(CountAllTwoBoxesSolutions(2), 3 * 4);
+ EXPECT_EQ(CountAllTwoBoxesSolutions(3), 9 * 8 + 4 * 5 * 2);
+ EXPECT_EQ(CountAllTwoBoxesSolutions(4),
+ /*2 1x1 square*/ 16 * 15 +
+ /*2 2x2 square*/ 2 * (5 + 3 + 4 + 4) +
+ /*3x3 and 1x1*/ 2 * 4 * 7 +
+ /*2x2 and 1x1*/ 2 * 9 * 12);
+}
+
+TEST(NonOverlappingRectanglesTest, SimpleCountingWithOptional) {
+ Model model;
+ IntervalsRepository* interval_repository =
+ model.GetOrCreate();
+ std::vector x;
+ std::vector y;
+ const Literal l1(model.Add(NewBooleanVariable()), true);
+ x.push_back(interval_repository->CreateInterval(
+ IntegerValue(0), IntegerValue(5), IntegerValue(5), l1.Index(), false));
+ y.push_back(interval_repository->CreateInterval(
+ IntegerValue(0), IntegerValue(2), IntegerValue(2), l1.Index(), false));
+
+ const Literal l2(model.Add(NewBooleanVariable()), true);
+ x.push_back(interval_repository->CreateInterval(
+ IntegerValue(4), IntegerValue(6), IntegerValue(2), l2.Index(), false));
+ y.push_back(interval_repository->CreateInterval(
+ IntegerValue(3), IntegerValue(4), IntegerValue(1), l2.Index(), false));
+
+ // The cumulative relaxation adds extra variables that are not completely
+ // fixed. So to not count too many solutions with our code here, we disable
+ // that. Note that alternatively, we could have used the cp_model.proto API
+ // to do the same, and that should work even with this on.
+ // TODO(user): Fix and run with add_cumulative_relaxation = true.
+ AddNonOverlappingRectangles(x, y, &model); + + int num_solutions_found = 0; + while (true) { + const SatSolver::Status status = + SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + + // Display the first few solutions. + if (num_solutions_found < 30) { + LOG(INFO) << "R1: " << interval_repository->IsPresent(x[0]) << " " + << " R2: " << interval_repository->IsPresent(x[1]) << " "; + } + + num_solutions_found++; + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + EXPECT_EQ(4, num_solutions_found); +} + +TEST(NonOverlappingRectanglesTest, CountSolutionsWithZeroAreaBoxes) { + CpModelBuilder cp_model; + IntVar v1 = cp_model.NewIntVar({1, 2}); + IntVar v2 = cp_model.NewIntVar({0, 1}); + IntervalVar x1 = cp_model.NewIntervalVar(2, v2, 2 + v2); + IntervalVar x2 = cp_model.NewFixedSizeIntervalVar(1, 2); + IntervalVar y1 = cp_model.NewIntervalVar(1, v1, v1 + 1); + IntervalVar y2 = cp_model.NewFixedSizeIntervalVar(2, 0); + NoOverlap2DConstraint diffn = cp_model.AddNoOverlap2D(); + diffn.AddRectangle(x1, y1); + diffn.AddRectangle(x2, y2); + + Model model; + model.Add(NewSatParameters("enumerate_all_solutions:true")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << absl::StrJoin(response.solution(), " "); + ++count; + })); + const CpSolverResponse response = SolveCpModel(cp_model.Build(), &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + EXPECT_EQ(count, 2); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/diffn_util_test.cc b/ortools/sat/diffn_util_test.cc new file mode 100644 index 0000000000..508ac031be --- /dev/null +++ b/ortools/sat/diffn_util_test.cc @@ -0,0 +1,960 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/diffn_util.h" + +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/log/check.h" +#include "absl/random/bit_gen_ref.h" +#include "absl/random/distributions.h" +#include "absl/random/random.h" +#include "absl/strings/str_join.h" +#include "absl/types/span.h" +#include "benchmark/benchmark.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/logging.h" +#include "ortools/graph/connected_components.h" +#include "ortools/sat/2d_orthogonal_packing_testing.h" +#include "ortools/sat/integer.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::ElementsAre; +using ::testing::ElementsAreArray; +using ::testing::UnorderedElementsAre; +using ::testing::UnorderedElementsAreArray; + +TEST(GetOverlappingRectangleComponentsTest, NoComponents) { + EXPECT_TRUE(GetOverlappingRectangleComponents({}, {}).empty()); + IntegerValue zero(0); + IntegerValue two(2); + IntegerValue four(4); + EXPECT_TRUE(GetOverlappingRectangleComponents( + {{zero, two, zero, two}, {two, four, two, four}}, {}) + .empty()); + std::vector first = {0}; + EXPECT_TRUE(GetOverlappingRectangleComponents( + {{zero, two, zero, two}, {two, four, two, four}}, + absl::MakeSpan(first)) + .empty()); + std::vector both = {0, 1}; + EXPECT_TRUE(GetOverlappingRectangleComponents( + {{zero, two, zero, two}, {two, four, two, four}}, + absl::MakeSpan(both)) + .empty()); + EXPECT_TRUE(GetOverlappingRectangleComponents( + 
{{zero, two, zero, two}, {two, four, zero, two}}, + absl::MakeSpan(both)) + .empty()); + EXPECT_TRUE(GetOverlappingRectangleComponents( + {{zero, two, zero, two}, {zero, two, two, four}}, + absl::MakeSpan(both)) + .empty()); +} + +TEST(GetOverlappingRectangleComponentsTest, ComponentAndActive) { + EXPECT_TRUE(GetOverlappingRectangleComponents({}, {}).empty()); + IntegerValue zero(0); + IntegerValue one(1); + IntegerValue two(2); + IntegerValue three(3); + IntegerValue four(4); + + std::vector all = {0, 1, 2}; + const auto& components = GetOverlappingRectangleComponents( + {{zero, two, zero, two}, {zero, two, one, three}, {zero, two, two, four}}, + absl::MakeSpan(all)); + ASSERT_EQ(1, components.size()); + EXPECT_EQ(3, components[0].size()); + + std::vector only_two = {0, 2}; + EXPECT_TRUE(GetOverlappingRectangleComponents({{zero, two, zero, two}, + {zero, two, one, three}, + {zero, two, two, four}}, + absl::MakeSpan(only_two)) + .empty()); +} + +TEST(AnalyzeIntervalsTest, Random) { + // Generate a random set of intervals until the first conflict. We are in n^5! + absl::BitGen random; + const int64_t size = 20; + std::vector rectangles; + std::vector energies; + std::vector boxes; + for (int i = 0; i < 40; ++i) { + Rectangle box; + box.x_min = IntegerValue(absl::Uniform(random, 0, size)); + box.x_max = + IntegerValue(absl::Uniform(random, box.x_min.value() + 1, size + 1)); + box.y_min = IntegerValue(absl::Uniform(random, 0, size)); + box.y_max = + IntegerValue(absl::Uniform(random, box.y_min.value() + 1, size + 1)); + rectangles.push_back(box); + boxes.push_back(i); + energies.push_back(IntegerValue(absl::Uniform( + random, 1, (box.x_max - box.x_min + 1).value())) * + IntegerValue(absl::Uniform( + random, 1, (box.y_max - box.y_min + 1).value()))); + + LOG(INFO) << i << " " << box << " energy:" << energies.back(); + Rectangle conflict; + if (!BoxesAreInEnergyConflict(rectangles, energies, boxes, &conflict)) { + continue; + } + + LOG(INFO) << "Conflict! 
" << conflict; + + // Make sure whatever filter we do, we do not remove the conflict. + absl::Span s = absl::MakeSpan(boxes); + IntegerValue threshold_x = kMaxIntegerValue; + IntegerValue threshold_y = kMaxIntegerValue; + for (int i = 0; i < 4; ++i) { + if (!AnalyzeIntervals(/*transpose=*/i % 2 == 1, s, rectangles, energies, + &threshold_x, &threshold_y)) { + LOG(INFO) << "Detected by analyse."; + return; + } + s = FilterBoxesAndRandomize(rectangles, s, threshold_x, threshold_y, + random); + LOG(INFO) << "Filtered size: " << s.size() << " x<=" << threshold_x + << " y<=" << threshold_y; + ASSERT_TRUE(BoxesAreInEnergyConflict(rectangles, energies, s)); + } + + break; + } +} + +TEST(FilterBoxesThatAreTooLargeTest, Empty) { + std::vector r; + std::vector energies; + std::vector boxes; + EXPECT_TRUE( + FilterBoxesThatAreTooLarge(r, energies, absl::MakeSpan(boxes)).empty()); +} + +TEST(FilterBoxesThatAreTooLargeTest, BasicTest) { + int num_boxes(3); + std::vector r(num_boxes); + std::vector energies(num_boxes, IntegerValue(25)); + std::vector boxes{0, 1, 2}; + + r[0] = {IntegerValue(0), IntegerValue(5), IntegerValue(0), IntegerValue(5)}; + r[1] = {IntegerValue(0), IntegerValue(10), IntegerValue(0), IntegerValue(10)}; + r[2] = {IntegerValue(0), IntegerValue(6), IntegerValue(0), IntegerValue(6)}; + + EXPECT_THAT(FilterBoxesThatAreTooLarge(r, energies, absl::MakeSpan(boxes)), + ElementsAre(0, 2)); +} + +TEST(ConstructOverlappingSetsTest, BasicTest) { + std::vector> result{{3}}; // To be sure we clear. + + // --------------------0 + // --------1 --------2 + // ------------3 + // ------4 + std::vector intervals{{0, IntegerValue(0), IntegerValue(10)}, + {1, IntegerValue(0), IntegerValue(4)}, + {2, IntegerValue(6), IntegerValue(10)}, + {3, IntegerValue(2), IntegerValue(8)}, + {4, IntegerValue(3), IntegerValue(6)}}; + + // Note that the order is deterministic, but not sorted. 
+ ConstructOverlappingSets(/*already_sorted=*/false, &intervals, &result); + EXPECT_THAT(result, ElementsAre(UnorderedElementsAre(0, 1, 3, 4), + UnorderedElementsAre(3, 0, 2))); +} + +TEST(ConstructOverlappingSetsTest, OneSet) { + std::vector> result{{3}}; // To be sure we clear. + + std::vector intervals{ + {0, IntegerValue(0), IntegerValue(10)}, + {1, IntegerValue(1), IntegerValue(10)}, + {2, IntegerValue(2), IntegerValue(10)}, + {3, IntegerValue(3), IntegerValue(10)}, + {4, IntegerValue(4), IntegerValue(10)}}; + + ConstructOverlappingSets(/*already_sorted=*/false, &intervals, &result); + EXPECT_THAT(result, ElementsAre(ElementsAre(0, 1, 2, 3, 4))); +} + +TEST(GetOverlappingIntervalComponentsTest, BasicTest) { + std::vector> components{{3}}; // To be sure we clear. + + std::vector intervals{{0, IntegerValue(0), IntegerValue(3)}, + {1, IntegerValue(2), IntegerValue(4)}, + {2, IntegerValue(4), IntegerValue(7)}, + {3, IntegerValue(8), IntegerValue(10)}, + {4, IntegerValue(5), IntegerValue(9)}}; + + GetOverlappingIntervalComponents(&intervals, &components); + EXPECT_THAT(components, ElementsAre(ElementsAre(0, 1), ElementsAre(2, 4, 3))); +} + +TEST(GetOverlappingIntervalComponentsAndArticulationPointsTest, + WithWeirdIndicesAndSomeCornerCases) { + // Here are our intervals: 2======5 7====9 + // They are indexed from top to 0===2 4=====7 8======11 + // bottom, from left to right, 1===3 5=6 7=8 + // starting at 10. 
+ std::vector intervals{ + {10, IntegerValue(2), IntegerValue(5)}, + {11, IntegerValue(7), IntegerValue(9)}, + {12, IntegerValue(0), IntegerValue(2)}, + {13, IntegerValue(4), IntegerValue(7)}, + {14, IntegerValue(8), IntegerValue(11)}, + {15, IntegerValue(1), IntegerValue(3)}, + {16, IntegerValue(5), IntegerValue(6)}, + {17, IntegerValue(7), IntegerValue(8)}, + }; + + std::vector> components; + GetOverlappingIntervalComponents(&intervals, &components); + EXPECT_THAT(components, ElementsAre(ElementsAre(12, 15, 10, 13, 16), + ElementsAre(17, 11, 14))); + + EXPECT_THAT(GetIntervalArticulationPoints(&intervals), + ElementsAre(15, 10, 13, 11)); +} + +std::vector GenerateRandomIntervalVector( + absl::BitGenRef random, int num_intervals) { + std::vector intervals; + intervals.reserve(num_intervals); + const int64_t interval_domain = + absl::LogUniform(random, 1, std::numeric_limits::max()); + const int64_t max_interval_length = absl::Uniform( + random, std::max(1, interval_domain / (2 * num_intervals + 1)), + interval_domain); + for (int i = 0; i < num_intervals; ++i) { + const int64_t start = absl::Uniform(random, 0, interval_domain); + const int64_t max_length = + std::min(interval_domain - start, max_interval_length); + const int64_t end = + start + absl::Uniform(absl::IntervalClosed, random, 1, max_length); + intervals.push_back( + IndexedInterval{i, IntegerValue(start), IntegerValue(end)}); + } + return intervals; +} + +std::vector> GetOverlappingIntervalComponentsBruteForce( + const std::vector& intervals) { + // Build the adjacency list. 
+ std::vector> adj(intervals.size()); + for (int i = 1; i < intervals.size(); ++i) { + for (int j = 0; j < i; ++j) { + if (std::max(intervals[i].start, intervals[j].start) < + std::min(intervals[i].end, intervals[j].end)) { + adj[i].push_back(j); + adj[j].push_back(i); + } + } + } + std::vector component_indices = + util::GetConnectedComponents(intervals.size(), adj); + if (component_indices.empty()) return {}; + // Transform that into the expected output: a vector of components. + std::vector> components( + *absl::c_max_element(component_indices) + 1); + for (int i = 0; i < intervals.size(); ++i) { + components[component_indices[i]].push_back(i); + } + // Sort the components by start, like GetOverlappingIntervalComponents(). + absl::c_sort(components, [&intervals](const std::vector& c1, + const std::vector& c2) { + CHECK(!c1.empty() && !c2.empty()); + return intervals[c1[0]].start < intervals[c2[0]].start; + }); + // Inside each component, the intervals should be sorted, too. + // Moreover, we need to convert our indices to IntervalIndex.index. + for (std::vector& component : components) { + absl::c_sort(component, [&intervals](int i, int j) { + return IndexedInterval::ComparatorByStartThenEndThenIndex()(intervals[i], + intervals[j]); + }); + for (int& index : component) index = intervals[index].index; + } + return components; +} + +TEST(GetOverlappingIntervalComponentsTest, RandomizedStressTest) { + // Test duration as of 2021-06: .6s in fastbuild, .3s in opt. 
+ constexpr int kNumTests = 10000; + absl::BitGen random; + for (int test = 0; test < kNumTests; ++test) { + const int num_intervals = absl::Uniform(random, 0, 16); + std::vector intervals = + GenerateRandomIntervalVector(random, num_intervals); + const std::vector intervals_copy = intervals; + std::vector> components; + GetOverlappingIntervalComponents(&intervals, &components); + ASSERT_THAT( + components, + ElementsAreArray(GetOverlappingIntervalComponentsBruteForce(intervals))) + << test << " " << absl::StrJoin(intervals_copy, ","); + // Also verify that the function only altered the order of "intervals". + EXPECT_THAT(intervals, UnorderedElementsAreArray(intervals_copy)); + ASSERT_FALSE(HasFailure()) + << test << " " << absl::StrJoin(intervals_copy, ","); + } +} + +TEST(GetIntervalArticulationPointsTest, RandomizedStressTest) { + // THIS TEST ASSUMES THAT GetOverlappingIntervalComponents() IS CORRECT. + // -> don't look at it if GetOverlappingIntervalComponentsTest.StressTest + // fails, and rather investigate that other test first. + + auto get_num_components = [](const std::vector& intervals) { + std::vector mutable_intervals = intervals; + std::vector> components; + GetOverlappingIntervalComponents(&mutable_intervals, &components); + return components.size(); + }; + // Test duration as of 2021-06: 1s in fastbuild, .4s in opt. + constexpr int kNumTests = 10000; + absl::BitGen random; + for (int test = 0; test < kNumTests; ++test) { + const int num_intervals = absl::Uniform(random, 0, 16); + const std::vector intervals = + GenerateRandomIntervalVector(random, num_intervals); + const int baseline_num_components = get_num_components(intervals); + + // Compute the expected articulation points: try removing each interval + // individually and check whether there are more components if we do. 
+ std::vector expected_articulation_points; + for (int i = 0; i < num_intervals; ++i) { + std::vector tmp_intervals = intervals; + tmp_intervals.erase(tmp_intervals.begin() + i); + if (get_num_components(tmp_intervals) > baseline_num_components) { + expected_articulation_points.push_back(i); + } + } + // Sort the articulation points by start, and replace them by their + // corresponding IndexedInterval.index. + absl::c_sort(expected_articulation_points, [&intervals](int i, int j) { + return intervals[i].start < intervals[j].start; + }); + for (int& idx : expected_articulation_points) idx = intervals[idx].index; + + // Compare our function with the expected values. + std::vector mutable_intervals = intervals; + EXPECT_THAT(GetIntervalArticulationPoints(&mutable_intervals), + ElementsAreArray(expected_articulation_points)); + + // Also verify that the function only altered the order of "intervals". + EXPECT_THAT(mutable_intervals, UnorderedElementsAreArray(intervals)); + ASSERT_FALSE(HasFailure()) << test << " " << absl::StrJoin(intervals, ","); + } +} + +TEST(CapacityProfileTest, BasicApi) { + CapacityProfile profile; + profile.AddRectangle(IntegerValue(2), IntegerValue(6), IntegerValue(0), + IntegerValue(2)); + profile.AddRectangle(IntegerValue(4), IntegerValue(12), IntegerValue(0), + IntegerValue(1)); + profile.AddRectangle(IntegerValue(4), IntegerValue(8), IntegerValue(0), + IntegerValue(5)); + std::vector result; + profile.BuildResidualCapacityProfile(&result); + EXPECT_THAT( + result, + ElementsAre( + CapacityProfile::Rectangle(kMinIntegerValue, IntegerValue(0)), + CapacityProfile::Rectangle(IntegerValue(2), IntegerValue(2)), + CapacityProfile::Rectangle(IntegerValue(4), IntegerValue(5)), + CapacityProfile::Rectangle(IntegerValue(8), IntegerValue(1)), + CapacityProfile::Rectangle(IntegerValue(12), IntegerValue(0)))); + + // We query it twice to test that it can be done and that the result is not + // messed up. 
+  profile.BuildResidualCapacityProfile(&result);
+  EXPECT_THAT(
+      result,
+      ElementsAre(
+          CapacityProfile::Rectangle(kMinIntegerValue, IntegerValue(0)),
+          CapacityProfile::Rectangle(IntegerValue(2), IntegerValue(2)),
+          CapacityProfile::Rectangle(IntegerValue(4), IntegerValue(5)),
+          CapacityProfile::Rectangle(IntegerValue(8), IntegerValue(1)),
+          CapacityProfile::Rectangle(IntegerValue(12), IntegerValue(0))));
+  EXPECT_EQ(IntegerValue(2 * 2 + 4 * 5 + 4 * 1), profile.GetBoundingArea());
+}
+
+TEST(CapacityProfileTest, ProfileWithMandatoryPart) {
+  CapacityProfile profile;
+  profile.AddRectangle(IntegerValue(2), IntegerValue(6), IntegerValue(0),
+                       IntegerValue(2));
+  profile.AddRectangle(IntegerValue(4), IntegerValue(12), IntegerValue(0),
+                       IntegerValue(1));
+  profile.AddRectangle(IntegerValue(4), IntegerValue(8), IntegerValue(0),
+                       IntegerValue(5));
+  profile.AddMandatoryConsumption(IntegerValue(5), IntegerValue(10),
+                                  IntegerValue(1));
+  std::vector<CapacityProfile::Rectangle> result;
+
+  // Add a dummy rectangle to test the result is cleared. result.push_back(..);
+  result.push_back(
+      CapacityProfile::Rectangle(IntegerValue(2), IntegerValue(3)));
+
+  profile.BuildResidualCapacityProfile(&result);
+  EXPECT_THAT(
+      result,
+      ElementsAre(
+          CapacityProfile::Rectangle(kMinIntegerValue, IntegerValue(0)),
+          CapacityProfile::Rectangle(IntegerValue(2), IntegerValue(2)),
+          CapacityProfile::Rectangle(IntegerValue(4), IntegerValue(5)),
+          CapacityProfile::Rectangle(IntegerValue(5), IntegerValue(4)),
+          CapacityProfile::Rectangle(IntegerValue(8), IntegerValue(0)),
+          CapacityProfile::Rectangle(IntegerValue(10), IntegerValue(1)),
+          CapacityProfile::Rectangle(IntegerValue(12), IntegerValue(0))));
+
+  // The bounding area should not be impacted by the mandatory consumption.
+ EXPECT_EQ(IntegerValue(2 * 2 + 4 * 5 + 4 * 1), profile.GetBoundingArea()); +} + +IntegerValue NaiveSmallest1DIntersection(IntegerValue range_min, + IntegerValue range_max, + IntegerValue size, + IntegerValue interval_min, + IntegerValue interval_max) { + IntegerValue min_intersection = std::numeric_limits::max(); + for (IntegerValue start = range_min; start + size <= range_max; ++start) { + // Interval is [start, start + size] + const IntegerValue intersection_start = std::max(start, interval_min); + const IntegerValue intersection_end = std::min(start + size, interval_max); + const IntegerValue intersection_length = + std::max(IntegerValue(0), intersection_end - intersection_start); + min_intersection = std::min(min_intersection, intersection_length); + } + return min_intersection; +} + +TEST(Smallest1DIntersectionTest, BasicTest) { + absl::BitGen random; + const int64_t max_size = 20; + constexpr int num_runs = 400; + for (int k = 0; k < num_runs; k++) { + const IntegerValue range_min = + IntegerValue(absl::Uniform(random, 0, max_size - 1)); + const IntegerValue range_max = + IntegerValue(absl::Uniform(random, range_min.value() + 1, max_size)); + const IntegerValue size = + absl::Uniform(random, 1, range_max.value() - range_min.value()); + + const IntegerValue interval_min = + IntegerValue(absl::Uniform(random, 0, max_size - 1)); + const IntegerValue interval_max = + IntegerValue(absl::Uniform(random, interval_min.value() + 1, max_size)); + EXPECT_EQ(NaiveSmallest1DIntersection(range_min, range_max, size, + interval_min, interval_max), + Smallest1DIntersection(range_min, range_max, size, interval_min, + interval_max)); + } +} + +TEST(RectangleTest, BasicTest) { + Rectangle r1 = {.x_min = 0, .x_max = 2, .y_min = 0, .y_max = 2}; + Rectangle r2 = {.x_min = 1, .x_max = 3, .y_min = 1, .y_max = 3}; + EXPECT_EQ(r1.Intersect(r2), + Rectangle({.x_min = 1, .x_max = 2, .y_min = 1, .y_max = 2})); +} + +TEST(RectangleTest, RandomSetDifferenceTest) { + absl::BitGen random; + 
const int64_t size = 20; + constexpr int num_runs = 400; + for (int k = 0; k < num_runs; k++) { + Rectangle ret[2]; + for (int i = 0; i < 2; ++i) { + ret[i].x_min = IntegerValue(absl::Uniform(random, 0, size - 1)); + ret[i].x_max = + ret[i].x_min + IntegerValue(absl::Uniform(random, 1, size - 1)); + ret[i].y_min = IntegerValue(absl::Uniform(random, 0, size - 1)); + ret[i].y_max = + ret[i].y_min + IntegerValue(absl::Uniform(random, 1, size - 1)); + } + auto set_diff = ret[0].SetDifference(ret[1]); + EXPECT_EQ(set_diff.empty(), ret[0].Intersect(ret[1]) == ret[0]); + IntegerValue diff_area = 0; + for (int i = 0; i < set_diff.size(); ++i) { + for (int j = i + 1; j < set_diff.size(); ++j) { + EXPECT_TRUE(set_diff[i].IsDisjoint(set_diff[j])); + } + EXPECT_NE(set_diff[i].Intersect(ret[0]), Rectangle::GetEmpty()); + EXPECT_EQ(set_diff[i].Intersect(ret[1]), Rectangle::GetEmpty()); + IntegerValue area = set_diff[i].Area(); + EXPECT_GT(area, 0); + diff_area += area; + } + EXPECT_EQ(ret[0].IntersectArea(ret[1]) + diff_area, ret[0].Area()); + } +} + +TEST(GetMinimumOverlapTest, BasicTest) { + RectangleInRange range_ret = { + .bounding_area = {.x_min = 0, .x_max = 15, .y_min = 0, .y_max = 15}, + .x_size = 10, + .y_size = 10}; + + // Minimum intersection is when the item is in the bottom-left corner of the + // allowed space. 
+ Rectangle r = {.x_min = 3, .x_max = 30, .y_min = 3, .y_max = 30}; + EXPECT_EQ(range_ret.GetMinimumIntersection(r).Area(), 7 * 7); + EXPECT_EQ(range_ret.GetAtCorner(RectangleInRange::Corner::BOTTOM_LEFT), + Rectangle({.x_min = 0, .x_max = 10, .y_min = 0, .y_max = 10})); + EXPECT_EQ(range_ret.GetAtCorner(RectangleInRange::Corner::BOTTOM_LEFT) + .Intersect(r) + .Area(), + 7 * 7); + EXPECT_EQ(r.Intersect( + Rectangle({.x_min = 0, .x_max = 10, .y_min = 0, .y_max = 10})), + Rectangle({.x_min = 3, .x_max = 10, .y_min = 3, .y_max = 10})); + + RectangleInRange bigger = + RectangleInRange::BiggestWithMinIntersection(r, range_ret, 7, 7); + // This should be a broader range but don't increase the minimum intersection. + EXPECT_EQ(bigger.GetMinimumIntersection(r).Area(), 7 * 7); + for (const auto& pos : + {RectangleInRange::Corner::BOTTOM_LEFT, + RectangleInRange::Corner::TOP_LEFT, RectangleInRange::Corner::TOP_RIGHT, + RectangleInRange::Corner::BOTTOM_RIGHT}) { + EXPECT_EQ(bigger.GetAtCorner(pos).Intersect(r).Area(), 7 * 7); + } + EXPECT_EQ(bigger.bounding_area.x_min, 0); + EXPECT_EQ(bigger.bounding_area.x_max, 33); + EXPECT_EQ(bigger.bounding_area.y_min, 0); + EXPECT_EQ(bigger.bounding_area.y_max, 33); + EXPECT_EQ(r.Intersect(Rectangle( + {.x_min = 23, .x_max = 33, .y_min = 23, .y_max = 33})), + Rectangle({.x_min = 23, .x_max = 30, .y_min = 23, .y_max = 30})); + + RectangleInRange range_ret2 = { + .bounding_area = {.x_min = 0, .x_max = 105, .y_min = 0, .y_max = 120}, + .x_size = 100, + .y_size = 100}; + Rectangle r2 = {.x_min = 2, .x_max = 4, .y_min = 0, .y_max = 99}; + EXPECT_EQ(range_ret2.GetMinimumIntersection(r2), Rectangle::GetEmpty()); +} + +IntegerValue RecomputeEnergy(const Rectangle& rectangle, + const std::vector& intervals) { + IntegerValue ret = 0; + for (const RectangleInRange& range : intervals) { + const Rectangle min_intersect = range.GetMinimumIntersection(rectangle); + EXPECT_LE(min_intersect.SizeX(), range.x_size); + EXPECT_LE(min_intersect.SizeY(), 
range.y_size); + ret += min_intersect.Area(); + } + return ret; +} + +IntegerValue RecomputeEnergy(const ProbingRectangle& ranges) { + return RecomputeEnergy(ranges.GetCurrentRectangle(), ranges.Intervals()); +} + +void MoveAndCheck(ProbingRectangle& ranges, ProbingRectangle::Edge type) { + EXPECT_TRUE(ranges.CanShrink(type)); + const IntegerValue expected_area = + ranges.GetCurrentRectangle().Area() - ranges.GetShrinkDeltaArea(type); + const IntegerValue expected_min_energy = + ranges.GetMinimumEnergy() - ranges.GetShrinkDeltaEnergy(type); + ranges.Shrink(type); + EXPECT_EQ(ranges.GetMinimumEnergy(), RecomputeEnergy(ranges)); + EXPECT_EQ(ranges.GetMinimumEnergy(), expected_min_energy); + EXPECT_EQ(ranges.GetCurrentRectangle().Area(), expected_area); + ranges.ValidateInvariants(); +} + +TEST(ProbingRectangleTest, BasicTest) { + RectangleInRange range_ret = { + .bounding_area = {.x_min = 0, .x_max = 15, .y_min = 0, .y_max = 13}, + .x_size = 10, + .y_size = 8}; + RectangleInRange range_ret2 = { + .bounding_area = {.x_min = 1, .x_max = 8, .y_min = 7, .y_max = 14}, + .x_size = 5, + .y_size = 5}; + + std::vector ranges_vec = {range_ret, range_ret2}; + ProbingRectangle ranges(ranges_vec); + EXPECT_EQ(ranges.GetCurrentRectangle(), + Rectangle({.x_min = 0, .x_max = 15, .y_min = 0, .y_max = 14})); + + // Start with the full bounding box, thus both are fully inside. 
+ EXPECT_EQ(ranges.GetMinimumEnergy(), 10 * 8 + 5 * 5); + + EXPECT_EQ(ranges.GetMinimumEnergy(), RecomputeEnergy(ranges)); + + MoveAndCheck(ranges, ProbingRectangle::Edge::LEFT); + EXPECT_EQ(ranges.GetCurrentRectangle(), + Rectangle({.x_min = 1, .x_max = 15, .y_min = 0, .y_max = 14})); + + MoveAndCheck(ranges, ProbingRectangle::Edge::LEFT); + EXPECT_EQ(ranges.GetCurrentRectangle(), + Rectangle({.x_min = 3, .x_max = 15, .y_min = 0, .y_max = 14})); + + MoveAndCheck(ranges, ProbingRectangle::Edge::LEFT); + EXPECT_EQ(ranges.GetCurrentRectangle(), + Rectangle({.x_min = 5, .x_max = 15, .y_min = 0, .y_max = 14})); + + MoveAndCheck(ranges, ProbingRectangle::Edge::LEFT); + EXPECT_EQ(ranges.GetCurrentRectangle(), + Rectangle({.x_min = 6, .x_max = 15, .y_min = 0, .y_max = 14})); + + MoveAndCheck(ranges, ProbingRectangle::Edge::TOP); + EXPECT_EQ(ranges.GetCurrentRectangle(), + Rectangle({.x_min = 6, .x_max = 15, .y_min = 0, .y_max = 13})); + + MoveAndCheck(ranges, ProbingRectangle::Edge::TOP); + EXPECT_EQ(ranges.GetCurrentRectangle(), + Rectangle({.x_min = 6, .x_max = 15, .y_min = 0, .y_max = 8})); + + MoveAndCheck(ranges, ProbingRectangle::Edge::TOP); + EXPECT_EQ(ranges.GetCurrentRectangle(), + Rectangle({.x_min = 6, .x_max = 15, .y_min = 0, .y_max = 5})); +} + +void ReduceUntilDone(ProbingRectangle& ranges, absl::BitGen& random) { + static constexpr ProbingRectangle::Edge kAllEdgesArr[] = { + ProbingRectangle::Edge::LEFT, + ProbingRectangle::Edge::TOP, + ProbingRectangle::Edge::RIGHT, + ProbingRectangle::Edge::BOTTOM, + }; + static constexpr absl::Span kAllMoveTypes( + kAllEdgesArr); + while (!ranges.IsMinimal()) { + ProbingRectangle::Edge type = + kAllMoveTypes.at(absl::Uniform(random, 0, (int)kAllMoveTypes.size())); + if (!ranges.CanShrink(type)) continue; + MoveAndCheck(ranges, type); + } +} + +// This function will find the conflicts for rectangles that have as coordinates +// for the edges one of {min, min + size, max - size, max} for every possible +// item that is at 
least partially inside the rectangle. Note that we might not +// detect a conflict even if there is one by looking only at those rectangles, +// see the ProbingRectangleTest.CounterExample unit test for a concrete example. +std::optional FindRectangleWithEnergyTooLargeExhaustive( + const std::vector& box_ranges) { + int num_boxes = box_ranges.size(); + std::vector x; + x.reserve(num_boxes * 4); + std::vector y; + y.reserve(num_boxes * 4); + for (const auto& box : box_ranges) { + x.push_back(box.bounding_area.x_min); + x.push_back(box.bounding_area.x_min + box.x_size); + x.push_back(box.bounding_area.x_max - box.x_size); + x.push_back(box.bounding_area.x_max); + y.push_back(box.bounding_area.y_min); + y.push_back(box.bounding_area.y_min + box.y_size); + y.push_back(box.bounding_area.y_max - box.y_size); + y.push_back(box.bounding_area.y_max); + } + std::sort(x.begin(), x.end()); + std::sort(y.begin(), y.end()); + x.erase(std::unique(x.begin(), x.end()), x.end()); + y.erase(std::unique(y.begin(), y.end()), y.end()); + for (int i = 0; i < x.size(); ++i) { + for (int j = i + 1; j < x.size(); ++j) { + for (int k = 0; k < y.size(); ++k) { + for (int l = k + 1; l < y.size(); ++l) { + IntegerValue used_energy = 0; + Rectangle rect = { + .x_min = x[i], .x_max = x[j], .y_min = y[k], .y_max = y[l]}; + for (const auto& box : box_ranges) { + auto intersection = box.GetMinimumIntersection(rect); + used_energy += intersection.Area(); + } + if (used_energy > rect.Area()) { + std::vector items_inside; + for (const auto& box : box_ranges) { + if (box.GetMinimumIntersectionArea(rect) > 0) { + items_inside.push_back(box); + } + } + if (items_inside.size() == num_boxes) { + return rect; + } else { + // Call it again after removing items that are outside. 
+ auto try2 = + FindRectangleWithEnergyTooLargeExhaustive(items_inside); + if (try2.has_value()) { + return try2; + } + } + } + } + } + } + } + return std::nullopt; +} + +// This function should give exactly the same result as the +// `FindRectangleWithEnergyTooLargeExhaustive` above, but exercising the +// `ProbingRectangle` class. +std::optional FindRectangleWithEnergyTooLargeWithProbingRectangle( + std::vector& box_ranges) { + int left_shrinks = 0; + int right_shrinks = 0; + int top_shrinks = 0; + + ProbingRectangle ranges(box_ranges); + + while (true) { + // We want to do the equivalent of what + // `FindRectangleWithEnergyTooLargeExhaustive` does: for every + // left/right/top coordinates, try all possible bottom for conflicts. But + // since we cannot fix the coordinates with ProbingRectangle, we fix the + // number of shrinks instead. + ranges.Reset(); + for (int i = 0; i < left_shrinks; i++) { + CHECK(ranges.CanShrink(ProbingRectangle::Edge::LEFT)); + ranges.Shrink(ProbingRectangle::Edge::LEFT); + } + const bool left_end = !ranges.CanShrink(ProbingRectangle::Edge::LEFT); + for (int i = 0; i < top_shrinks; i++) { + CHECK(ranges.CanShrink(ProbingRectangle::Edge::TOP)); + ranges.Shrink(ProbingRectangle::Edge::TOP); + } + const bool top_end = !ranges.CanShrink(ProbingRectangle::Edge::TOP); + for (int i = 0; i < right_shrinks; i++) { + CHECK(ranges.CanShrink(ProbingRectangle::Edge::RIGHT)); + ranges.Shrink(ProbingRectangle::Edge::RIGHT); + } + const bool right_end = !ranges.CanShrink(ProbingRectangle::Edge::RIGHT); + if (ranges.GetMinimumEnergy() > ranges.GetCurrentRectangleArea()) { + return ranges.GetCurrentRectangle(); + } + while (ranges.CanShrink(ProbingRectangle::Edge::BOTTOM)) { + ranges.Shrink(ProbingRectangle::Edge::BOTTOM); + if (ranges.GetMinimumEnergy() > ranges.GetCurrentRectangleArea()) { + return ranges.GetCurrentRectangle(); + } + } + if (!right_end) { + right_shrinks++; + } else if (!top_end) { + top_shrinks++; + right_shrinks = 0; + } else if 
(!left_end) { + left_shrinks++; + top_shrinks = 0; + right_shrinks = 0; + } else { + break; + } + } + return std::nullopt; +} + +TEST(ProbingRectangleTest, Random) { + absl::BitGen random; + const int64_t size = 20; + std::vector rectangles; + int count = 0; + int comprehensive_count = 0; + constexpr int num_runs = 400; + for (int k = 0; k < num_runs; k++) { + const int num_intervals = absl::Uniform(random, 1, 20); + IntegerValue total_area = 0; + rectangles.clear(); + for (int i = 0; i < num_intervals; ++i) { + RectangleInRange& range = rectangles.emplace_back(); + range.bounding_area.x_min = IntegerValue(absl::Uniform(random, 0, size)); + range.bounding_area.x_max = IntegerValue( + absl::Uniform(random, range.bounding_area.x_min.value() + 1, size)); + range.x_size = absl::Uniform(random, 1, + range.bounding_area.x_max.value() - + range.bounding_area.x_min.value()); + + range.bounding_area.y_min = IntegerValue(absl::Uniform(random, 0, size)); + range.bounding_area.y_max = IntegerValue( + absl::Uniform(random, range.bounding_area.y_min.value() + 1, size)); + range.y_size = absl::Uniform(random, 1, + range.bounding_area.y_max.value() - + range.bounding_area.y_min.value()); + total_area += range.x_size * range.y_size; + } + auto ret = FindRectanglesWithEnergyConflictMC(rectangles, random, 1.0, 0.8); + count += !ret.conflicts.empty(); + ProbingRectangle ranges(rectangles); + EXPECT_EQ(total_area, ranges.GetMinimumEnergy()); + const bool has_possible_conflict = + FindRectangleWithEnergyTooLargeExhaustive(rectangles).has_value(); + if (has_possible_conflict) { + EXPECT_TRUE( + FindRectangleWithEnergyTooLargeWithProbingRectangle(rectangles) + .has_value()); + } + ReduceUntilDone(ranges, random); + comprehensive_count += has_possible_conflict; + } + LOG(INFO) << count << "/" << num_runs << " had an heuristic (out of " + << comprehensive_count << " possible)."; +} + +// Counterexample for proposition 5.4 of Clautiaux, François, et al. 
"A new +// constraint programming approach for the orthogonal packing problem." +// Computers & Operations Research 35.3 (2008): 944-959. +TEST(ProbingRectangleTest, CounterExample) { + const std::vector rectangles = { + {.bounding_area = {.x_min = 6, .x_max = 10, .y_min = 11, .y_max = 16}, + .x_size = 3, + .y_size = 2}, + {.bounding_area = {.x_min = 5, .x_max = 17, .y_min = 12, .y_max = 13}, + .x_size = 2, + .y_size = 1}, + {.bounding_area = {.x_min = 15, .x_max = 18, .y_min = 11, .y_max = 14}, + .x_size = 1, + .y_size = 1}, + {.bounding_area = {.x_min = 4, .x_max = 14, .y_min = 4, .y_max = 19}, + .x_size = 8, + .y_size = 7}, + {.bounding_area = {.x_min = 0, .x_max = 16, .y_min = 5, .y_max = 18}, + .x_size = 8, + .y_size = 9}, + {.bounding_area = {.x_min = 4, .x_max = 14, .y_min = 12, .y_max = 16}, + .x_size = 5, + .y_size = 1}, + {.bounding_area = {.x_min = 1, .x_max = 16, .y_min = 12, .y_max = 18}, + .x_size = 6, + .y_size = 1}, + {.bounding_area = {.x_min = 5, .x_max = 19, .y_min = 14, .y_max = 15}, + .x_size = 2, + .y_size = 1}}; + const Rectangle rect = {.x_min = 6, .x_max = 10, .y_min = 7, .y_max = 16}; + // The only other possible rectangle with a conflict is x(7..9), y(7..16), + // but none of {y_min, y_min + y_size, y_max - y_size, y_max} is equal to 7. 
+ const IntegerValue energy = RecomputeEnergy(rect, rectangles); + EXPECT_GT(energy, rect.Area()); + EXPECT_FALSE( + FindRectangleWithEnergyTooLargeExhaustive(rectangles).has_value()); +} + +void BM_FindRectangles(benchmark::State& state) { + absl::BitGen random; + std::vector> problems; + static constexpr int kNumProblems = 20; + for (int i = 0; i < kNumProblems; i++) { + problems.push_back(MakeItemsFromRectangles( + GenerateNonConflictingRectangles(state.range(0), random), + state.range(1) / 100.0, random)); + } + int idx = 0; + for (auto s : state) { + CHECK(FindRectanglesWithEnergyConflictMC(problems[idx], random, 1.0, 0.8) + .conflicts.empty()); + ++idx; + if (idx == kNumProblems) idx = 0; + } +} + +BENCHMARK(BM_FindRectangles) + ->ArgPair(5, 1) + ->ArgPair(10, 1) + ->ArgPair(20, 1) + ->ArgPair(30, 1) + ->ArgPair(40, 1) + ->ArgPair(80, 1) + ->ArgPair(100, 1) + ->ArgPair(200, 1) + ->ArgPair(1000, 1) + ->ArgPair(10000, 1) + ->ArgPair(5, 100) + ->ArgPair(10, 100) + ->ArgPair(20, 100) + ->ArgPair(30, 100) + ->ArgPair(40, 100) + ->ArgPair(80, 100) + ->ArgPair(100, 100) + ->ArgPair(200, 100) + ->ArgPair(1000, 100) + ->ArgPair(10000, 100); + +TEST(FindPairwiseRestrictionsTest, Random) { + absl::BitGen random; + constexpr int num_runs = 400; + for (int k = 0; k < num_runs; k++) { + const int num_rectangles = absl::Uniform(random, 1, 20); + const std::vector rectangles = + GenerateNonConflictingRectangles(num_rectangles, random); + const std::vector items = + GenerateItemsRectanglesWithNoPairwiseConflict( + rectangles, absl::Uniform(random, 0, 1.0), random); + std::vector results; + AppendPairwiseRestrictions(items, &results); + for (const PairwiseRestriction& result : results) { + EXPECT_NE(result.type, + PairwiseRestriction::PairwiseRestrictionType::CONFLICT); + } + } +} + +void BM_FindPairwiseRestrictions(benchmark::State& state) { + absl::BitGen random; + // In the vast majority of the cases the propagator doesn't find any pairwise + // condition to propagate. 
Thus we choose to benchmark for this particular + // case. + const std::vector items = + GenerateItemsRectanglesWithNoPairwisePropagation( + state.range(0), state.range(1) / 100.0, random); + std::vector results; + for (auto s : state) { + AppendPairwiseRestrictions(items, &results); + CHECK(results.empty()); + } +} + +BENCHMARK(BM_FindPairwiseRestrictions) + ->ArgPair(5, 1) + ->ArgPair(10, 1) + ->ArgPair(20, 1) + ->ArgPair(30, 1) + ->ArgPair(40, 1) + ->ArgPair(80, 1) + ->ArgPair(100, 1) + ->ArgPair(200, 1) + ->ArgPair(1000, 1) + ->ArgPair(10000, 1) + ->ArgPair(5, 100) + ->ArgPair(10, 100) + ->ArgPair(20, 100) + ->ArgPair(30, 100) + ->ArgPair(40, 100) + ->ArgPair(80, 100) + ->ArgPair(100, 100) + ->ArgPair(200, 100) + ->ArgPair(1000, 100) + ->ArgPair(10000, 100); + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/disjunctive_test.cc b/ortools/sat/disjunctive_test.cc new file mode 100644 index 0000000000..bf58e2b915 --- /dev/null +++ b/ortools/sat/disjunctive_test.cc @@ -0,0 +1,527 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/disjunctive.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/log/check.h" +#include "absl/random/bit_gen_ref.h" +#include "absl/random/random.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_join.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/logging.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/integer_search.h" +#include "ortools/sat/intervals.h" +#include "ortools/sat/model.h" +#include "ortools/sat/precedences.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_solver.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +// TODO(user): Add tests for variable duration intervals! The code is trickier +// to get right in this case. + +// Macros to improve the test readability below. +#define MIN_START(v) IntegerValue(v) +#define MIN_DURATION(v) IntegerValue(v) + +TEST(TaskSetTest, AddEntry) { + TaskSet tasks(1000); + std::mt19937 random(12345); + for (int i = 0; i < 1000; ++i) { + tasks.AddEntry({i, MIN_START(absl::Uniform(random, 0, 1000)), + MIN_DURATION(absl::Uniform(random, 0, 100))}); + } + EXPECT_TRUE( + std::is_sorted(tasks.SortedTasks().begin(), tasks.SortedTasks().end())); +} + +TEST(TaskSetTest, EndMinOnEmptySet) { + TaskSet tasks(0); + int critical_index; + EXPECT_EQ(kMinIntegerValue, + tasks.ComputeEndMin(/*task_to_ignore=*/-1, &critical_index)); + EXPECT_EQ(kMinIntegerValue, tasks.ComputeEndMin()); +} + +TEST(TaskSetTest, EndMinBasicTest) { + TaskSet tasks(3); + int critical_index; + tasks.AddEntry({0, MIN_START(2), MIN_DURATION(3)}); + tasks.AddEntry({1, MIN_START(2), MIN_DURATION(3)}); + tasks.AddEntry({2, MIN_START(2), MIN_DURATION(3)}); + EXPECT_EQ(11, tasks.ComputeEndMin(/*task_to_ignore=*/-1, &critical_index)); + EXPECT_EQ(11, tasks.ComputeEndMin()); + EXPECT_EQ(0, critical_index); +} + +TEST(TaskSetTest, EndMinWithNegativeValue) { + 
TaskSet tasks(3); + int critical_index; + tasks.AddEntry({0, MIN_START(-5), MIN_DURATION(1)}); + tasks.AddEntry({1, MIN_START(-6), MIN_DURATION(2)}); + tasks.AddEntry({2, MIN_START(-7), MIN_DURATION(3)}); + EXPECT_EQ(-1, tasks.ComputeEndMin(/*task_to_ignore=*/-1, &critical_index)); + EXPECT_EQ(-1, tasks.ComputeEndMin()); + EXPECT_EQ(0, critical_index); +} + +TEST(TaskSetTest, EndMinLimitCase) { + TaskSet tasks(3); + int critical_index; + tasks.AddEntry({0, MIN_START(2), MIN_DURATION(3)}); + tasks.AddEntry({1, MIN_START(2), MIN_DURATION(3)}); + tasks.AddEntry({2, MIN_START(8), MIN_DURATION(5)}); + EXPECT_EQ(8, tasks.ComputeEndMin(/*task_to_ignore=*/2, &critical_index)); + EXPECT_EQ(0, critical_index); + EXPECT_EQ(13, tasks.ComputeEndMin(/*task_to_ignore=*/-1, &critical_index)); + EXPECT_EQ(2, critical_index); +} + +TEST(TaskSetTest, IgnoringTheLastEntry) { + TaskSet tasks(3); + int critical_index; + tasks.AddEntry({0, MIN_START(2), MIN_DURATION(3)}); + tasks.AddEntry({1, MIN_START(7), MIN_DURATION(3)}); + EXPECT_EQ(10, tasks.ComputeEndMin(/*task_to_ignore=*/-1, &critical_index)); + EXPECT_EQ(5, tasks.ComputeEndMin(/*task_to_ignore=*/1, &critical_index)); +} + +#define MIN_START(v) IntegerValue(v) +#define MIN_DURATION(v) IntegerValue(v) + +// Tests that the DisjunctiveConstraint propagate how expected on the +// given input. Returns false if a conflict is detected (i.e. no feasible +// solution). 
+struct TaskWithDuration { + int min_start; + int max_end; + int min_duration; +}; +struct Task { + int min_start; + int max_end; +}; +bool TestDisjunctivePropagation(absl::Span input, + absl::Span expected, + int expected_num_enqueues) { + Model model; + IntegerTrail* integer_trail = model.GetOrCreate(); + IntervalsRepository* intervals = model.GetOrCreate(); + + const int kStart(0); + const int kHorizon(10000); + + std::vector ids; + for (const TaskWithDuration& task : input) { + const IntervalVariable i = + model.Add(NewInterval(kStart, kHorizon, task.min_duration)); + ids.push_back(i); + std::vector no_literal_reason; + std::vector no_integer_reason; + EXPECT_TRUE(integer_trail->Enqueue( + intervals->Start(i).GreaterOrEqual(IntegerValue(task.min_start)), + no_literal_reason, no_integer_reason)); + EXPECT_TRUE( + integer_trail->Enqueue(intervals->End(i).LowerOrEqual(task.max_end), + no_literal_reason, no_integer_reason)); + } + + // Propagate properly the other bounds of the intervals. + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + + const int initial_num_enqueues = integer_trail->num_enqueues(); + AddDisjunctive(ids, &model); + if (!model.GetOrCreate()->Propagate()) return false; + CHECK_EQ(input.size(), expected.size()); + for (int i = 0; i < input.size(); ++i) { + EXPECT_EQ(expected[i].min_start, + integer_trail->LowerBound(intervals->Start(ids[i]))) + << "task #" << i; + EXPECT_EQ(expected[i].max_end, + integer_trail->UpperBound(intervals->End(ids[i]))) + << "task #" << i; + } + + // The *2 is because there is one Enqueue() for the start and end variable. 
+ EXPECT_EQ(expected_num_enqueues + initial_num_enqueues, + integer_trail->num_enqueues()); + return true; +} + +// 01234567890 +// (---- ) +// ( ------) +TEST(DisjunctiveConstraintTest, NoPropagation) { + EXPECT_TRUE(TestDisjunctivePropagation({{0, 10, 4}, {0, 10, 6}}, + {{0, 10}, {0, 10}}, 0)); +} + +// 01234567890 +// (---- ) +// ( -------) +TEST(DisjunctiveConstraintTest, Overload) { + EXPECT_FALSE(TestDisjunctivePropagation({{0, 10, 4}, {0, 10, 7}}, {}, 0)); +} + +// 01234567890123456789 +// (----- ) +// ( -----) +// ( ------ ) +TEST(DisjunctiveConstraintTest, OverloadFromVilimPhd) { + EXPECT_FALSE( + TestDisjunctivePropagation({{0, 13, 5}, {1, 14, 5}, {2, 12, 6}}, {}, 0)); +} + +// 0123456789012345678901234567890123456789 +// ( [---- ) +// (--- ) +// ( ---) +// (-----) +// +// TODO(user): The problem with this test is that the other propagators do +// propagate the same bound, but in 2 steps, whereas the edge finding do that in +// one. To properly test this, we need to add options to deactivate some of +// the propagations. +TEST(DisjunctiveConstraintTest, EdgeFindingFromVilimPhd) { + EXPECT_TRUE(TestDisjunctivePropagation( + {{4, 30, 4}, {5, 13, 3}, {5, 13, 3}, {13, 18, 5}}, + {{18, 30}, {5, 13}, {5, 13}, {13, 18}}, /*expected_num_enqueues=*/2)); +} + +// 0123456789012345678901234567890123456789 +// (----------- ) +// ( ----------) +// ( -- ] ) +TEST(DisjunctiveConstraintTest, NotLastFromVilimPhd) { + EXPECT_TRUE(TestDisjunctivePropagation({{0, 25, 11}, {1, 27, 10}, {4, 20, 2}}, + {{0, 25}, {1, 27}, {4, 17}}, 1)); +} + +// 0123456789012345678901234567890123456789 +// (----- ) +// ( -----) +// (--- ) +// [ <- the new bound for the third task. 
+TEST(DisjunctiveConstraintTest, DetectablePrecedenceFromVilimPhd) { + EXPECT_TRUE(TestDisjunctivePropagation({{0, 13, 5}, {1, 14, 5}, {7, 17, 3}}, + {{0, 13}, {1, 14}, {10, 17}}, 1)); +} + +TEST(DisjunctiveConstraintTest, Precedences) { + Model model; + Trail* trail = model.GetOrCreate(); + IntegerTrail* integer_trail = model.GetOrCreate(); + auto* precedences = model.GetOrCreate(); + auto* relations = model.GetOrCreate(); + auto* intervals = model.GetOrCreate(); + + const auto add_affine_coeff_one_precedence = [&](const AffineExpression e1, + const AffineExpression& e2) { + CHECK_NE(e1.var, kNoIntegerVariable); + CHECK_EQ(e1.coeff, 1); + CHECK_NE(e2.var, kNoIntegerVariable); + CHECK_EQ(e2.coeff, 1); + precedences->AddPrecedenceWithOffset(e1.var, e2.var, + e1.constant - e2.constant); + relations->Add(e1.var, e2.var, e1.constant - e2.constant); + }; + + const int kStart(0); + const int kHorizon(10000); + + std::vector ids; + ids.push_back(model.Add(NewInterval(kStart, kHorizon, 10))); + ids.push_back(model.Add(NewInterval(kStart, kHorizon, 10))); + ids.push_back(model.Add(NewInterval(kStart, kHorizon, 10))); + AddDisjunctive(ids, &model); + + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + for (const IntervalVariable i : ids) { + EXPECT_EQ(0, integer_trail->LowerBound(intervals->Start(i))); + } + + // Now with the precedences. + add_affine_coeff_one_precedence(intervals->End(ids[0]), + intervals->Start(ids[2])); + add_affine_coeff_one_precedence(intervals->End(ids[1]), + intervals->Start(ids[2])); + EXPECT_TRUE(precedences->Propagate(trail)); + EXPECT_EQ(10, integer_trail->LowerBound(intervals->Start(ids[2]))); + + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_EQ(20, integer_trail->LowerBound(intervals->Start(ids[2]))); +} + +// This test should enumerate all the permutation of kNumIntervals elements. +// It used to fail before CL 134067105. 
+TEST(SchedulingTest, Permutations) { + static const int kNumIntervals = 4; + Model model; + std::vector intervals; + for (int i = 0; i < kNumIntervals; ++i) { + const IntervalVariable interval = + model.Add(NewInterval(0, kNumIntervals, 1)); + intervals.push_back(interval); + } + AddDisjunctive(intervals, &model); + + IntegerTrail* integer_trail = model.GetOrCreate(); + IntervalsRepository* repository = model.GetOrCreate(); + std::vector> solutions; + while (true) { + const SatSolver::Status status = + SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + + // Add the solution. + std::vector solution(kNumIntervals, -1); + for (int i = 0; i < intervals.size(); ++i) { + const IntervalVariable interval = intervals[i]; + const int64_t start_time = + integer_trail->LowerBound(repository->Start(interval)).value(); + DCHECK_GE(start_time, 0); + DCHECK_LT(start_time, kNumIntervals); + solution[start_time] = i; + } + solutions.push_back(solution); + LOG(INFO) << "Found solution: {" << absl::StrJoin(solution, ", ") << "}."; + + // Loop to the next solution. + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + + // Test that we do have all the permutations (but in a random order). + std::sort(solutions.begin(), solutions.end()); + std::vector expected(kNumIntervals); + std::iota(expected.begin(), expected.end(), 0); + for (int i = 0; i < solutions.size(); ++i) { + EXPECT_EQ(expected, solutions[i]); + if (i + 1 < solutions.size()) { + EXPECT_TRUE(std::next_permutation(expected.begin(), expected.end())); + } else { + // We enumerated all the permutations. + EXPECT_FALSE(std::next_permutation(expected.begin(), expected.end())); + } + } +} + +// ============================================================================ +// Random tests with comparison with a simple time-decomposition encoding. 
+// ============================================================================ + +void AddDisjunctiveTimeDecomposition(absl::Span vars, + Model* model) { + const int num_tasks = vars.size(); + IntegerTrail* integer_trail = model->GetOrCreate(); + IntegerEncoder* encoder = model->GetOrCreate(); + IntervalsRepository* repository = model->GetOrCreate(); + + // Compute time range. + IntegerValue min_start = kMaxIntegerValue; + IntegerValue max_end = kMinIntegerValue; + for (int t = 0; t < num_tasks; ++t) { + const AffineExpression start = repository->Start(vars[t]); + const AffineExpression end = repository->End(vars[t]); + min_start = std::min(min_start, integer_trail->LowerBound(start)); + max_end = std::max(max_end, integer_trail->UpperBound(end)); + } + + // Add a constraint for each point of time. + for (IntegerValue time = min_start; time <= max_end; ++time) { + std::vector presence_at_time; + for (const IntervalVariable var : vars) { + const AffineExpression start = repository->Start(var); + const AffineExpression end = repository->End(var); + + const IntegerValue start_min = integer_trail->LowerBound(start); + const IntegerValue end_max = integer_trail->UpperBound(end); + if (end_max <= time || time < start_min) continue; + + // This will be true iff interval is present at time. + // TODO(user): we actually only need one direction of the equivalence. + presence_at_time.push_back( + Literal(model->Add(NewBooleanVariable()), true)); + + std::vector presence_condition; + presence_condition.push_back(encoder->GetOrCreateAssociatedLiteral( + start.LowerOrEqual(IntegerValue(time)))); + presence_condition.push_back(encoder->GetOrCreateAssociatedLiteral( + end.GreaterOrEqual(IntegerValue(time + 1)))); + if (repository->IsOptional(var)) { + presence_condition.push_back(repository->PresenceLiteral(var)); + } + model->Add(ReifiedBoolAnd(presence_condition, presence_at_time.back())); + } + model->Add(AtMostOneConstraint(presence_at_time)); + + // Abort if UNSAT. 
+ if (model->GetOrCreate()->ModelIsUnsat()) return; + } +} + +struct OptionalTasksWithDuration { + int min_start; + int max_end; + int duration; + bool is_optional; +}; + +// TODO(user): we never generate zero duration for now. +std::vector GenerateRandomInstance( + int num_tasks, absl::BitGenRef randomizer) { + std::vector instance; + for (int i = 0; i < num_tasks; ++i) { + OptionalTasksWithDuration task; + task.min_start = absl::Uniform(randomizer, 0, 10); + task.max_end = absl::Uniform(randomizer, 0, 10); + if (task.min_start > task.max_end) std::swap(task.min_start, task.max_end); + if (task.min_start == task.max_end) ++task.max_end; + task.duration = + 1 + absl::Uniform(randomizer, 0, task.max_end - task.min_start - 1); + task.is_optional = absl::Bernoulli(randomizer, 1.0 / 2); + instance.push_back(task); + } + return instance; +} + +int CountAllSolutions( + absl::Span instance, + const std::function&, Model*)>& + add_disjunctive) { + Model model; + std::vector intervals; + for (const OptionalTasksWithDuration& task : instance) { + if (task.is_optional) { + const Literal is_present = Literal(model.Add(NewBooleanVariable()), true); + intervals.push_back(model.Add(NewOptionalInterval( + task.min_start, task.max_end, task.duration, is_present))); + } else { + intervals.push_back( + model.Add(NewInterval(task.min_start, task.max_end, task.duration))); + } + } + add_disjunctive(intervals, &model); + + int num_solutions_found = 0; + while (true) { + const SatSolver::Status status = + SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + num_solutions_found++; + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + return num_solutions_found; +} + +std::string InstanceDebugString( + absl::Span instance) { + std::string result; + for (const OptionalTasksWithDuration& task : instance) { + absl::StrAppend(&result, "[", task.min_start, ", ", task.max_end, + "] duration:", task.duration, + " is_optional:", task.is_optional, 
"\n"); + } + return result; +} + +TEST(DisjunctiveTest, RandomComparisonWithSimpleEncoding) { + std::mt19937 randomizer(12345); + const int num_tests = DEBUG_MODE ? 100 : 1000; + for (int test = 0; test < num_tests; ++test) { + const int num_tasks = absl::Uniform(randomizer, 1, 6); + const std::vector instance = + GenerateRandomInstance(num_tasks, randomizer); + EXPECT_EQ(CountAllSolutions(instance, AddDisjunctiveTimeDecomposition), + CountAllSolutions(instance, AddDisjunctive)) + << InstanceDebugString(instance); + EXPECT_EQ( + CountAllSolutions(instance, AddDisjunctive), + CountAllSolutions(instance, AddDisjunctiveWithBooleanPrecedencesOnly)) + << InstanceDebugString(instance); + } +} + +TEST(DisjunctiveTest, TwoIntervalsTest) { + // All the way to put 2 intervals of size 4 and 3 in [0,9]. There is just + // two non-busy unit interval, so: + // - 2 possibilities with 1 hole of size 2 at beginning + // - 2 possibilities with 1 hole of size 2 at the end. + // - 2 possibilities with 1 hole of size 2 in the middle. + // - 2 possibilities with 2 holes around the interval of size 3. + // - 2 possibilities with 2 holes around the interval of size 4. + // - 2 possibilities with 2 holes on both extremities. 
+ std::vector instance; + instance.push_back({0, 9, 4, false}); + instance.push_back({0, 9, 3, false}); + EXPECT_EQ(12, CountAllSolutions(instance, AddDisjunctive)); +} + +TEST(DisjunctiveTest, Precedences) { + Model model; + + std::vector ids; + ids.push_back(model.Add(NewInterval(0, 7, 3))); + ids.push_back(model.Add(NewInterval(0, 7, 2))); + AddDisjunctive(ids, &model); + + const IntegerVariable var = model.Add(NewIntegerVariable(0, 10)); + IntervalsRepository* intervals = model.GetOrCreate(); + model.Add( + AffineCoeffOneLowerOrEqualWithOffset(intervals->End(ids[0]), var, 5)); + model.Add( + AffineCoeffOneLowerOrEqualWithOffset(intervals->End(ids[1]), var, 4)); + + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_EQ(model.Get(LowerBound(var)), (3 + 2) + std::min(4, 5)); +} + +TEST(DisjunctiveTest, OptionalIntervalsWithLinkedPresence) { + Model model; + const Literal alternative = Literal(model.Add(NewBooleanVariable()), true); + + std::vector intervals; + intervals.push_back(model.Add(NewOptionalInterval(0, 6, 3, alternative))); + intervals.push_back(model.Add(NewOptionalInterval(0, 6, 2, alternative))); + intervals.push_back( + model.Add(NewOptionalInterval(0, 6, 4, alternative.Negated()))); + AddDisjunctive(intervals, &model); + + int num_solutions_found = 0; + while (true) { + const SatSolver::Status status = + SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + num_solutions_found++; + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + EXPECT_EQ(num_solutions_found, /*alternative*/ 6 + /*!alternative*/ 3); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/encoding_test.cc b/ortools/sat/encoding_test.cc new file mode 100644 index 0000000000..29608b3fbb --- /dev/null +++ b/ortools/sat/encoding_test.cc @@ -0,0 +1,106 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/encoding.h" + +#include +#include +#include +#include + +#include "absl/random/distributions.h" +#include "gtest/gtest.h" +#include "ortools/sat/pb_constraint.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_solver.h" + +namespace operations_research { +namespace sat { +namespace { + +TEST(MergeAllNodesWithDequeTest, BasicPropagation) { + // We start with a sat solver and n Boolean variables. + std::mt19937 random(12345); + const int n = 456; + SatSolver solver; + solver.SetNumVariables(n); + + // We encode the full cardinality constraint on the n variables. + std::deque repository; + std::vector nodes; + for (int i = 0; i < n; ++i) { + repository.push_back(EncodingNode::LiteralNode( + Literal(BooleanVariable(i), true), Coefficient(0))); + nodes.push_back(&repository.back()); + } + const Coefficient an_upper_bound(1000); + EncodingNode* root = + MergeAllNodesWithDeque(an_upper_bound, nodes, &solver, &repository); + EXPECT_EQ(root->lb(), 0); + EXPECT_EQ(root->ub(), n); + EXPECT_EQ(root->size(), n); + EXPECT_EQ(root->depth(), 9); // 2^9 = 512 which is the first value >= n. + + // We fix some of the n variables randomly, and check some property of the + // Encoding nodes. + for (int run = 0; run < 10; ++run) { + const float density = run / 10; + int exact_count = 0; + solver.Backtrack(0); + for (int i = 0; i < n; ++i) { + const bool value = absl::Bernoulli(random, density); + exact_count += value ? 
1 : 0; + EXPECT_TRUE(solver.EnqueueDecisionIfNotConflicting( + Literal(BooleanVariable(i), value))); + } + EXPECT_EQ(solver.Solve(), SatSolver::FEASIBLE); + + // We use an exact encoding, so the number of affected variables at the root + // level of the encoding should be exactly exact_count. + if (exact_count > 0) { + EXPECT_TRUE(solver.Assignment().LiteralIsTrue( + root->GreaterThan(exact_count - 1))); + } + if (exact_count < n) { + EXPECT_FALSE( + solver.Assignment().LiteralIsTrue(root->GreaterThan(exact_count))); + } + } +} + +TEST(LazyMergeAllNodeWithPQAndIncreaseLbTest, CorrectDepth) { + // We start with a sat solver and n Boolean variables. + std::mt19937 random(12345); + const int n = 456; + SatSolver solver; + solver.SetNumVariables(n); + + // We encode the full cardinality constraint on the n variables. + std::deque repository; + std::vector nodes; + for (int i = 0; i < n; ++i) { + repository.push_back(EncodingNode::LiteralNode( + Literal(BooleanVariable(i), true), Coefficient(0))); + nodes.push_back(&repository.back()); + } + EncodingNode* root = + LazyMergeAllNodeWithPQAndIncreaseLb(1, nodes, &solver, &repository); + EXPECT_EQ(root->lb(), 1); + EXPECT_EQ(root->ub(), n); + EXPECT_EQ(root->size(), 0); + EXPECT_EQ(root->depth(), 9); // 2^9 = 512 which is the first value >= n. +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/feasibility_jump_test.cc b/ortools/sat/feasibility_jump_test.cc new file mode 100644 index 0000000000..e20cac1e0d --- /dev/null +++ b/ortools/sat/feasibility_jump_test.cc @@ -0,0 +1,92 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/feasibility_jump.h" + +#include + +#include "gtest/gtest.h" + +namespace operations_research::sat { +namespace { + +TEST(JumpTableTest, TestCachesCalls) { + int num_calls = 0; + JumpTable jumps; + jumps.SetComputeFunction( + [&](int) { return std::make_pair(++num_calls, -1.0); }); + jumps.RecomputeAll(1); + + EXPECT_EQ(jumps.GetJump(0), std::make_pair(1, -1.0)); + EXPECT_EQ(jumps.GetJump(0), std::make_pair(1, -1.0)); + EXPECT_EQ(num_calls, 1); +} + +TEST(JumpTableTest, TestNeedsRecomputationOneVar) { + int num_calls = 0; + JumpTable jumps; + jumps.SetComputeFunction( + [&](int) { return std::make_pair(++num_calls, -1.0); }); + jumps.RecomputeAll(1); + + jumps.GetJump(0); + jumps.Recompute(0); + + EXPECT_EQ(jumps.GetJump(0), std::make_pair(2, -1.0)); + EXPECT_EQ(num_calls, 2); +} + +TEST(JumpTableTest, TestNeedsRecomputationMultiVar) { + int num_calls = 0; + JumpTable jumps; + jumps.SetComputeFunction( + [&](int v) { return std::make_pair(++num_calls, v); }); + jumps.RecomputeAll(2); + + jumps.GetJump(0); + jumps.GetJump(1); + jumps.Recompute(0); + + EXPECT_EQ(jumps.GetJump(0), std::make_pair(3, 0)); + EXPECT_EQ(jumps.GetJump(1), std::make_pair(2, 1)); + EXPECT_EQ(num_calls, 3); +} + +TEST(JumpTableTest, TestVarsNeedingRecomputePossiblyGood) { + int num_calls = 0; + JumpTable jumps; + jumps.SetComputeFunction( + [&](int) { return std::make_pair(++num_calls, 1.0); }); + jumps.RecomputeAll(1); + + EXPECT_TRUE(jumps.NeedRecomputation(0)); + EXPECT_EQ(num_calls, 0); +} + +TEST(JumpTableTest, TestSetJump) { + int num_calls = 0; + 
JumpTable jumps; + jumps.SetComputeFunction( + [&](int) { return std::make_pair(++num_calls, -1.0); }); + jumps.RecomputeAll(1); + + jumps.SetJump(0, 1, 1.0); + + EXPECT_FALSE(jumps.NeedRecomputation(0)); + EXPECT_GE(jumps.Score(0), 0); + EXPECT_EQ(jumps.GetJump(0), std::make_pair(1, 1.0)); + EXPECT_EQ(num_calls, 0); +} + +} // namespace +} // namespace operations_research::sat diff --git a/ortools/sat/go/cpmodel/BUILD.bazel b/ortools/sat/go/cpmodel/BUILD.bazel index 1aac72eae4..707ce53b2f 100644 --- a/ortools/sat/go/cpmodel/BUILD.bazel +++ b/ortools/sat/go/cpmodel/BUILD.bazel @@ -1,3 +1,16 @@ +# Copyright 2010-2024 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( diff --git a/ortools/sat/implied_bounds_test.cc b/ortools/sat/implied_bounds_test.cc new file mode 100644 index 0000000000..935c30b33d --- /dev/null +++ b/ortools/sat/implied_bounds_test.cc @@ -0,0 +1,706 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/implied_bounds.h" + +#include + +#include "absl/container/flat_hash_map.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/strong_vector.h" +#include "ortools/lp_data/lp_types.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/linear_constraint.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/sat_solver.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::Pair; +using ::testing::UnorderedElementsAre; + +TEST(ImpliedBoundsTest, BasicTest) { + Model model; + model.GetOrCreate()->set_use_implied_bounds(true); + auto* ib = model.GetOrCreate(); + auto* sat_solver = model.GetOrCreate(); + auto* integer_trail = model.GetOrCreate(); + + const Literal enforcement(model.Add(NewBooleanVariable()), true); + const IntegerVariable var(model.Add(NewIntegerVariable(0, 10))); + + EXPECT_TRUE(ib->Add(enforcement, + IntegerLiteral::GreaterOrEqual(var, IntegerValue(3)))); + EXPECT_TRUE(ib->Add(enforcement.Negated(), + IntegerLiteral::GreaterOrEqual(var, IntegerValue(7)))); + + // Here because we are at level-zero everything is propagated right away. 
+ EXPECT_EQ(integer_trail->LowerBound(var), IntegerValue(3)); + EXPECT_EQ(integer_trail->LevelZeroLowerBound(var), IntegerValue(3)); + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_EQ(integer_trail->LowerBound(var), IntegerValue(3)); +} + +TEST(ImpliedBoundsTest, BasicTestPositiveLevel) { + Model model; + model.GetOrCreate()->set_use_implied_bounds(true); + auto* ib = model.GetOrCreate(); + auto* sat_solver = model.GetOrCreate(); + auto* integer_trail = model.GetOrCreate(); + + const Literal enforcement(model.Add(NewBooleanVariable()), true); + const IntegerVariable var(model.Add(NewIntegerVariable(0, 10))); + + // We can do the same at a positive level. + const Literal to_enqueue(model.Add(NewBooleanVariable()), true); + EXPECT_TRUE(sat_solver->ResetToLevelZero()); + EXPECT_TRUE(sat_solver->EnqueueDecisionIfNotConflicting(to_enqueue)); + EXPECT_GT(sat_solver->CurrentDecisionLevel(), 0); + + EXPECT_TRUE(ib->Add(enforcement, + IntegerLiteral::GreaterOrEqual(var, IntegerValue(3)))); + EXPECT_TRUE(ib->Add(enforcement.Negated(), + IntegerLiteral::GreaterOrEqual(var, IntegerValue(7)))); + + // Now, only the level zero bound is up to date. + EXPECT_EQ(integer_trail->LowerBound(var), IntegerValue(0)); + EXPECT_EQ(integer_trail->LevelZeroLowerBound(var), IntegerValue(3)); + + // But on the next restart, nothing is lost. + EXPECT_TRUE(sat_solver->ResetToLevelZero()); + EXPECT_EQ(integer_trail->LowerBound(var), IntegerValue(3)); +} + +// Same test as above but no deduction since parameter is false. 
+TEST(ImpliedBoundsTest, BasicTestWithFalseParameters) { + Model model; + model.GetOrCreate()->set_use_implied_bounds(false); + auto* ib = model.GetOrCreate(); + auto* sat_solver = model.GetOrCreate(); + auto* integer_trail = model.GetOrCreate(); + + const Literal enforcement(model.Add(NewBooleanVariable()), true); + const IntegerVariable var(model.Add(NewIntegerVariable(0, 10))); + + EXPECT_TRUE(ib->Add(enforcement, + IntegerLiteral::GreaterOrEqual(var, IntegerValue(3)))); + EXPECT_TRUE(ib->Add(enforcement.Negated(), + IntegerLiteral::GreaterOrEqual(var, IntegerValue(7)))); + + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_EQ(integer_trail->LowerBound(var), IntegerValue(0)); +} + +TEST(ImpliedBoundsTest, ReadBoundsFromTrail) { + Model model; + model.GetOrCreate()->set_use_implied_bounds(true); + + const Literal l(model.Add(NewBooleanVariable()), true); + const IntegerVariable var(model.Add(NewIntegerVariable(0, 100))); + + // Make sure l as a view. + const IntegerVariable view(model.Add(NewIntegerVariable(0, 1))); + model.GetOrCreate()->AssociateToIntegerEqualValue( + l, view, IntegerValue(1)); + + // So that there is a decision. + auto* sat_solver = model.GetOrCreate(); + EXPECT_TRUE(sat_solver->EnqueueDecisionIfNotConflicting(l)); + EXPECT_TRUE(sat_solver->Propagate()); + + // Enqueue a bunch of fact. + auto* integer_trail = model.GetOrCreate(); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(2)), {l.Negated()}, {})); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(4)), {l.Negated()}, {})); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(8)), {l.Negated()}, {})); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(9)), {l.Negated()}, {})); + + // Read from trail. 
+ auto* ib = model.GetOrCreate(); + ib->ProcessIntegerTrail(l); + + std::vector result = ib->GetImpliedBounds(var); + EXPECT_EQ(result.size(), 1); + EXPECT_EQ(result[0].literal_view, view); + EXPECT_EQ(result[0].lower_bound, IntegerValue(9)); + EXPECT_TRUE(result[0].is_positive); +} + +TEST(ImpliedBoundsTest, DetectEqualityFromMin) { + Model model; + model.GetOrCreate()->set_use_implied_bounds(true); + + const Literal literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var(model.Add(NewIntegerVariable(0, 100))); + + auto* ib = model.GetOrCreate(); + ib->Add(literal, IntegerLiteral::LowerOrEqual(var, IntegerValue(0))); + + EXPECT_THAT( + ib->GetImpliedValues(literal), + testing::UnorderedElementsAre(testing::Pair(var, IntegerValue(0)))); +} + +TEST(ImpliedBoundsTest, DetectEqualityFromMax) { + Model model; + model.GetOrCreate()->set_use_implied_bounds(true); + + const Literal literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var(model.Add(NewIntegerVariable(0, 100))); + + auto* ib = model.GetOrCreate(); + ib->Add(literal, IntegerLiteral::GreaterOrEqual(var, IntegerValue(100))); + + EXPECT_THAT(ib->GetImpliedValues(literal), + UnorderedElementsAre(Pair(var, IntegerValue(100)))); +} + +TEST(ImpliedBoundsTest, DetectEqualityFromBothInequalities) { + Model model; + model.GetOrCreate()->set_use_implied_bounds(true); + + const Literal literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var(model.Add(NewIntegerVariable(0, 100))); + + auto* ib = model.GetOrCreate(); + ib->Add(literal, IntegerLiteral::LowerOrEqual(var, IntegerValue(7))); + ib->Add(literal, IntegerLiteral::GreaterOrEqual(var, IntegerValue(7))); + + EXPECT_THAT(ib->GetImpliedValues(literal), + UnorderedElementsAre(Pair(var, IntegerValue(7)))); +} + +TEST(ImpliedBoundsTest, NoEqualityDetection) { + Model model; + model.GetOrCreate()->set_use_implied_bounds(true); + + const Literal literal(model.Add(NewBooleanVariable()), true); + const 
IntegerVariable var(model.Add(NewIntegerVariable(0, 100))); + + auto* ib = model.GetOrCreate(); + ib->Add(literal, IntegerLiteral::LowerOrEqual(var, IntegerValue(7))); + ib->Add(literal, IntegerLiteral::GreaterOrEqual(var, IntegerValue(6))); + + EXPECT_TRUE(ib->GetImpliedValues(literal).empty()); +} + +TEST(DetectLinearEncodingOfProductsTest, MatchingElementEncodings) { + Model model; + const Literal l0(model.Add(NewBooleanVariable()), true); + const Literal l1(model.Add(NewBooleanVariable()), true); + const Literal l2(model.Add(NewBooleanVariable()), true); + const Literal l3(model.Add(NewBooleanVariable()), true); + + model.Add(NewIntegerVariableFromLiteral(l0)); + model.Add(NewIntegerVariableFromLiteral(l1)); + model.Add(NewIntegerVariableFromLiteral(l2)); + model.Add(NewIntegerVariableFromLiteral(l3)); + + const IntegerVariable x0(model.Add(NewIntegerVariable(0, 100))); + const IntegerVariable x1(model.Add(NewIntegerVariable(0, 100))); + auto* element_encodings = model.GetOrCreate(); + element_encodings->Add(x0, + {{IntegerValue(2), l0}, + {IntegerValue(4), l1}, + {IntegerValue(2), l2}, + {IntegerValue(10), l3}}, + 2); + element_encodings->Add(x1, + {{IntegerValue(3), l0}, + {IntegerValue(10), l1}, + {IntegerValue(20), l2}, + {IntegerValue(30), l3}}, + 2); + LinearConstraintBuilder builder(&model); + builder.AddConstant(IntegerValue(-1)); // To be cleared. 
+ EXPECT_TRUE( + model.GetOrCreate()->TryToLinearize(x0, x1, &builder)); + EXPECT_EQ(builder.BuildExpression().DebugString(), "34*X1 34*X2 294*X3 + 6"); + + builder.Clear(); + EXPECT_TRUE( + model.GetOrCreate()->TryToLinearize(x1, x0, &builder)); + EXPECT_EQ(builder.BuildExpression().DebugString(), "34*X1 34*X2 294*X3 + 6"); +} + +TEST(DetectLinearEncodingOfProductsTest, MatchingEncodingAndSizeTwoEncoding) { + Model model; + const Literal l0(model.Add(NewBooleanVariable()), true); + const Literal l1(model.Add(NewBooleanVariable()), true); + const Literal l2(model.Add(NewBooleanVariable()), true); + const Literal l3(model.Add(NewBooleanVariable()), true); + const IntegerVariable x0(model.Add(NewIntegerVariable(0, 100))); + const IntegerVariable x1(model.Add(NewIntegerVariable(6, 7))); + auto* element_encodings = model.GetOrCreate(); + auto* integer_encoder = model.GetOrCreate(); + element_encodings->Add(x0, + {{IntegerValue(2), l0}, + {IntegerValue(4), l1}, + {IntegerValue(2), l2}, + {IntegerValue(10), l3}}, + 2); + integer_encoder->AssociateToIntegerEqualValue(l2, x1, IntegerValue(7)); + model.Add(NewIntegerVariableFromLiteral(l0)); + model.Add(NewIntegerVariableFromLiteral(l1)); + model.Add(NewIntegerVariableFromLiteral(l2)); + model.Add(NewIntegerVariableFromLiteral(l3)); + + LinearConstraintBuilder builder(&model); + builder.AddConstant(IntegerValue(-1)); // To be cleared. 
+ EXPECT_TRUE( + model.GetOrCreate()->TryToLinearize(x0, x1, &builder)); + EXPECT_EQ(builder.BuildExpression().DebugString(), "12*X3 2*X4 48*X5 + 12"); + + EXPECT_TRUE( + model.GetOrCreate()->TryToLinearize(x1, x0, &builder)); + EXPECT_EQ(builder.BuildExpression().DebugString(), "12*X3 2*X4 48*X5 + 12"); +} + +TEST(DetectLinearEncodingOfProductsTest, BooleanAffinePosPosProduct) { + Model model; + const IntegerVariable var = model.Add(NewIntegerVariable(0, 1)); + const AffineExpression left(var, IntegerValue(2), IntegerValue(-1)); + const AffineExpression right(var, IntegerValue(3), IntegerValue(1)); + + LinearConstraintBuilder builder(&model); + util_intops::StrongVector lp_values(2, 0.0); + + EXPECT_TRUE(model.GetOrCreate()->TryToLinearize( + left, right, &builder)); + for (int value : {0, 1}) { + lp_values[var] = static_cast(value); + lp_values[NegationOf(var)] = static_cast(-value); + EXPECT_EQ(builder.BuildExpression().LpValue(lp_values), + left.LpValue(lp_values) * right.LpValue(lp_values)); + } + + builder.Clear(); + EXPECT_TRUE(model.GetOrCreate()->TryToLinearize( + right, left, &builder)); + for (int value : {0, 1}) { + lp_values[var] = static_cast(value); + lp_values[NegationOf(var)] = static_cast(-value); + EXPECT_EQ(builder.BuildExpression().LpValue(lp_values), + left.LpValue(lp_values) * right.LpValue(lp_values)); + } +} + +TEST(DetectLinearEncodingOfProductsTest, BooleanAffinePosNegProduct) { + Model model; + const IntegerVariable var = model.Add(NewIntegerVariable(0, 1)); + const AffineExpression left(var, IntegerValue(2), IntegerValue(-1)); + const AffineExpression right(NegationOf(var), IntegerValue(3), + IntegerValue(1)); + + LinearConstraintBuilder builder(&model); + util_intops::StrongVector lp_values(2, 0.0); + + EXPECT_TRUE(model.GetOrCreate()->TryToLinearize( + left, right, &builder)); + for (int value : {0, 1}) { + lp_values[var] = static_cast(value); + lp_values[NegationOf(var)] = static_cast(-value); + 
EXPECT_EQ(builder.BuildExpression().LpValue(lp_values), + left.LpValue(lp_values) * right.LpValue(lp_values)); + } + builder.Clear(); + EXPECT_TRUE(model.GetOrCreate()->TryToLinearize( + right, left, &builder)); + for (int value : {0, 1}) { + lp_values[var] = static_cast(value); + lp_values[NegationOf(var)] = static_cast(-value); + EXPECT_EQ(builder.BuildExpression().LpValue(lp_values), + left.LpValue(lp_values) * right.LpValue(lp_values)); + } +} + +TEST(DetectLinearEncodingOfProductsTest, BooleanAffineNegNegProduct) { + Model model; + const IntegerVariable var = model.Add(NewIntegerVariable(0, 1)); + const AffineExpression left(NegationOf(var), IntegerValue(2), + IntegerValue(-1)); + const AffineExpression right(NegationOf(var), IntegerValue(3), + IntegerValue(1)); + + LinearConstraintBuilder builder(&model); + util_intops::StrongVector lp_values(2, 0.0); + + EXPECT_TRUE(model.GetOrCreate()->TryToLinearize( + left, right, &builder)); + for (int value : {0, 1}) { + lp_values[var] = static_cast(value); + lp_values[NegationOf(var)] = static_cast(-value); + EXPECT_EQ(builder.BuildExpression().LpValue(lp_values), + left.LpValue(lp_values) * right.LpValue(lp_values)); + } + + builder.Clear(); + EXPECT_TRUE(model.GetOrCreate()->TryToLinearize( + right, left, &builder)); + for (int value : {0, 1}) { + lp_values[var] = static_cast(value); + lp_values[NegationOf(var)] = static_cast(-value); + EXPECT_EQ(builder.BuildExpression().LpValue(lp_values), + left.LpValue(lp_values) * right.LpValue(lp_values)); + } +} + +TEST(DetectLinearEncodingOfProductsTest, NoDetectionWhenNotBooleanA) { + Model model; + const IntegerVariable var = model.Add(NewIntegerVariable(0, 2)); + const AffineExpression left(var, IntegerValue(2), IntegerValue(-1)); + const AffineExpression right(var, IntegerValue(3), IntegerValue(1)); + + LinearConstraintBuilder builder(&model); + EXPECT_FALSE(model.GetOrCreate()->TryToLinearize( + left, right, &builder)); +} + +TEST(DetectLinearEncodingOfProductsTest, 
NoDetectionWhenNotBooleanB) { + Model model; + const IntegerVariable var = model.Add(NewIntegerVariable(-1, 1)); + const AffineExpression left(var, IntegerValue(2), IntegerValue(-1)); + const AffineExpression right(var, IntegerValue(3), IntegerValue(1)); + + LinearConstraintBuilder builder(&model); + EXPECT_FALSE(model.GetOrCreate()->TryToLinearize( + left, right, &builder)); +} + +TEST(DetectLinearEncodingOfProductsTest, AffineTimesConstant) { + Model model; + const IntegerVariable var = model.Add(NewIntegerVariable(0, 5)); + const AffineExpression left(var, IntegerValue(2), IntegerValue(-1)); + const AffineExpression right = IntegerValue(3); + + LinearConstraintBuilder builder(&model); + EXPECT_TRUE(model.GetOrCreate()->TryToLinearize( + left, right, &builder)); + EXPECT_EQ(builder.BuildExpression().DebugString(), "6*X0 + -3"); + + EXPECT_TRUE(model.GetOrCreate()->TryToLinearize( + right, left, &builder)); + EXPECT_EQ(builder.BuildExpression().DebugString(), "6*X0 + -3"); +} + +TEST(DecomposeProductTest, MatchingElementEncodings) { + Model model; + + const Literal l0(model.Add(NewBooleanVariable()), true); + const Literal l1(model.Add(NewBooleanVariable()), true); + const Literal l2(model.Add(NewBooleanVariable()), true); + const Literal l3(model.Add(NewBooleanVariable()), true); + + model.Add(NewIntegerVariableFromLiteral(l0)); + model.Add(NewIntegerVariableFromLiteral(l1)); + model.Add(NewIntegerVariableFromLiteral(l2)); + model.Add(NewIntegerVariableFromLiteral(l3)); + + const IntegerVariable x0(model.Add(NewIntegerVariable(0, 100))); + const IntegerVariable x1(model.Add(NewIntegerVariable(0, 100))); + + auto* element_encodings = model.GetOrCreate(); + element_encodings->Add(x0, + {{IntegerValue(2), l0}, + {IntegerValue(4), l1}, + {IntegerValue(2), l2}, + {IntegerValue(10), l3}}, + 2); + element_encodings->Add(x1, + {{IntegerValue(3), l0}, + {IntegerValue(10), l1}, + {IntegerValue(20), l2}, + {IntegerValue(30), l3}}, + 2); + + auto* decomposer = 
model.GetOrCreate(); + const std::vector terms_a = + decomposer->TryToDecompose(x0, x1); + const std::vector expected_terms_a = { + {l0, IntegerValue(2), IntegerValue(3)}, + {l1, IntegerValue(4), IntegerValue(10)}, + {l2, IntegerValue(2), IntegerValue(20)}, + {l3, IntegerValue(10), IntegerValue(30)}, + }; + ASSERT_FALSE(terms_a.empty()); + EXPECT_EQ(terms_a, expected_terms_a); + + const std::vector terms_b = + decomposer->TryToDecompose(x1, x0); + const std::vector expected_terms_b = { + {l0, IntegerValue(3), IntegerValue(2)}, + {l1, IntegerValue(10), IntegerValue(4)}, + {l2, IntegerValue(20), IntegerValue(2)}, + {l3, IntegerValue(30), IntegerValue(10)}, + }; + ASSERT_FALSE(terms_b.empty()); + EXPECT_EQ(terms_b, expected_terms_b); +} + +TEST(DecomposeProductTest, MatchingEncodingAndSizeTwoEncoding) { + Model model; + + const Literal l0(model.Add(NewBooleanVariable()), true); + const Literal l1(model.Add(NewBooleanVariable()), true); + const Literal l2(model.Add(NewBooleanVariable()), true); + const Literal l3(model.Add(NewBooleanVariable()), true); + const IntegerVariable x0(model.Add(NewIntegerVariable(0, 100))); + const IntegerVariable x1(model.Add(NewIntegerVariable(6, 7))); + + auto* element_encodings = model.GetOrCreate(); + element_encodings->Add(x0, + {{IntegerValue(2), l0}, + {IntegerValue(4), l1}, + {IntegerValue(2), l2}, + {IntegerValue(10), l3}}, + 2); + + auto* integer_encoder = model.GetOrCreate(); + integer_encoder->AssociateToIntegerEqualValue(l2, x1, IntegerValue(7)); + model.Add(NewIntegerVariableFromLiteral(l0)); + model.Add(NewIntegerVariableFromLiteral(l1)); + model.Add(NewIntegerVariableFromLiteral(l2)); + model.Add(NewIntegerVariableFromLiteral(l3)); + + auto* decomposer = model.GetOrCreate(); + const std::vector terms_a = + decomposer->TryToDecompose(x0, x1); + const std::vector expected_terms_a = { + {l0, IntegerValue(2), IntegerValue(6)}, + {l1, IntegerValue(4), IntegerValue(6)}, + {l2, IntegerValue(2), IntegerValue(7)}, + {l3, 
IntegerValue(10), IntegerValue(6)}, + }; + EXPECT_EQ(terms_a, expected_terms_a); + + const std::vector terms_b = + decomposer->TryToDecompose(x1, x0); + const std::vector expected_terms_b = { + {l0, IntegerValue(6), IntegerValue(2)}, + {l1, IntegerValue(6), IntegerValue(4)}, + {l2, IntegerValue(7), IntegerValue(2)}, + {l3, IntegerValue(6), IntegerValue(10)}, + }; + EXPECT_EQ(terms_b, expected_terms_b); +} + +TEST(DecomposeProductTest, MatchingSizeTwoEncodingsFirstFirst) { + Model model; + + const Literal l0(model.Add(NewBooleanVariable()), true); + const IntegerVariable x0(model.Add(NewIntegerVariable(5, 6))); + const IntegerVariable x1(model.Add(NewIntegerVariable(6, 7))); + + auto* integer_encoder = model.GetOrCreate(); + integer_encoder->AssociateToIntegerEqualValue(l0, x0, IntegerValue(5)); + integer_encoder->AssociateToIntegerEqualValue(l0, x1, IntegerValue(6)); + + auto* decomposer = model.GetOrCreate(); + const std::vector terms_a = + decomposer->TryToDecompose(x0, x1); + const std::vector expected_terms_a = { + {l0, IntegerValue(5), IntegerValue(6)}, + {l0.Negated(), IntegerValue(6), IntegerValue(7)}, + }; + EXPECT_EQ(terms_a, expected_terms_a); +} + +TEST(DecomposeProductTest, MatchingSizeTwoEncodingsFirstLast) { + Model model; + + const Literal l0(model.Add(NewBooleanVariable()), true); + const IntegerVariable x0(model.Add(NewIntegerVariable(5, 6))); + const IntegerVariable x1(model.Add(NewIntegerVariable(6, 7))); + + auto* integer_encoder = model.GetOrCreate(); + integer_encoder->AssociateToIntegerEqualValue(l0, x0, IntegerValue(5)); + integer_encoder->AssociateToIntegerEqualValue(l0, x1, IntegerValue(7)); + + auto* decomposer = model.GetOrCreate(); + const std::vector terms_a = + decomposer->TryToDecompose(x0, x1); + const std::vector expected_terms_a = { + {l0, IntegerValue(5), IntegerValue(7)}, + {l0.Negated(), IntegerValue(6), IntegerValue(6)}, + }; + EXPECT_EQ(terms_a, expected_terms_a); +} + +TEST(DecomposeProductTest, 
MatchingSizeTwoEncodingslastFirst) { + Model model; + + const Literal l0(model.Add(NewBooleanVariable()), true); + const IntegerVariable x0(model.Add(NewIntegerVariable(5, 6))); + const IntegerVariable x1(model.Add(NewIntegerVariable(6, 7))); + + auto* integer_encoder = model.GetOrCreate(); + integer_encoder->AssociateToIntegerEqualValue(l0, x0, IntegerValue(6)); + integer_encoder->AssociateToIntegerEqualValue(l0, x1, IntegerValue(6)); + + auto* decomposer = model.GetOrCreate(); + const std::vector terms_a = + decomposer->TryToDecompose(x0, x1); + const std::vector expected_terms_a = { + {l0.Negated(), IntegerValue(5), IntegerValue(7)}, + {l0, IntegerValue(6), IntegerValue(6)}, + }; + EXPECT_EQ(terms_a, expected_terms_a); +} + +TEST(DecomposeProductTest, MatchingSizeTwoEncodingsLastLast) { + Model model; + + const Literal l0(model.Add(NewBooleanVariable()), true); + const IntegerVariable x0(model.Add(NewIntegerVariable(5, 6))); + const IntegerVariable x1(model.Add(NewIntegerVariable(6, 7))); + + auto* integer_encoder = model.GetOrCreate(); + integer_encoder->AssociateToIntegerEqualValue(l0, x0, IntegerValue(6)); + integer_encoder->AssociateToIntegerEqualValue(l0, x1, IntegerValue(7)); + + auto* decomposer = model.GetOrCreate(); + const std::vector terms_a = + decomposer->TryToDecompose(x0, x1); + const std::vector expected_terms_a = { + {l0.Negated(), IntegerValue(5), IntegerValue(6)}, + {l0, IntegerValue(6), IntegerValue(7)}, + }; + EXPECT_EQ(terms_a, expected_terms_a); +} + +TEST(ProductDetectorTest, BasicCases) { + Model model; + model.GetOrCreate()->set_detect_linearized_product(true); + model.GetOrCreate()->set_linearization_level(2); + auto* detector = model.GetOrCreate(); + detector->ProcessTernaryClause(Literals({+1, +2, +3})); + detector->ProcessBinaryClause(Literals({-1, -2})); + detector->ProcessBinaryClause(Literals({-1, -3})); + EXPECT_EQ(kNoLiteralIndex, detector->GetProduct(Literal(-1), Literal(-2))); + EXPECT_EQ(kNoLiteralIndex, 
detector->GetProduct(Literal(-1), Literal(-3))); + EXPECT_EQ(Literal(+1).Index(), + detector->GetProduct(Literal(-2), Literal(-3))); +} + +TEST(ProductDetectorTest, BasicIntCase1) { + Model model; + model.GetOrCreate()->set_detect_linearized_product(true); + model.GetOrCreate()->set_linearization_level(2); + auto* detector = model.GetOrCreate(); + + IntegerVariable x(10); + IntegerVariable y(20); + detector->ProcessConditionalZero(Literal(+1), x); + detector->ProcessConditionalEquality(Literal(-1), x, y); + + EXPECT_EQ(x, detector->GetProduct(Literal(-1), y)); + EXPECT_EQ(kNoIntegerVariable, detector->GetProduct(Literal(-1), x)); + EXPECT_EQ(kNoIntegerVariable, detector->GetProduct(Literal(1), x)); + EXPECT_EQ(kNoIntegerVariable, detector->GetProduct(Literal(1), y)); +} + +TEST(ProductDetectorTest, BasicIntCase2) { + Model model; + model.GetOrCreate()->set_detect_linearized_product(true); + model.GetOrCreate()->set_linearization_level(2); + auto* detector = model.GetOrCreate(); + + IntegerVariable x(10); + IntegerVariable y(20); + detector->ProcessConditionalEquality(Literal(-1), x, y); + detector->ProcessConditionalZero(Literal(+1), x); + + EXPECT_EQ(x, detector->GetProduct(Literal(-1), y)); + EXPECT_EQ(kNoIntegerVariable, detector->GetProduct(Literal(-1), x)); + EXPECT_EQ(kNoIntegerVariable, detector->GetProduct(Literal(1), x)); + EXPECT_EQ(kNoIntegerVariable, detector->GetProduct(Literal(1), y)); +} + +TEST(ProductDetectorTest, RLT) { + Model model; + model.GetOrCreate()->set_add_rlt_cuts(true); + model.GetOrCreate()->set_linearization_level(2); + auto* detector = model.GetOrCreate(); + auto* integer_encoder = model.GetOrCreate(); + + const Literal l0(model.Add(NewBooleanVariable()), true); + const IntegerVariable x(model.Add(NewIntegerVariable(0, 1))); + integer_encoder->AssociateToIntegerEqualValue(l0, x, IntegerValue(1)); + + const Literal l1(model.Add(NewBooleanVariable()), true); + const IntegerVariable y(model.Add(NewIntegerVariable(0, 1))); + 
integer_encoder->AssociateToIntegerEqualValue(l1, y, IntegerValue(1)); + + const Literal l2(model.Add(NewBooleanVariable()), true); + const IntegerVariable z(model.Add(NewIntegerVariable(0, 1))); + integer_encoder->AssociateToIntegerEqualValue(l2, z, IntegerValue(1)); + + // X + (1 - Y) + Z >= 1 + detector->ProcessTernaryClause(Literals({+1, -2, +3})); + + // Lets choose value so that X + Z >= Y is tight. + util_intops::StrongVector lp_values(10, 0.0); + lp_values[x] = 0.7; + lp_values[y] = 0.9; + lp_values[z] = 0.2; + const absl::flat_hash_map lp_vars = { + {x, glop::ColIndex(0)}, {y, glop::ColIndex(1)}, {z, glop::ColIndex(2)}}; + detector->InitializeBooleanRLTCuts(lp_vars, lp_values); + + // (1 - X) * Y <= Z, 0.3 * 0.9 == 0.27 <= 0.2, interesting! + // (1 - X) * (1 - Z) <= (1 - Y), 0.3 * 0.8 == 0.24 <= 0.1, interesting ! + // Y * (1 - Z) <= X, 0.9 * 0.8 == 0.72 <= 0.7, interesting ! + EXPECT_EQ(detector->BoolRLTCandidates().size(), 3); + EXPECT_THAT(detector->BoolRLTCandidates().at(NegationOf(x)), + UnorderedElementsAre(y, NegationOf(z))); + EXPECT_THAT(detector->BoolRLTCandidates().at(y), + UnorderedElementsAre(NegationOf(x), NegationOf(z))); + EXPECT_THAT(detector->BoolRLTCandidates().at(NegationOf(z)), + UnorderedElementsAre(y, NegationOf(x))); + + // And we can recover the literal ub. + EXPECT_EQ(detector->LiteralProductUpperBound(NegationOf(x), y), z); + EXPECT_EQ(detector->LiteralProductUpperBound(NegationOf(x), NegationOf(z)), + NegationOf(y)); + EXPECT_EQ(detector->LiteralProductUpperBound(y, NegationOf(z)), x); + + // If we change values, we might get less candidates though + lp_values[x] = 0.0; + lp_values[y] = 0.2; + lp_values[z] = 0.2; + detector->InitializeBooleanRLTCuts(lp_vars, lp_values); + + // (1 - X) * Y <= Z, 1.0 * 0.2 <= 0.2, tight, but not interesting. + // (1 - X) * (1 - Z) <= (1 - Y), 1.0 * 0.8 <= 0.8 tight, but not interesting. + // Y * (1 - Z) <= X, 0.2 * 0.8 <= 0.0, interesting ! 
+ EXPECT_EQ(detector->BoolRLTCandidates().size(), 2); + EXPECT_THAT(detector->BoolRLTCandidates().at(y), + UnorderedElementsAre(NegationOf(z))); + EXPECT_THAT(detector->BoolRLTCandidates().at(NegationOf(z)), + UnorderedElementsAre(y)); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/inclusion_test.cc b/ortools/sat/inclusion_test.cc new file mode 100644 index 0000000000..7f5276708f --- /dev/null +++ b/ortools/sat/inclusion_test.cc @@ -0,0 +1,177 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/inclusion.h" + +#include +#include + +#include "absl/random/random.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/sat/util.h" + +namespace operations_research { +namespace sat { +namespace { + +TEST(InclusionDetectorTest, SymmetricExample) { + CompactVectorVector storage; + InclusionDetector detector(storage); + detector.AddPotentialSet(storage.Add({1, 2})); + detector.AddPotentialSet(storage.Add({1, 3})); + detector.AddPotentialSet(storage.Add({1, 2, 3})); + detector.AddPotentialSet(storage.Add({1, 4, 3, 2})); + + std::vector> included; + detector.DetectInclusions([&included](int subset, int superset) { + included.push_back({subset, superset}); + }); + EXPECT_THAT(included, + ::testing::ElementsAre(std::make_pair(0, 2), std::make_pair(1, 2), + std::make_pair(0, 3), std::make_pair(1, 3), + std::make_pair(2, 3))); +} + +// If sets are duplicates, we do not detect both inclusions, but just one. +TEST(InclusionDetectorTest, DuplicateBehavior) { + CompactVectorVector storage; + InclusionDetector detector(storage); + detector.AddPotentialSet(storage.Add({1, 2})); + detector.AddPotentialSet(storage.Add({1, 2})); + detector.AddPotentialSet(storage.Add({1, 2})); + detector.AddPotentialSet(storage.Add({1, 2})); + + std::vector> included; + detector.DetectInclusions([&included](int subset, int superset) { + included.push_back({subset, superset}); + }); + EXPECT_THAT(included, ::testing::ElementsAre( + std::make_pair(0, 1), std::make_pair(0, 2), + std::make_pair(1, 2), std::make_pair(0, 3), + std::make_pair(2, 3), std::make_pair(1, 3))); +} + +TEST(InclusionDetectorTest, NonSymmetricExample) { + CompactVectorVector storage; + InclusionDetector detector(storage); + + // Index 0, 1, 2 + detector.AddPotentialSubset(storage.Add({1, 2})); + detector.AddPotentialSubset(storage.Add({1, 3})); + detector.AddPotentialSubset(storage.Add({1, 2, 3})); + + // Index 3, 4, 5, 6 + 
detector.AddPotentialSuperset(storage.Add({1, 2})); + detector.AddPotentialSuperset(storage.Add({1, 4, 3})); + detector.AddPotentialSuperset(storage.Add({1, 4, 3})); + detector.AddPotentialSuperset(storage.Add({1, 5, 2, 3})); + + std::vector> included; + detector.DetectInclusions([&included](int subset, int superset) { + included.push_back({subset, superset}); + }); + EXPECT_THAT(included, ::testing::ElementsAre( + std::make_pair(0, 3), std::make_pair(1, 4), + std::make_pair(1, 5), std::make_pair(0, 6), + std::make_pair(2, 6), std::make_pair(1, 6))); + + // Class can be used multiple time. + // Here we test exclude a subset for appearing twice. + included.clear(); + detector.DetectInclusions([&detector, &included](int subset, int superset) { + included.push_back({subset, superset}); + detector.StopProcessingCurrentSubset(); + }); + EXPECT_THAT(included, + ::testing::ElementsAre(std::make_pair(0, 3), std::make_pair(1, 4), + std::make_pair(2, 6))); + + // Here we test exclude a superset for appearing twice. + included.clear(); + detector.DetectInclusions([&detector, &included](int subset, int superset) { + included.push_back({subset, superset}); + detector.StopProcessingCurrentSuperset(); + }); + EXPECT_THAT(included, ::testing::ElementsAre( + std::make_pair(0, 3), std::make_pair(1, 4), + std::make_pair(1, 5), std::make_pair(0, 6))); + + // Here we stop on first match. 
+ included.clear(); + detector.DetectInclusions([&detector, &included](int subset, int superset) { + included.push_back({subset, superset}); + detector.Stop(); + }); + EXPECT_THAT(included, ::testing::ElementsAre(std::make_pair(0, 3))); +} + +TEST(InclusionDetectorTest, InclusionChain) { + CompactVectorVector storage; + InclusionDetector detector(storage); + detector.AddPotentialSet(storage.Add({1})); + detector.AddPotentialSet(storage.Add({1, 2})); + detector.AddPotentialSet(storage.Add({1, 2, 3})); + + std::vector> included; + detector.DetectInclusions([&included](int subset, int superset) { + included.push_back({subset, superset}); + }); + EXPECT_THAT(included, + ::testing::ElementsAre(std::make_pair(0, 1), std::make_pair(0, 2), + std::make_pair(1, 2))); + + // If we stop processing a superset that can also be a subset, it should + // not appear as such. + included.clear(); + detector.DetectInclusions([&](int subset, int superset) { + detector.StopProcessingCurrentSuperset(); + included.push_back({subset, superset}); + }); + EXPECT_THAT(included, ::testing::ElementsAre(std::make_pair(0, 1), + std::make_pair(0, 2))); +} + +// We just check that nothing crashes. 
+TEST(InclusionDetectorTest, RandomTest) { + absl::BitGen random; + CompactVectorVector storage; + InclusionDetector detector(storage); + + std::vector temp; + for (int i = 0; i < 1000; ++i) { + temp.clear(); + const int size = absl::Uniform(random, 0, 100); + for (int j = 0; j < size; ++j) { + temp.push_back(absl::Uniform(random, 0, 10000)); + } + if (absl::Bernoulli(random, 0.5)) { + detector.AddPotentialSet(storage.Add(temp)); + } else { + if (absl::Bernoulli(random, 0.5)) { + detector.AddPotentialSubset(storage.Add(temp)); + } else { + detector.AddPotentialSuperset(storage.Add(temp)); + } + } + } + + int num_inclusions = 0; + detector.DetectInclusions( + [&num_inclusions](int subset, int superset) { ++num_inclusions; }); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/integer_test.cc b/ortools/sat/integer_test.cc new file mode 100644 index 0000000000..48fe3902f1 --- /dev/null +++ b/ortools/sat/integer_test.cc @@ -0,0 +1,1333 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/integer.h" + +#include +#include +#include +#include +#include +#include + +#include "absl/log/check.h" +#include "absl/types/span.h" +#include "benchmark/benchmark.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/logging.h" +#include "ortools/base/types.h" +#include "ortools/sat/integer_search.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_solver.h" +#include "ortools/util/sorted_interval_list.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::ElementsAre; +using ::testing::UnorderedElementsAre; + +TEST(AffineExpressionTest, Inequalities) { + const IntegerVariable var(1); + EXPECT_EQ( + AffineExpression(var, IntegerValue(3)).LowerOrEqual(IntegerValue(8)), + IntegerLiteral::LowerOrEqual(var, IntegerValue(2))); + EXPECT_EQ( + AffineExpression(var, IntegerValue(-3)).LowerOrEqual(IntegerValue(-1)), + IntegerLiteral::GreaterOrEqual(var, IntegerValue(1))); + EXPECT_EQ( + AffineExpression(var, IntegerValue(2)).GreaterOrEqual(IntegerValue(3)), + IntegerLiteral::GreaterOrEqual(var, IntegerValue(2))); +} + +TEST(AffineExpressionTest, ValueAt) { + const IntegerVariable var(1); + EXPECT_EQ(AffineExpression(var, IntegerValue(3)).ValueAt(IntegerValue(8)), + IntegerValue(3 * 8)); + EXPECT_EQ(AffineExpression(var, IntegerValue(3), IntegerValue(-2)) + .ValueAt(IntegerValue(5)), + IntegerValue(3 * 5 - 2)); +} + +TEST(AffineExpressionTest, NegatedConstant) { + const AffineExpression negated = AffineExpression(IntegerValue(3)).Negated(); + EXPECT_EQ(negated.var, kNoIntegerVariable); + EXPECT_EQ(negated.coeff, 0); + EXPECT_EQ(negated.constant, -3); +} + +TEST(AffineExpressionTest, ApiWithoutVar) { + const AffineExpression three(IntegerValue(3)); + EXPECT_TRUE(three.GreaterOrEqual(IntegerValue(2)).IsAlwaysTrue()); + EXPECT_TRUE(three.LowerOrEqual(IntegerValue(2)).IsAlwaysFalse()); +} + 
+TEST(ToDoubleTest, Infinities) { + EXPECT_EQ(ToDouble(IntegerValue(100)), 100.0); + + const double kInfinity = std::numeric_limits::infinity(); + EXPECT_EQ(ToDouble(kMaxIntegerValue), kInfinity); + EXPECT_EQ(ToDouble(kMinIntegerValue), -kInfinity); + + EXPECT_LT(ToDouble(kMaxIntegerValue - IntegerValue(1)), kInfinity); + EXPECT_GT(ToDouble(kMinIntegerValue + IntegerValue(1)), -kInfinity); +} + +TEST(FloorRatioTest, AllSmallCases) { + // Dividend can take any value. + for (IntegerValue dividend(-100); dividend < 100; ++dividend) { + // Divisor must be positive. + for (IntegerValue divisor(1); divisor < 100; ++divisor) { + const IntegerValue floor = FloorRatio(dividend, divisor); + EXPECT_LE(floor * divisor, dividend); + EXPECT_GT((floor + 1) * divisor, dividend); + } + } +} + +TEST(PositiveRemainderTest, AllCasesForFixedDivisor) { + IntegerValue divisor(17); + for (IntegerValue dividend(-100); dividend < 100; ++dividend) { + EXPECT_EQ(PositiveRemainder(dividend, divisor), + dividend - divisor * FloorRatio(dividend, divisor)); + } +} + +TEST(CeilRatioTest, AllSmallCases) { + // Dividend can take any value. + for (IntegerValue dividend(-100); dividend < 100; ++dividend) { + // Divisor must be positive. 
+ for (IntegerValue divisor(1); divisor < 100; ++divisor) { + const IntegerValue ceil = CeilRatio(dividend, divisor); + EXPECT_GE(ceil * divisor, dividend); + EXPECT_LT((ceil - 1) * divisor, dividend); + } + } +} + +TEST(NegationOfTest, IsIdempotent) { + for (int i = 0; i < 100; ++i) { + const IntegerVariable var(i); + EXPECT_EQ(NegationOf(NegationOf(var)), var); + } +} + +TEST(NegationOfTest, VectorArgument) { + std::vector vars{IntegerVariable(1), IntegerVariable(2)}; + std::vector negated_vars = NegationOf(vars); + EXPECT_EQ(negated_vars.size(), vars.size()); + for (int i = 0; i < vars.size(); ++i) { + EXPECT_EQ(negated_vars[i], NegationOf(vars[i])); + } +} + +TEST(IntegerValue, NegatedCannotOverflow) { + EXPECT_GT(kMinIntegerValue - 1, std::numeric_limits::min()); +} + +TEST(IntegerLiteral, OverflowValueAreCapped) { + const IntegerVariable var(0); + EXPECT_EQ(IntegerLiteral::GreaterOrEqual(var, kMaxIntegerValue + 1), + IntegerLiteral::GreaterOrEqual( + var, IntegerValue(std::numeric_limits::max()))); + EXPECT_EQ(IntegerLiteral::LowerOrEqual(var, kMinIntegerValue - 1), + IntegerLiteral::LowerOrEqual( + var, IntegerValue(std::numeric_limits::min()))); +} + +TEST(IntegerLiteral, NegatedIsIdempotent) { + for (const IntegerValue value : + {kMinIntegerValue, kMaxIntegerValue, kMaxIntegerValue + 1, + IntegerValue(0), IntegerValue(1), IntegerValue(2)}) { + const IntegerLiteral literal = + IntegerLiteral::GreaterOrEqual(IntegerVariable(0), value); + CHECK_EQ(literal, literal.Negated().Negated()); + } +} + +// A bound difference of exactly kint64max is ok. +TEST(IntegerTrailDeathTest, LargeVariableDomain) { + Model model; + model.Add(NewIntegerVariable(-3, std::numeric_limits::max() - 3)); + + if (DEBUG_MODE) { + // But one of kint64max + 1 cause a check fail in debug. 
+ EXPECT_DEATH(model.Add(NewIntegerVariable( + -3, std::numeric_limits::max() - 2)), + ""); + } +} + +TEST(IntegerTrailTest, ConstantIntegerVariableSharing) { + Model model; + const IntegerVariable a = model.Add(ConstantIntegerVariable(0)); + const IntegerVariable b = model.Add(ConstantIntegerVariable(7)); + const IntegerVariable c = model.Add(ConstantIntegerVariable(-7)); + const IntegerVariable d = model.Add(ConstantIntegerVariable(0)); + const IntegerVariable e = model.Add(ConstantIntegerVariable(3)); + EXPECT_EQ(a, d); + EXPECT_EQ(b, NegationOf(c)); + EXPECT_NE(a, e); + EXPECT_EQ(0, model.Get(Value(a))); + EXPECT_EQ(7, model.Get(Value(b))); + EXPECT_EQ(-7, model.Get(Value(c))); + EXPECT_EQ(0, model.Get(Value(d))); + EXPECT_EQ(3, model.Get(Value(e))); +} + +TEST(IntegerTrailTest, VariableCreationAndBoundGetter) { + Model model; + IntegerTrail* p = model.GetOrCreate(); + IntegerVariable a = model.Add(NewIntegerVariable(0, 10)); + IntegerVariable b = model.Add(NewIntegerVariable(-10, 10)); + IntegerVariable c = model.Add(NewIntegerVariable(20, 30)); + + // Index are dense and contiguous, but two indices are created each time. + // They start at zero. + EXPECT_EQ(0, a.value()); + EXPECT_EQ(1, NegationOf(a).value()); + EXPECT_EQ(2, b.value()); + EXPECT_EQ(3, NegationOf(b).value()); + EXPECT_EQ(4, c.value()); + EXPECT_EQ(5, NegationOf(c).value()); + + // Bounds matches the one we passed at creation. + EXPECT_EQ(0, p->LowerBound(a)); + EXPECT_EQ(10, p->UpperBound(a)); + EXPECT_EQ(-10, p->LowerBound(b)); + EXPECT_EQ(10, p->UpperBound(b)); + EXPECT_EQ(20, p->LowerBound(c)); + EXPECT_EQ(30, p->UpperBound(c)); + + // Test level-zero enqueue. 
+ EXPECT_TRUE( + p->Enqueue(IntegerLiteral::LowerOrEqual(a, IntegerValue(20)), {}, {})); + EXPECT_EQ(10, p->UpperBound(a)); + EXPECT_TRUE( + p->Enqueue(IntegerLiteral::LowerOrEqual(a, IntegerValue(7)), {}, {})); + EXPECT_EQ(7, p->UpperBound(a)); + EXPECT_TRUE( + p->Enqueue(IntegerLiteral::GreaterOrEqual(a, IntegerValue(5)), {}, {})); + EXPECT_EQ(5, p->LowerBound(a)); +} + +TEST(IntegerTrailTest, Untrail) { + Model model; + IntegerTrail* p = model.GetOrCreate(); + IntegerVariable a = p->AddIntegerVariable(IntegerValue(1), IntegerValue(10)); + IntegerVariable b = p->AddIntegerVariable(IntegerValue(2), IntegerValue(10)); + + Trail* trail = model.GetOrCreate(); + trail->Resize(10); + + // We need a reason for the Enqueue(): + const Literal r(model.Add(NewBooleanVariable()), true); + trail->EnqueueWithUnitReason(r.Negated()); + + // Enqueue. + trail->SetDecisionLevel(1); + EXPECT_TRUE(p->Propagate(trail)); + EXPECT_TRUE( + p->Enqueue(IntegerLiteral::GreaterOrEqual(a, IntegerValue(5)), {r}, {})); + EXPECT_EQ(5, p->LowerBound(a)); + EXPECT_TRUE( + p->Enqueue(IntegerLiteral::GreaterOrEqual(b, IntegerValue(7)), {r}, {})); + EXPECT_EQ(7, p->LowerBound(b)); + + trail->SetDecisionLevel(2); + EXPECT_TRUE(p->Propagate(trail)); + EXPECT_TRUE( + p->Enqueue(IntegerLiteral::GreaterOrEqual(b, IntegerValue(9)), {r}, {})); + EXPECT_EQ(9, p->LowerBound(b)); + + // Untrail. 
+ trail->SetDecisionLevel(1); + p->Untrail(*trail, 0); + EXPECT_EQ(7, p->LowerBound(b)); + + trail->SetDecisionLevel(0); + p->Untrail(*trail, 0); + EXPECT_EQ(1, p->LowerBound(a)); + EXPECT_EQ(2, p->LowerBound(b)); +} + +TEST(IntegerTrailTest, BasicReason) { + Model model; + IntegerTrail* p = model.GetOrCreate(); + IntegerVariable a = p->AddIntegerVariable(IntegerValue(1), IntegerValue(10)); + + Trail* trail = model.GetOrCreate(); + trail->Resize(10); + trail->EnqueueWithUnitReason(Literal(-1)); + trail->EnqueueWithUnitReason(Literal(-2)); + trail->EnqueueWithUnitReason(Literal(+3)); + trail->EnqueueWithUnitReason(Literal(+4)); + trail->SetDecisionLevel(1); + EXPECT_TRUE(p->Propagate(trail)); + + // Enqueue. + EXPECT_TRUE(p->Enqueue(IntegerLiteral::GreaterOrEqual(a, IntegerValue(2)), + Literals({+1}), {})); + EXPECT_TRUE(p->Enqueue(IntegerLiteral::GreaterOrEqual(a, IntegerValue(3)), + Literals({+2}), {})); + EXPECT_TRUE(p->Enqueue(IntegerLiteral::GreaterOrEqual(a, IntegerValue(5)), + Literals({-3}), {})); + EXPECT_TRUE(p->Enqueue(IntegerLiteral::GreaterOrEqual(a, IntegerValue(6)), + Literals({-4}), {})); + + EXPECT_THAT(p->ReasonFor(IntegerLiteral::GreaterOrEqual(a, IntegerValue(6))), + ElementsAre(Literal(-4))); + EXPECT_THAT(p->ReasonFor(IntegerLiteral::GreaterOrEqual(a, IntegerValue(5))), + ElementsAre(Literal(-3))); + EXPECT_THAT(p->ReasonFor(IntegerLiteral::GreaterOrEqual(a, IntegerValue(4))), + ElementsAre(Literal(-3))); + EXPECT_THAT(p->ReasonFor(IntegerLiteral::GreaterOrEqual(a, IntegerValue(3))), + ElementsAre(Literal(+2))); + EXPECT_TRUE( + p->ReasonFor(IntegerLiteral::GreaterOrEqual(a, IntegerValue(0))).empty()); + EXPECT_TRUE(p->ReasonFor(IntegerLiteral::GreaterOrEqual(a, IntegerValue(-10))) + .empty()); +} + +struct LazyReasonForTest : public LazyReasonInterface { + bool called = false; + + void Explain(int /*id*/, IntegerValue /*propagation_slack*/, + IntegerVariable /*variable_to_explain*/, int /*trail_index*/, + std::vector* /*literals_reason*/, + 
std::vector* /*trail_indices_reason*/) final { + called = true; + } +}; + +TEST(IntegerTrailTest, LazyReason) { + Model model; + IntegerTrail* p = model.GetOrCreate(); + IntegerVariable a = p->AddIntegerVariable(IntegerValue(1), IntegerValue(10)); + + Trail* trail = model.GetOrCreate(); + trail->Resize(10); + trail->SetDecisionLevel(1); + EXPECT_TRUE(p->Propagate(trail)); + + LazyReasonForTest mock; + + // Enqueue. + EXPECT_TRUE(p->EnqueueWithLazyReason( + IntegerLiteral::GreaterOrEqual(a, IntegerValue(2)), 0, 0, &mock)); + EXPECT_TRUE(p->Propagate(trail)); + EXPECT_FALSE(mock.called); + + // Called if needed for the conflict. + EXPECT_FALSE( + p->Enqueue(IntegerLiteral::LowerOrEqual(a, IntegerValue(1)), {}, {})); + EXPECT_TRUE(mock.called); +} + +TEST(IntegerTrailTest, LiteralAndBoundReason) { + Model model; + IntegerTrail* p = model.GetOrCreate(); + IntegerVariable a = model.Add(NewIntegerVariable(0, 10)); + IntegerVariable b = model.Add(NewIntegerVariable(0, 10)); + IntegerVariable c = model.Add(NewIntegerVariable(0, 10)); + + Trail* trail = model.GetOrCreate(); + trail->Resize(10); + trail->EnqueueWithUnitReason(Literal(-1)); + trail->EnqueueWithUnitReason(Literal(-2)); + trail->EnqueueWithUnitReason(Literal(-3)); + trail->EnqueueWithUnitReason(Literal(-4)); + trail->SetDecisionLevel(1); + EXPECT_TRUE(p->Propagate(trail)); + + // Enqueue. 
+ EXPECT_TRUE(p->Enqueue(IntegerLiteral::GreaterOrEqual(a, IntegerValue(1)), + Literals({+1}), {})); + EXPECT_TRUE(p->Enqueue(IntegerLiteral::GreaterOrEqual(a, IntegerValue(2)), + Literals({+2}), {})); + EXPECT_TRUE(p->Enqueue(IntegerLiteral::GreaterOrEqual(b, IntegerValue(3)), + Literals({+3}), + {IntegerLiteral::GreaterOrEqual(a, IntegerValue(1))})); + EXPECT_TRUE(p->Enqueue(IntegerLiteral::GreaterOrEqual(c, IntegerValue(5)), + Literals({+4, +3}), + {IntegerLiteral::GreaterOrEqual(a, IntegerValue(2)), + IntegerLiteral::GreaterOrEqual(b, IntegerValue(3))})); + + EXPECT_THAT(p->ReasonFor(IntegerLiteral::GreaterOrEqual(b, IntegerValue(2))), + UnorderedElementsAre(Literal(+1), Literal(+3))); + EXPECT_THAT(p->ReasonFor(IntegerLiteral::GreaterOrEqual(c, IntegerValue(3))), + UnorderedElementsAre(Literal(+2), Literal(+3), Literal(+4))); +} + +TEST(IntegerTrailTest, LevelZeroBounds) { + Model model; + auto* integer_trail = model.GetOrCreate(); + IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + + Trail* trail = model.GetOrCreate(); + trail->Resize(10); + trail->SetDecisionLevel(1); + trail->EnqueueWithUnitReason(Literal(-1)); + trail->EnqueueWithUnitReason(Literal(-2)); + EXPECT_TRUE(integer_trail->Propagate(trail)); + + // Enqueue. + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(x, IntegerValue(1)), Literals({+1}), {})); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::LowerOrEqual(x, IntegerValue(2)), Literals({+2}), {})); + + // TEST. 
+ EXPECT_EQ(integer_trail->LowerBound(x), IntegerValue(1)); + EXPECT_EQ(integer_trail->UpperBound(x), IntegerValue(2)); + EXPECT_EQ(integer_trail->LevelZeroLowerBound(x), IntegerValue(0)); + EXPECT_EQ(integer_trail->LevelZeroUpperBound(x), IntegerValue(10)); +} + +TEST(IntegerTrailTest, RelaxLinearReason) { + Model model; + IntegerTrail* integer_trail = model.GetOrCreate(); + const IntegerVariable a = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 10)); + + Trail* trail = model.GetOrCreate(); + trail->SetDecisionLevel(1); + EXPECT_TRUE(integer_trail->Propagate(trail)); + + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(a, IntegerValue(1)), {}, {})); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(a, IntegerValue(2)), {}, {})); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(b, IntegerValue(1)), {}, {})); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(a, IntegerValue(3)), {}, {})); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(b, IntegerValue(3)), {}, {})); + + std::vector coeffs(2, IntegerValue(1)); + std::vector reasons{ + IntegerLiteral::GreaterOrEqual(a, IntegerValue(3)), + IntegerLiteral::GreaterOrEqual(b, IntegerValue(3))}; + + // No slack, nothing happens. + integer_trail->RelaxLinearReason(IntegerValue(0), coeffs, &reasons); + EXPECT_THAT(reasons, + ElementsAre(IntegerLiteral::GreaterOrEqual(a, IntegerValue(3)), + IntegerLiteral::GreaterOrEqual(b, IntegerValue(3)))); + + // Some slack, we find the "lowest" possible reason in term of trail index. 
+ integer_trail->RelaxLinearReason(IntegerValue(3), coeffs, &reasons); + EXPECT_THAT(reasons, + ElementsAre(IntegerLiteral::GreaterOrEqual(a, IntegerValue(2)), + IntegerLiteral::GreaterOrEqual(b, IntegerValue(1)))); +} + +TEST(IntegerTrailTest, LiteralIsTrueOrFalse) { + Model model; + const IntegerVariable a = model.Add(NewIntegerVariable(1, 9)); + + auto* integer_trail = model.GetOrCreate(); + EXPECT_TRUE(integer_trail->IntegerLiteralIsTrue( + IntegerLiteral::GreaterOrEqual(a, IntegerValue(0)))); + EXPECT_TRUE(integer_trail->IntegerLiteralIsTrue( + IntegerLiteral::LowerOrEqual(a, IntegerValue(10)))); + + EXPECT_TRUE(integer_trail->IntegerLiteralIsTrue( + IntegerLiteral::GreaterOrEqual(a, IntegerValue(1)))); + EXPECT_FALSE(integer_trail->IntegerLiteralIsFalse( + IntegerLiteral::GreaterOrEqual(a, IntegerValue(1)))); + + EXPECT_FALSE(integer_trail->IntegerLiteralIsTrue( + IntegerLiteral::GreaterOrEqual(a, IntegerValue(2)))); + EXPECT_FALSE(integer_trail->IntegerLiteralIsFalse( + IntegerLiteral::GreaterOrEqual(a, IntegerValue(2)))); + + EXPECT_FALSE(integer_trail->IntegerLiteralIsTrue( + IntegerLiteral::GreaterOrEqual(a, IntegerValue(10)))); + EXPECT_TRUE(integer_trail->IntegerLiteralIsFalse( + IntegerLiteral::GreaterOrEqual(a, IntegerValue(10)))); +} + +TEST(IntegerTrailTest, VariableWithHole) { + Model model; + IntegerVariable a = + model.Add(NewIntegerVariable(Domain::FromIntervals({{1, 3}, {6, 7}}))); + model.Add(GreaterOrEqual(a, 4)); + EXPECT_EQ(model.Get(LowerBound(a)), 6); +} + +TEST(GenericLiteralWatcherTest, LevelZeroModifiedVariablesCallbackTest) { + Model model; + auto* integer_trail = model.GetOrCreate(); + auto* watcher = model.GetOrCreate(); + IntegerVariable a = model.Add(NewIntegerVariable(0, 10)); + IntegerVariable b = model.Add(NewIntegerVariable(-10, 10)); + IntegerVariable c = model.Add(NewIntegerVariable(20, 30)); + + std::vector collector; + watcher->RegisterLevelZeroModifiedVariablesCallback( + [&collector](const std::vector& modified_vars) { + 
collector = modified_vars; + }); + + // No propagation. + auto* sat_solver = model.GetOrCreate(); + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_EQ(0, collector.size()); + + // Modify 1 variable. + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::LowerOrEqual(c, IntegerValue(27)), {}, {})); + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_EQ(1, collector.size()); + EXPECT_EQ(NegationOf(c), collector[0]); + + // Modify 2 variables. + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(a, IntegerValue(10)), {}, {})); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::LowerOrEqual(b, IntegerValue(7)), {}, {})); + EXPECT_TRUE(sat_solver->Propagate()); + ASSERT_EQ(2, collector.size()); + EXPECT_EQ(a, collector[0]); + EXPECT_EQ(NegationOf(b), collector[1]); + + // Modify 1 variable at level 1. + model.GetOrCreate()->SetDecisionLevel(1); + EXPECT_TRUE(sat_solver->Propagate()); + collector.clear(); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::LowerOrEqual(b, IntegerValue(6)), {}, {})); + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_TRUE(collector.empty()); +} + +TEST(GenericLiteralWatcherTest, RevIsInDiveUpdate) { + Model model; + bool is_in_dive = false; + auto* sat_solver = model.GetOrCreate(); + auto* watcher = model.GetOrCreate(); + const Literal a(sat_solver->NewBooleanVariable(), true); + const Literal b(sat_solver->NewBooleanVariable(), true); + + // First decision. + EXPECT_TRUE(sat_solver->EnqueueDecisionIfNotConflicting(a)); + EXPECT_FALSE(is_in_dive); + watcher->SetUntilNextBacktrack(&is_in_dive); + + // Second decision. + EXPECT_TRUE(sat_solver->EnqueueDecisionIfNotConflicting(b)); + EXPECT_TRUE(is_in_dive); + watcher->SetUntilNextBacktrack(&is_in_dive); + + // If we backtrack, it should be set to false. + EXPECT_TRUE(sat_solver->ResetToLevelZero()); + EXPECT_FALSE(is_in_dive); + + // We can redo the same. 
+ EXPECT_FALSE(is_in_dive); + watcher->SetUntilNextBacktrack(&is_in_dive); + + EXPECT_TRUE(sat_solver->EnqueueDecisionIfNotConflicting(a)); + EXPECT_TRUE(is_in_dive); +} + +TEST(IntegerEncoderTest, BasicInequalityEncoding) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add(NewIntegerVariable(0, 10)); + const Literal l3 = encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(3))); + const Literal l7 = encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(7))); + const Literal l5 = encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(5))); + + // Test SearchForLiteralAtOrBefore(). + for (IntegerValue v(0); v < 10; ++v) { + IntegerValue unused; + const LiteralIndex lb_index = encoder->SearchForLiteralAtOrBefore( + IntegerLiteral::GreaterOrEqual(var, v), &unused); + const LiteralIndex ub_index = encoder->SearchForLiteralAtOrBefore( + IntegerLiteral::LowerOrEqual(var, v), &unused); + if (v < 3) { + EXPECT_EQ(lb_index, kNoLiteralIndex); + EXPECT_EQ(ub_index, l3.NegatedIndex()); + } else if (v < 5) { + EXPECT_EQ(lb_index, l3.Index()); + EXPECT_EQ(ub_index, l5.NegatedIndex()); + } else if (v < 7) { + EXPECT_EQ(lb_index, l5.Index()); + EXPECT_EQ(ub_index, l7.NegatedIndex()); + } else { + EXPECT_EQ(lb_index, l7.Index()); + EXPECT_EQ(ub_index, kNoLiteralIndex); + } + } + + // Test the propagation from the literal to the bounds. + // By default the polarity of the literal are false. + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_FALSE(model.Get(Value(l3))); + EXPECT_FALSE(model.Get(Value(l5))); + EXPECT_FALSE(model.Get(Value(l7))); + EXPECT_EQ(0, model.Get(LowerBound(var))); + EXPECT_EQ(2, model.Get(UpperBound(var))); + + // Test the other way around. 
+ model.GetOrCreate()->Backtrack(0); + model.Add(GreaterOrEqual(var, 4)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_TRUE(model.Get(Value(l3))); + EXPECT_FALSE(model.Get(Value(l5))); + EXPECT_FALSE(model.Get(Value(l7))); + EXPECT_EQ(4, model.Get(LowerBound(var))); + EXPECT_EQ(4, model.Get(UpperBound(var))); +} + +TEST(IntegerEncoderTest, GetOrCreateTrivialAssociatedLiteral) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add(NewIntegerVariable(0, 10)); + EXPECT_EQ(encoder->GetTrueLiteral(), + encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(0)))); + EXPECT_EQ(encoder->GetTrueLiteral(), + encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(-1)))); + EXPECT_EQ(encoder->GetTrueLiteral(), + encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::LowerOrEqual(var, IntegerValue(10)))); + EXPECT_EQ(encoder->GetFalseLiteral(), + encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(11)))); + EXPECT_EQ(encoder->GetFalseLiteral(), + encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(12)))); + EXPECT_EQ(encoder->GetFalseLiteral(), + encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::LowerOrEqual(var, IntegerValue(-1)))); +} + +TEST(IntegerEncoderTest, ShiftedBinary) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add(NewIntegerVariable(1, 2)); + + encoder->FullyEncodeVariable(var); + EXPECT_EQ(encoder->FullDomainEncoding(var).size(), 2); + const std::vector var_encoding = + encoder->FullDomainEncoding(var); + + const Literal g2 = encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(2))); + const Literal l1 = encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::LowerOrEqual(var, IntegerValue(1))); + + EXPECT_EQ(g2, var_encoding[1].literal); + 
EXPECT_EQ(l1, var_encoding[0].literal); + EXPECT_EQ(g2, l1.Negated()); +} + +TEST(IntegerEncoderTest, SizeTwoDomains) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = + model.Add(NewIntegerVariable(Domain::FromValues({1, 3}))); + + encoder->FullyEncodeVariable(var); + EXPECT_EQ(encoder->FullDomainEncoding(var).size(), 2); + const std::vector var_encoding = + encoder->FullDomainEncoding(var); + + const Literal g2 = encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(2))); + const Literal g3 = encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(3))); + const Literal l1 = encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::LowerOrEqual(var, IntegerValue(1))); + const Literal l2 = encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::LowerOrEqual(var, IntegerValue(2))); + + EXPECT_EQ(g3, var_encoding[1].literal); + EXPECT_EQ(l1, var_encoding[0].literal); + EXPECT_EQ(g3, l1.Negated()); + EXPECT_EQ(g2, g3); + EXPECT_EQ(l1, l2); +} + +TEST(IntegerEncoderDeathTest, NegatedIsNotCreatedTwice) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add(NewIntegerVariable(0, 10)); + const IntegerLiteral l = IntegerLiteral::GreaterOrEqual(var, IntegerValue(3)); + const Literal associated = encoder->GetOrCreateAssociatedLiteral(l); + EXPECT_EQ(associated.Negated(), + encoder->GetOrCreateAssociatedLiteral(l.Negated())); +} + +TEST(IntegerEncoderTest, AutomaticallyDetectFullEncoding) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = + model.Add(NewIntegerVariable(Domain::FromValues({3, -4, 0}))); + + // Adding <= min should automatically also add == min. + encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::LowerOrEqual(var, IntegerValue(-4))); + + // We still miss one value. 
+ EXPECT_FALSE(encoder->VariableIsFullyEncoded(var)); + EXPECT_FALSE(encoder->VariableIsFullyEncoded(NegationOf(var))); + + // This is enough to fully encode, because not(<=0) is >=3 which is ==3, and + // we do have all values. + encoder->GetOrCreateLiteralAssociatedToEquality(var, IntegerValue(0)); + EXPECT_TRUE(encoder->VariableIsFullyEncoded(var)); + EXPECT_TRUE(encoder->VariableIsFullyEncoded(NegationOf(var))); + + std::vector values; + for (const auto pair : encoder->FullDomainEncoding(var)) { + values.push_back(pair.value.value()); + } + EXPECT_THAT(values, ElementsAre(-4, 0, 3)); +} + +TEST(IntegerEncoderTest, BasicFullEqualityEncoding) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = + model.Add(NewIntegerVariable(Domain::FromValues({3, -4, 0}))); + encoder->FullyEncodeVariable(var); + + // Normal var. + { + const auto& result = encoder->FullDomainEncoding(var); + EXPECT_EQ(result.size(), 3); + EXPECT_EQ(result[0], ValueLiteralPair({IntegerValue(-4), + Literal(BooleanVariable(0), true)})); + EXPECT_EQ(result[1], ValueLiteralPair({IntegerValue(0), + Literal(BooleanVariable(1), true)})); + EXPECT_EQ(result[2], + ValueLiteralPair( + {IntegerValue(3), Literal(BooleanVariable(2), false)})); + } + + // Its negation. + { + const auto& result = encoder->FullDomainEncoding(NegationOf(var)); + EXPECT_EQ(result.size(), 3); + EXPECT_EQ(result[0], + ValueLiteralPair( + {IntegerValue(-3), Literal(BooleanVariable(2), false)})); + EXPECT_EQ(result[1], ValueLiteralPair({IntegerValue(0), + Literal(BooleanVariable(1), true)})); + EXPECT_EQ(result[2], ValueLiteralPair({IntegerValue(4), + Literal(BooleanVariable(0), true)})); + } +} + +TEST(IntegerEncoderTest, PartialEncodingOfBinaryVarIsFull) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = + model.Add(NewIntegerVariable(Domain::FromValues({0, 5}))); + const Literal lit(model.Add(NewBooleanVariable()), true); + + // Initially empty. 
+ EXPECT_TRUE(encoder->PartialDomainEncoding(var).empty()); + + // Normal var. + encoder->AssociateToIntegerEqualValue(lit, var, IntegerValue(0)); + { + const auto& result = encoder->PartialDomainEncoding(var); + EXPECT_EQ(result.size(), 2); + EXPECT_EQ(result[0], ValueLiteralPair({IntegerValue(0), lit})); + EXPECT_EQ(result[1], ValueLiteralPair({IntegerValue(5), lit.Negated()})); + } + + // Its negation. + { + const auto& result = encoder->PartialDomainEncoding(NegationOf(var)); + EXPECT_EQ(result.size(), 2); + EXPECT_EQ(result[0], ValueLiteralPair({IntegerValue(-5), lit.Negated()})); + EXPECT_EQ(result[1], ValueLiteralPair({IntegerValue(0), lit})); + } +} + +TEST(IntegerEncoderTest, PartialEncodingOfLargeVar) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add(NewIntegerVariable(0, 1e12)); + for (const int value : {50, 1000, 1}) { + const Literal lit(model.Add(NewBooleanVariable()), true); + encoder->AssociateToIntegerEqualValue(lit, var, IntegerValue(value)); + } + const auto& result = encoder->PartialDomainEncoding(var); + EXPECT_EQ(result.size(), 4); + // Zero is created because encoding (== 1) requires (>= 1 and <= 1), but the + // negation of (>= 1) is also (== 0). + EXPECT_EQ(result[0].value, IntegerValue(0)); + EXPECT_EQ(result[1].value, IntegerValue(1)); + EXPECT_EQ(result[2].value, IntegerValue(50)); + EXPECT_EQ(result[3].value, IntegerValue(1000)); +} + +TEST(IntegerEncoderTest, UpdateInitialDomain) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = + model.Add(NewIntegerVariable(Domain::FromValues({3, -4, 0}))); + encoder->FullyEncodeVariable(var); + EXPECT_TRUE(model.GetOrCreate()->UpdateInitialDomain( + var, Domain::FromIntervals({{-4, -4}, {0, 0}, {5, 5}}))); + + // Note that we return the filtered encoding. 
+ { + const auto& result = encoder->FullDomainEncoding(var); + EXPECT_EQ(result.size(), 2); + EXPECT_EQ(result[0], ValueLiteralPair({IntegerValue(-4), + Literal(BooleanVariable(0), true)})); + EXPECT_EQ(result[1], ValueLiteralPair({IntegerValue(0), + Literal(BooleanVariable(1), true)})); + } +} + +TEST(IntegerEncoderTest, Canonicalize) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = + model.Add(NewIntegerVariable(Domain::FromIntervals({{1, 4}, {7, 9}}))); + + EXPECT_EQ(encoder->Canonicalize( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(2))), + std::make_pair(IntegerLiteral::GreaterOrEqual(var, IntegerValue(2)), + IntegerLiteral::LowerOrEqual(var, IntegerValue(1)))); + EXPECT_EQ(encoder->Canonicalize( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(4))), + std::make_pair(IntegerLiteral::GreaterOrEqual(var, IntegerValue(4)), + IntegerLiteral::LowerOrEqual(var, IntegerValue(3)))); + EXPECT_EQ( + encoder->Canonicalize(IntegerLiteral::LowerOrEqual(var, IntegerValue(4))), + std::make_pair(IntegerLiteral::LowerOrEqual(var, IntegerValue(4)), + IntegerLiteral::GreaterOrEqual(var, IntegerValue(7)))); + EXPECT_EQ( + encoder->Canonicalize(IntegerLiteral::LowerOrEqual(var, IntegerValue(6))), + std::make_pair(IntegerLiteral::LowerOrEqual(var, IntegerValue(4)), + IntegerLiteral::GreaterOrEqual(var, IntegerValue(7)))); +} + +TEST(IntegerEncoderDeathTest, CanonicalizeDoNotAcceptTrivialLiterals) { + if (!DEBUG_MODE) GTEST_SKIP() << "Moot in opt mode"; + + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = + model.Add(NewIntegerVariable(Domain::FromIntervals({{1, 4}, {7, 9}}))); + + EXPECT_DEATH(encoder->Canonicalize( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(1))), + ""); + EXPECT_DEATH(encoder->Canonicalize( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(0))), + ""); + EXPECT_DEATH( + encoder->Canonicalize(IntegerLiteral::LowerOrEqual(var, IntegerValue(0))), + ""); + 
EXPECT_DEATH(encoder->Canonicalize( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(0))), + ""); + + EXPECT_DEATH( + encoder->Canonicalize(IntegerLiteral::LowerOrEqual(var, IntegerValue(9))), + ""); + EXPECT_DEATH(encoder->Canonicalize( + IntegerLiteral::LowerOrEqual(var, IntegerValue(15))), + ""); +} + +TEST(IntegerEncoderTest, TrivialAssociation) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = + model.Add(NewIntegerVariable(Domain::FromIntervals({{1, 1}, {5, 5}}))); + + { + const Literal l(model.Add(NewBooleanVariable()), true); + encoder->AssociateToIntegerLiteral( + l, IntegerLiteral::GreaterOrEqual(var, IntegerValue(1))); + EXPECT_EQ(model.Get(Value(l)), true); + } + { + const Literal l(model.Add(NewBooleanVariable()), true); + encoder->AssociateToIntegerLiteral( + l, IntegerLiteral::GreaterOrEqual(var, IntegerValue(6))); + EXPECT_EQ(model.Get(Value(l)), false); + } + { + const Literal l(model.Add(NewBooleanVariable()), true); + encoder->AssociateToIntegerEqualValue(l, var, IntegerValue(4)); + EXPECT_EQ(model.Get(Value(l)), false); + } +} + +TEST(IntegerEncoderTest, TrivialAssociationWithFixedVariable) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add(NewIntegerVariable(Domain(1))); + { + const Literal l(model.Add(NewBooleanVariable()), true); + encoder->AssociateToIntegerEqualValue(l, var, IntegerValue(1)); + EXPECT_EQ(model.Get(Value(l)), true); + } +} + +TEST(IntegerEncoderTest, FullEqualityEncodingForTwoValuesWithDuplicates) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = + model.Add(NewIntegerVariable(Domain::FromValues({3, 5, 3}))); + encoder->FullyEncodeVariable(var); + + // Normal var. 
+ { + const auto& result = encoder->FullDomainEncoding(var); + EXPECT_EQ(result.size(), 2); + EXPECT_EQ(result[0], ValueLiteralPair({IntegerValue(3), + Literal(BooleanVariable(0), true)})); + EXPECT_EQ(result[1], + ValueLiteralPair( + {IntegerValue(5), Literal(BooleanVariable(0), false)})); + } + + // Its negation. + { + const auto& result = encoder->FullDomainEncoding(NegationOf(var)); + EXPECT_EQ(result.size(), 2); + EXPECT_EQ(result[0], + ValueLiteralPair( + {IntegerValue(-5), Literal(BooleanVariable(0), false)})); + EXPECT_EQ(result[1], ValueLiteralPair({IntegerValue(-3), + Literal(BooleanVariable(0), true)})); + } +} + +#define EXPECT_BOUNDS_EQ(var, lb, ub) \ + EXPECT_EQ(model.Get(LowerBound(var)), lb); \ + EXPECT_EQ(model.Get(UpperBound(var)), ub) + +TEST(IntegerEncoderTest, IntegerTrailToEncodingPropagation) { + Model model; + SatSolver* sat_solver = model.GetOrCreate(); + IntegerEncoder* encoder = model.GetOrCreate(); + Trail* trail = model.GetOrCreate(); + IntegerTrail* integer_trail = model.GetOrCreate(); + + const IntegerVariable var = model.Add( + NewIntegerVariable(Domain::FromIntervals({{3, 4}, {7, 7}, {9, 9}}))); + model.Add(FullyEncodeVariable(var)); + + // We copy this because Enqueue() might change it. + const auto encoding = encoder->FullDomainEncoding(var); + + // Initial propagation is correct. + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_BOUNDS_EQ(var, 3, 9); + + // Note that the bounds snap to the possible values. 
+ const VariablesAssignment& assignment = trail->Assignment(); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::LowerOrEqual(var, IntegerValue(8)), {}, {})); + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_TRUE(assignment.LiteralIsFalse(encoding[3].literal)); + EXPECT_FALSE(assignment.VariableIsAssigned(encoding[0].literal.Variable())); + EXPECT_FALSE(assignment.VariableIsAssigned(encoding[1].literal.Variable())); + EXPECT_FALSE(assignment.VariableIsAssigned(encoding[2].literal.Variable())); + EXPECT_BOUNDS_EQ(var, 3, 7); + + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(5)), {}, {})); + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_TRUE(assignment.LiteralIsFalse(encoding[0].literal)); + EXPECT_TRUE(assignment.LiteralIsFalse(encoding[1].literal)); + EXPECT_TRUE(assignment.LiteralIsTrue(encoding[2].literal)); + EXPECT_BOUNDS_EQ(var, 7, 7); + + // Encoding[2] will become true on the sat solver propagation. + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_TRUE(assignment.LiteralIsTrue(encoding[2].literal)); +} + +TEST(IntegerEncoderTest, EncodingToIntegerTrailPropagation) { + Model model; + SatSolver* sat_solver = model.GetOrCreate(); + IntegerEncoder* encoder = model.GetOrCreate(); + Trail* trail = model.GetOrCreate(); + IntegerTrail* integer_trail = model.GetOrCreate(); + const IntegerVariable var = model.Add( + NewIntegerVariable(Domain::FromIntervals({{3, 4}, {7, 7}, {9, 9}}))); + model.Add(FullyEncodeVariable(var)); + const auto& encoding = encoder->FullDomainEncoding(var); + + // Initial propagation is correct. + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_BOUNDS_EQ(var, 3, 9); + + // We remove the value 4, nothing happen. + trail->SetDecisionLevel(1); + trail->EnqueueSearchDecision(encoding[1].literal.Negated()); + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_BOUNDS_EQ(var, 3, 9); + + // When we remove 3, the lower bound change though. 
+ trail->SetDecisionLevel(2); + trail->EnqueueSearchDecision(encoding[0].literal.Negated()); + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_BOUNDS_EQ(var, 7, 9); + + // The reason for the lower bounds is that both encoding[0] and encoding[1] + // are false. But it is captured by the literal associated to x >= 7. + { + const IntegerLiteral l = integer_trail->LowerBoundAsLiteral(var); + EXPECT_EQ(integer_trail->ReasonFor(l), + std::vector{ + Literal(encoder->GetAssociatedLiteral(l)).Negated()}); + } + + // Test the other direction. + trail->SetDecisionLevel(3); + trail->EnqueueSearchDecision(encoding[3].literal.Negated()); + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_BOUNDS_EQ(var, 7, 7); + { + const IntegerLiteral l = integer_trail->UpperBoundAsLiteral(var); + EXPECT_EQ(integer_trail->ReasonFor(l), + std::vector{ + Literal(encoder->GetAssociatedLiteral(l)).Negated()}); + } +} + +TEST(IntegerEncoderTest, IsFixedOrHasAssociatedLiteral) { + Model model; + SatSolver* sat_solver = model.GetOrCreate(); + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add( + NewIntegerVariable(Domain::FromIntervals({{3, 4}, {7, 7}, {9, 9}}))); + + // Initial propagation is correct. + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_BOUNDS_EQ(var, 3, 9); + + // These are trivially true/false. + EXPECT_TRUE(encoder->IsFixedOrHasAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, 2))); + EXPECT_TRUE(encoder->IsFixedOrHasAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, 3))); + EXPECT_TRUE(encoder->IsFixedOrHasAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, 10))); + + // Not other encoding currently. + EXPECT_FALSE(encoder->IsFixedOrHasAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, 4))); + EXPECT_FALSE(encoder->IsFixedOrHasAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, 9))); + + // Add one encoding and test. 
+ encoder->GetOrCreateAssociatedLiteral(IntegerLiteral::GreaterOrEqual(var, 7)); + EXPECT_TRUE(encoder->IsFixedOrHasAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, 5))); + EXPECT_TRUE(encoder->IsFixedOrHasAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(var, 7))); + EXPECT_TRUE(encoder->IsFixedOrHasAssociatedLiteral( + IntegerLiteral::LowerOrEqual(var, 6))); + EXPECT_TRUE(encoder->IsFixedOrHasAssociatedLiteral( + IntegerLiteral::LowerOrEqual(var, 4))); +} + +TEST(IntegerEncoderTest, EncodingOfConstantVariableHasSizeOne) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add(NewIntegerVariable(7, 7)); + model.Add(FullyEncodeVariable(var)); + const auto& encoding = encoder->FullDomainEncoding(var); + EXPECT_EQ(encoding.size(), 1); + EXPECT_TRUE(model.GetOrCreate()->Assignment().LiteralIsTrue( + encoding[0].literal)); +} + +TEST(IntegerEncoderTest, IntegerVariableOfAssignedLiteralIsFixed) { + Model model; + SatSolver* sat_solver = model.GetOrCreate(); + + { + Literal literal_false = Literal(sat_solver->NewBooleanVariable(), true); + CHECK(sat_solver->AddUnitClause(literal_false.Negated())); + const IntegerVariable zero = + model.Add(NewIntegerVariableFromLiteral(literal_false)); + EXPECT_EQ(model.Get(UpperBound(zero)), 0); + } + + { + Literal literal_true = Literal(sat_solver->NewBooleanVariable(), true); + CHECK(sat_solver->AddUnitClause(literal_true)); + const IntegerVariable one = + model.Add(NewIntegerVariableFromLiteral(literal_true)); + EXPECT_EQ(model.Get(LowerBound(one)), 1); + } +} + +TEST(IntegerEncoderTest, LiteralView1) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add(NewIntegerVariable(0, 1)); + const Literal literal(model.Add(NewBooleanVariable()), true); + encoder->AssociateToIntegerEqualValue(literal, var, IntegerValue(1)); + EXPECT_EQ(var, encoder->GetLiteralView(literal)); + EXPECT_EQ(kNoIntegerVariable, 
encoder->GetLiteralView(literal.Negated())); +} + +TEST(IntegerEncoderTest, LiteralView2) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add(NewIntegerVariable(0, 1)); + const Literal literal(model.Add(NewBooleanVariable()), true); + encoder->AssociateToIntegerEqualValue(literal, var, IntegerValue(0)); + EXPECT_EQ(kNoIntegerVariable, encoder->GetLiteralView(literal)); + EXPECT_EQ(var, encoder->GetLiteralView(literal.Negated())); +} + +TEST(IntegerEncoderTest, LiteralView3) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add(NewIntegerVariable(0, 1)); + const Literal literal(model.Add(NewBooleanVariable()), true); + encoder->AssociateToIntegerLiteral( + literal, IntegerLiteral::GreaterOrEqual(var, IntegerValue(1))); + EXPECT_EQ(var, encoder->GetLiteralView(literal)); + EXPECT_EQ(kNoIntegerVariable, encoder->GetLiteralView(literal.Negated())); +} + +TEST(IntegerEncoderTest, LiteralView4) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add(NewIntegerVariable(0, 1)); + const Literal literal(model.Add(NewBooleanVariable()), true); + encoder->AssociateToIntegerLiteral( + literal, IntegerLiteral::LowerOrEqual(var, IntegerValue(0))); + EXPECT_EQ(kNoIntegerVariable, encoder->GetLiteralView(literal)); + EXPECT_EQ(var, encoder->GetLiteralView(literal.Negated())); +} + +TEST(IntegerEncoderTest, IssueWhenNotFullyingPropagatingAtLoading) { + Model model; + auto* integer_trail = model.GetOrCreate(); + auto* integer_encoder = model.GetOrCreate(); + const IntegerVariable var = + integer_trail->AddIntegerVariable(Domain::FromValues({0, 3, 7, 9})); + const Literal false_literal = integer_encoder->GetFalseLiteral(); + integer_encoder->DisableImplicationBetweenLiteral(); + + // This currently doesn't propagate the domain. 
+ integer_encoder->AssociateToIntegerLiteral( + false_literal, IntegerLiteral::GreaterOrEqual(var, IntegerValue(5))); + EXPECT_EQ(integer_trail->LowerBound(var), 0); + EXPECT_EQ(integer_trail->UpperBound(var), 9); + + // And that used to fail because it does some domain propagation when it + // detect that some value cannot be there and update the domains of var while + // iterating over it. + integer_encoder->FullyEncodeVariable(var); +} + +#undef EXPECT_BOUNDS_EQ + +TEST(SolveIntegerProblemWithLazyEncodingTest, Sat) { + static const int kNumVariables = 10; + Model model; + std::vector integer_vars; + for (int i = 0; i < kNumVariables; ++i) { + integer_vars.push_back(model.Add(NewIntegerVariable(0, 10))); + } + model.GetOrCreate()->fixed_search = + FirstUnassignedVarAtItsMinHeuristic(integer_vars, &model); + ConfigureSearchHeuristics(&model); + ASSERT_EQ(model.GetOrCreate()->SolveIntegerProblem(), + SatSolver::Status::FEASIBLE); + for (const IntegerVariable var : integer_vars) { + EXPECT_EQ(model.Get(LowerBound(var)), model.Get(UpperBound(var))); + } +} + +TEST(SolveIntegerProblemWithLazyEncodingTest, Unsat) { + Model model; + const IntegerVariable var = model.Add(NewIntegerVariable(-100, 100)); + model.Add(LowerOrEqual(var, -10)); + model.Add(GreaterOrEqual(var, 10)); + model.GetOrCreate()->fixed_search = + FirstUnassignedVarAtItsMinHeuristic({var}, &model); + ConfigureSearchHeuristics(&model); + EXPECT_EQ(model.GetOrCreate()->SolveIntegerProblem(), + SatSolver::Status::INFEASIBLE); +} + +TEST(IntegerTrailTest, InitialVariableDomainIsUpdated) { + Model model; + IntegerTrail* integer_trail = model.GetOrCreate(); + const IntegerVariable var = + integer_trail->AddIntegerVariable(IntegerValue(0), IntegerValue(1000)); + EXPECT_EQ(integer_trail->InitialVariableDomain(var), Domain(0, 1000)); + EXPECT_EQ(integer_trail->InitialVariableDomain(NegationOf(var)), + Domain(-1000, 0)); + + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(var, 
IntegerValue(7)), {}, {})); + EXPECT_EQ(integer_trail->InitialVariableDomain(var), Domain(7, 1000)); + EXPECT_EQ(integer_trail->InitialVariableDomain(NegationOf(var)), + Domain(-1000, -7)); +} + +TEST(IntegerTrailTest, AppendNewBounds) { + Model model; + const Literal l(model.Add(NewBooleanVariable()), true); + const IntegerVariable var(model.Add(NewIntegerVariable(0, 100))); + + // So that there is a decision. + EXPECT_TRUE( + model.GetOrCreate()->EnqueueDecisionIfNotConflicting(l)); + + // Enqueue a bunch of fact. + IntegerTrail* integer_trail = model.GetOrCreate(); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(2)), {l.Negated()}, {})); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(4)), {l.Negated()}, {})); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(8)), {l.Negated()}, {})); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(var, IntegerValue(9)), {l.Negated()}, {})); + + // Only the last bound should be present. 
+ std::vector bounds; + integer_trail->AppendNewBounds(&bounds); + EXPECT_THAT(bounds, ElementsAre(IntegerLiteral::GreaterOrEqual( + var, IntegerValue(9)))); +} + +TEST(FastDivisionTest, AllPossibleValues) { + for (int i = 1; i <= std::numeric_limits::max(); ++i) { + const QuickSmallDivision div(i); + for (int j = 0; j <= std::numeric_limits::max(); ++j) { + const uint16_t result = div.DivideByDivisor(j); + const uint16_t j_rounded_to_lowest_multiple = result * i; + CHECK_LE(j_rounded_to_lowest_multiple, j); + CHECK_GT(j_rounded_to_lowest_multiple + i, j); + } + } +} + +static void BM_FloorRatio(benchmark::State& state) { + IntegerValue divisor(654676436498); + IntegerValue dividend(45454655155444); + IntegerValue test(0); + for (auto _ : state) { + dividend++; + divisor++; + benchmark::DoNotOptimize(test += FloorRatio(dividend, divisor)); + } + state.SetBytesProcessed(static_cast(state.iterations())); +} + +static void BM_PositiveRemainder(benchmark::State& state) { + IntegerValue divisor(654676436498); + IntegerValue dividend(45454655155444); + IntegerValue test(0); + for (auto _ : state) { + dividend++; + divisor++; + benchmark::DoNotOptimize(test += PositiveRemainder(dividend, divisor)); + } + state.SetBytesProcessed(static_cast(state.iterations())); +} + +static void BM_PositiveRemainderAlternative(benchmark::State& state) { + IntegerValue divisor(654676436498); + IntegerValue dividend(45454655155444); + IntegerValue test(0); + for (auto _ : state) { + dividend++; + divisor++; + benchmark::DoNotOptimize(test += dividend - + divisor * FloorRatio(dividend, divisor)); + } + state.SetBytesProcessed(static_cast(state.iterations())); +} + +// What we use in the code. This is safe of integer overflow. The compiler +// should also do a single integer division to get the quotient and remainder. 
+static void BM_DivisionAndRemainder(benchmark::State& state) { + IntegerValue divisor(654676436498); + IntegerValue dividend(45454655155444); + IntegerValue test(0); + for (auto _ : state) { + dividend++; + divisor++; + benchmark::DoNotOptimize(test += FloorRatio(dividend, divisor)); + benchmark::DoNotOptimize(test += PositiveRemainder(dividend, divisor)); + } + state.SetBytesProcessed(static_cast(state.iterations())); +} + +// An alternative version, note however that divisor * f might overflow! +static void BM_DivisionAndRemainderAlternative(benchmark::State& state) { + IntegerValue divisor(654676436498); + IntegerValue dividend(45454655155444); + IntegerValue test(0); + for (auto _ : state) { + dividend++; + divisor++; + const IntegerValue f = FloorRatio(dividend, divisor); + benchmark::DoNotOptimize(test += f); + benchmark::DoNotOptimize(test += dividend - divisor * f); + } + state.SetBytesProcessed(static_cast(state.iterations())); +} + +// The best we can hope for ? +static void BM_DivisionAndRemainderBaseline(benchmark::State& state) { + IntegerValue divisor(654676436498); + IntegerValue dividend(45454655155444); + IntegerValue test(0); + for (auto _ : state) { + dividend++; + divisor++; + benchmark::DoNotOptimize(test += dividend / divisor); + benchmark::DoNotOptimize(test += dividend % divisor); + } + state.SetBytesProcessed(static_cast(state.iterations())); +} + +BENCHMARK(BM_FloorRatio); +BENCHMARK(BM_PositiveRemainder); +BENCHMARK(BM_PositiveRemainderAlternative); +BENCHMARK(BM_DivisionAndRemainder); +BENCHMARK(BM_DivisionAndRemainderAlternative); +BENCHMARK(BM_DivisionAndRemainderBaseline); + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/intervals_test.cc b/ortools/sat/intervals_test.cc new file mode 100644 index 0000000000..ab2ead8f90 --- /dev/null +++ b/ortools/sat/intervals_test.cc @@ -0,0 +1,278 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/intervals.h" + +#include + +#include +#include + +#include "gtest/gtest.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/linear_constraint.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_solver.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +TEST(IntervalsRepositoryTest, Precedences) { + Model model; + const AffineExpression start1(model.Add(NewIntegerVariable(0, 10))); + const AffineExpression size1(model.Add(NewIntegerVariable(2, 10))); + const AffineExpression end1(model.Add(NewIntegerVariable(0, 10))); + const AffineExpression start2(model.Add(NewIntegerVariable(0, 10))); + const AffineExpression size2(model.Add(NewIntegerVariable(2, 10))); + const AffineExpression end2(model.Add(NewIntegerVariable(0, 10))); + + auto* repo = model.GetOrCreate(); + const IntervalVariable a = repo->CreateInterval(start1, end1, size1); + const IntervalVariable b = repo->CreateInterval(start2, end2, size2); + + // Ok to call many times. 
+ repo->CreateDisjunctivePrecedenceLiteral(a, b); + repo->CreateDisjunctivePrecedenceLiteral(a, b); + + EXPECT_NE(kNoLiteralIndex, repo->GetPrecedenceLiteral(a, b)); + EXPECT_EQ(Literal(repo->GetPrecedenceLiteral(a, b)), + Literal(repo->GetPrecedenceLiteral(b, a)).Negated()); +} + +TEST(SchedulingConstraintHelperTest, PushConstantBoundWithOptionalIntervals) { + Model model; + auto* repo = model.GetOrCreate(); + + const AffineExpression start(IntegerValue(0)); + const AffineExpression size(IntegerValue(10)); + const AffineExpression end(IntegerValue(10)); + + Literal presence2 = Literal(model.Add(NewBooleanVariable()), true); + IntervalVariable inter1 = + repo->CreateInterval(start, end, size, kNoLiteralIndex, false); + IntervalVariable inter2 = + repo->CreateInterval(start, end, size, presence2.Index(), false); + + SchedulingConstraintHelper helper({inter1, inter2}, &model); + + EXPECT_TRUE(helper.IncreaseStartMin(1, IntegerValue(20))); + EXPECT_FALSE(model.Get(Value(presence2))); +} + +TEST(SchedulingDemandHelperTest, EnergyInWindow) { + Model model; + + const AffineExpression start(model.Add(NewIntegerVariable(0, 10))); + const AffineExpression size(model.Add(NewIntegerVariable(2, 10))); + const AffineExpression end(model.Add(NewIntegerVariable(0, 10))); + const IntervalVariable inter = + model.GetOrCreate()->CreateInterval( + start, end, size, kNoLiteralIndex, false); + + const AffineExpression demand(model.Add(NewIntegerVariable(2, 10))); + + SchedulingConstraintHelper helper({inter}, &model); + SchedulingDemandHelper demands_helper({demand}, &helper, &model); + demands_helper.CacheAllEnergyValues(); + EXPECT_EQ(demands_helper.EnergyMin(0), IntegerValue(4)); + + const Literal alt1 = Literal(model.Add(NewBooleanVariable()), true); + const Literal alt2 = Literal(model.Add(NewBooleanVariable()), true); + demands_helper.OverrideDecomposedEnergies( + {{{alt1, IntegerValue(2), IntegerValue(4)}, + {alt2, IntegerValue(4), IntegerValue(2)}}}); + 
demands_helper.CacheAllEnergyValues(); + EXPECT_EQ(demands_helper.EnergyMin(0), IntegerValue(8)); + + EXPECT_EQ(0, demands_helper.EnergyMinInWindow(0, 8, 2)); + EXPECT_EQ(8, demands_helper.EnergyMinInWindow(0, 0, 10)); + EXPECT_EQ(0, demands_helper.EnergyMinInWindow(0, 2, 10)); + EXPECT_EQ(0, demands_helper.EnergyMinInWindow(0, 0, 8)); + EXPECT_EQ(4, demands_helper.EnergyMinInWindow(0, 0, 9)); +} + +TEST(SchedulingDemandHelperTest, EnergyInWindowTakeIntoAccountWindowSize) { + Model model; + + const AffineExpression start(model.Add(NewIntegerVariable(0, 4))); + const AffineExpression size(model.Add(NewIntegerVariable(6, 10))); + const AffineExpression end(model.Add(NewIntegerVariable(0, 10))); + const IntervalVariable inter = + model.GetOrCreate()->CreateInterval( + start, end, size, kNoLiteralIndex, false); + + const AffineExpression demand(model.Add(NewIntegerVariable(6, 10))); + + SchedulingConstraintHelper helper({inter}, &model); + SchedulingDemandHelper demands_helper({demand}, &helper, &model); + demands_helper.CacheAllEnergyValues(); + + const Literal alt1 = Literal(model.Add(NewBooleanVariable()), true); + const Literal alt2 = Literal(model.Add(NewBooleanVariable()), true); + demands_helper.OverrideDecomposedEnergies( + {{{alt1, IntegerValue(8), IntegerValue(6)}, + {alt2, IntegerValue(6), IntegerValue(8)}}}); + demands_helper.CacheAllEnergyValues(); + EXPECT_EQ(demands_helper.EnergyMin(0), IntegerValue(48)); + + EXPECT_EQ(6, demands_helper.EnergyMinInWindow(0, 5, 6)); +} + +TEST(SchedulingDemandHelperTest, LinearizedDemandWithAffineExpression) { + Model model; + + const AffineExpression start(model.Add(NewIntegerVariable(0, 10))); + const AffineExpression size(model.Add(NewIntegerVariable(2, 10))); + const AffineExpression end(model.Add(NewIntegerVariable(0, 10))); + const IntervalVariable inter = + model.GetOrCreate()->CreateInterval( + start, end, size, kNoLiteralIndex, false); + + const AffineExpression demand( + 
AffineExpression(model.Add(NewIntegerVariable(2, 10)), 2, 5)); + + SchedulingConstraintHelper helper({inter}, &model); + SchedulingDemandHelper demands_helper({demand}, &helper, &model); + demands_helper.CacheAllEnergyValues(); + + LinearConstraintBuilder builder(&model); + ASSERT_TRUE(demands_helper.AddLinearizedDemand(0, &builder)); + EXPECT_EQ(builder.BuildExpression().DebugString(), "2*X3 + 5"); +} + +TEST(SchedulingDemandHelperTest, LinearizedDemandWithDecomposedEnergy) { + Model model; + + const AffineExpression start(model.Add(NewIntegerVariable(0, 10))); + const AffineExpression size(model.Add(NewIntegerVariable(2, 10))); + const AffineExpression end(model.Add(NewIntegerVariable(0, 10))); + const IntervalVariable inter = + model.GetOrCreate()->CreateInterval( + start, end, size, kNoLiteralIndex, false); + + const AffineExpression demand(model.Add(NewIntegerVariable(2, 10))); + + SchedulingConstraintHelper helper({inter}, &model); + SchedulingDemandHelper demands_helper({demand}, &helper, &model); + demands_helper.CacheAllEnergyValues(); + EXPECT_EQ(demands_helper.EnergyMin(0), IntegerValue(4)); + + const Literal alt1 = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var1(model.Add(NewIntegerVariable(0, 1))); + model.GetOrCreate()->AssociateToIntegerEqualValue( + alt1, var1, IntegerValue(1)); + + const Literal alt2 = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var2(model.Add(NewIntegerVariable(0, 1))); + model.GetOrCreate()->AssociateToIntegerEqualValue( + alt2, var2, IntegerValue(1)); + demands_helper.OverrideDecomposedEnergies( + {{{alt1, IntegerValue(2), IntegerValue(4)}, + {alt2, IntegerValue(4), IntegerValue(2)}}}); + demands_helper.CacheAllEnergyValues(); + LinearConstraintBuilder builder(&model); + ASSERT_TRUE(demands_helper.AddLinearizedDemand(0, &builder)); + EXPECT_EQ(builder.BuildExpression().DebugString(), "4*X4 2*X5"); +} + +TEST(SchedulingDemandHelperTest, FilteredDecomposedEnergy) { + Model 
model; + SatSolver* sat_solver = model.GetOrCreate(); + IntegerEncoder* encoder = model.GetOrCreate(); + + const AffineExpression start(model.Add(NewIntegerVariable(0, 10))); + const AffineExpression size(model.Add(NewIntegerVariable(2, 10))); + const AffineExpression end(model.Add(NewIntegerVariable(0, 10))); + const IntervalVariable inter = + model.GetOrCreate()->CreateInterval( + start, end, size, kNoLiteralIndex, false); + + const AffineExpression demand(model.Add(NewIntegerVariable(2, 10))); + + SchedulingConstraintHelper helper({inter}, &model); + SchedulingDemandHelper demands_helper({demand}, &helper, &model); + demands_helper.CacheAllEnergyValues(); + EXPECT_EQ(demands_helper.EnergyMin(0), IntegerValue(4)); + + const std::vector no_energy; + EXPECT_EQ(demands_helper.FilteredDecomposedEnergy(0), no_energy); + + const Literal alt1 = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var1(model.Add(NewIntegerVariable(0, 1))); + encoder->AssociateToIntegerEqualValue(alt1, var1, IntegerValue(1)); + + const Literal alt2 = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var2(model.Add(NewIntegerVariable(0, 1))); + encoder->AssociateToIntegerEqualValue(alt2, var2, IntegerValue(1)); + const std::vector energy = { + {alt1, IntegerValue(2), IntegerValue(4)}, + {alt2, IntegerValue(4), IntegerValue(2)}}; + demands_helper.OverrideDecomposedEnergies({energy}); + demands_helper.CacheAllEnergyValues(); + EXPECT_EQ(demands_helper.FilteredDecomposedEnergy(0), energy); + + EXPECT_EQ(sat_solver->EnqueueDecisionAndBackjumpOnConflict(alt1.Negated()), + 0); + const std::vector filtered_energy = { + {alt2, IntegerValue(4), IntegerValue(2)}}; + EXPECT_EQ(demands_helper.FilteredDecomposedEnergy(0), filtered_energy); + EXPECT_EQ(demands_helper.DecomposedEnergies()[0], energy); +} + +TEST(SchedulingDemandHelperTest, FilteredDecomposedEnergyWithFalseLiteral) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + + const 
AffineExpression start(model.Add(NewIntegerVariable(0, 10))); + const AffineExpression size(model.Add(NewIntegerVariable(2, 10))); + const AffineExpression end(model.Add(NewIntegerVariable(0, 10))); + const IntervalVariable inter = + model.GetOrCreate()->CreateInterval( + start, end, size, kNoLiteralIndex, false); + + const AffineExpression demand(model.Add(NewIntegerVariable(2, 10))); + + SchedulingConstraintHelper helper({inter}, &model); + SchedulingDemandHelper demands_helper({demand}, &helper, &model); + demands_helper.CacheAllEnergyValues(); + EXPECT_EQ(demands_helper.EnergyMin(0), IntegerValue(4)); + + const std::vector no_energy; + EXPECT_EQ(demands_helper.FilteredDecomposedEnergy(0), no_energy); + + const Literal alt1 = encoder->GetFalseLiteral(); + const IntegerVariable var1(model.Add(NewIntegerVariable(0, 1))); + model.GetOrCreate()->AssociateToIntegerEqualValue( + alt1, var1, IntegerValue(1)); + + const Literal alt2 = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var2(model.Add(NewIntegerVariable(0, 1))); + encoder->AssociateToIntegerEqualValue(alt2, var2, IntegerValue(1)); + const std::vector energy = { + {alt1, IntegerValue(2), IntegerValue(4)}, + {alt2, IntegerValue(4), IntegerValue(2)}}; + demands_helper.OverrideDecomposedEnergies({energy}); + demands_helper.CacheAllEnergyValues(); + const std::vector filtered_energy = { + {alt2, IntegerValue(4), IntegerValue(2)}}; + EXPECT_EQ(demands_helper.DecomposedEnergies()[0], filtered_energy); + EXPECT_EQ(demands_helper.FilteredDecomposedEnergy(0), filtered_energy); + EXPECT_EQ(0, model.GetOrCreate()->CurrentDecisionLevel()); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/linear_constraint_manager_test.cc b/ortools/sat/linear_constraint_manager_test.cc new file mode 100644 index 0000000000..e842599ca5 --- /dev/null +++ b/ortools/sat/linear_constraint_manager_test.cc @@ -0,0 +1,421 @@ +// Copyright 2010-2024 Google LLC +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/linear_constraint_manager.h" + +#include +#include +#include + +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/strong_vector.h" +#include "ortools/glop/variables_info.h" +#include "ortools/lp_data/lp_types.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/linear_constraint.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::ElementsAre; +using ::testing::EndsWith; +using ::testing::StartsWith; +using ::testing::UnorderedElementsAre; +using ConstraintIndex = LinearConstraintManager::ConstraintIndex; + +TEST(LinearConstraintManagerTest, DuplicateDetection) { + Model model; + LinearConstraintManager manager(&model); + const IntegerVariable x = model.Add(NewIntegerVariable(-10, 10)); + + LinearConstraintBuilder ct_one(IntegerValue(0), IntegerValue(10)); + ct_one.AddTerm(x, IntegerValue(2)); + manager.Add(ct_one.Build()); + + LinearConstraintBuilder ct_two(IntegerValue(-4), IntegerValue(6)); + ct_two.AddTerm(NegationOf(x), IntegerValue(-2)); + manager.Add(ct_two.Build()); + + EXPECT_EQ(manager.AllConstraints().size(), 1); + EXPECT_EQ(manager.AllConstraints().front().constraint.DebugString(), + "0 <= 1*X0 <= 3"); +} + +void SetLpValue(IntegerVariable v, double value, 
Model* model) { + auto& values = *model->GetOrCreate(); + const int needed_size = 1 + std::max(v.value(), NegationOf(v).value()); + if (needed_size > values.size()) values.resize(needed_size, 0.0); + values[v] = value; + values[NegationOf(v)] = -value; +} + +TEST(LinearConstraintManagerTest, DuplicateDetectionCuts) { + Model model; + LinearConstraintManager manager(&model); + const IntegerVariable x = model.Add(NewIntegerVariable(-10, 10)); + SetLpValue(x, -4.0, &model); + + LinearConstraintBuilder ct_one(IntegerValue(0), IntegerValue(10)); + ct_one.AddTerm(x, IntegerValue(2)); + manager.AddCut(ct_one.Build(), "Cut"); + + LinearConstraintBuilder ct_two(IntegerValue(-4), IntegerValue(6)); + ct_two.AddTerm(NegationOf(x), IntegerValue(-2)); + manager.AddCut(ct_two.Build(), "Cut"); + + // The second cut is more restrictive so it counts. + EXPECT_EQ(manager.num_cuts(), 2); + + EXPECT_EQ(manager.AllConstraints().size(), 1); + EXPECT_EQ(manager.AllConstraints().front().constraint.DebugString(), + "0 <= 1*X0 <= 3"); +} + +TEST(LinearConstraintManagerTest, DuplicateDetectionCauseLpChange) { + Model model; + LinearConstraintManager manager(&model); + const IntegerVariable x = model.Add(NewIntegerVariable(-10, 10)); + SetLpValue(x, 0.0, &model); + + LinearConstraintBuilder ct_one(IntegerValue(0), IntegerValue(10)); + ct_one.AddTerm(x, IntegerValue(2)); + manager.Add(ct_one.Build()); + + manager.AddAllConstraintsToLp(); + EXPECT_THAT(manager.LpConstraints(), + UnorderedElementsAre(ConstraintIndex(0))); + glop::BasisState state; + state.statuses.resize(glop::ColIndex(1)); + EXPECT_FALSE(manager.ChangeLp(&state)); + + // Adding the second constraint will cause a bound change, so ChangeLp() will + // returns true even if the constraint is satisfied. 
+ LinearConstraintBuilder ct_two(IntegerValue(-4), IntegerValue(6)); + ct_two.AddTerm(x, IntegerValue(2)); + manager.Add(ct_two.Build()); + EXPECT_TRUE(manager.ChangeLp(&state)); + + EXPECT_EQ(manager.AllConstraints().size(), 1); + EXPECT_EQ(manager.AllConstraints().front().constraint.DebugString(), + "0 <= 1*X0 <= 3"); +} + +TEST(LinearConstraintManagerTest, OnlyAddInfeasibleConstraints) { + Model model; + LinearConstraintManager manager(&model); + const IntegerVariable x = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(-10, 10)); + SetLpValue(x, 0.0, &model); + SetLpValue(y, 0.0, &model); + + LinearConstraintBuilder ct_one(IntegerValue(0), IntegerValue(10)); + ct_one.AddTerm(x, IntegerValue(2)); + ct_one.AddTerm(y, IntegerValue(3)); + manager.Add(ct_one.Build()); + + LinearConstraintBuilder ct_two(IntegerValue(-4), IntegerValue(6)); + ct_two.AddTerm(x, IntegerValue(3)); + ct_one.AddTerm(y, IntegerValue(2)); + manager.Add(ct_two.Build()); + + EXPECT_TRUE(manager.LpConstraints().empty()); + EXPECT_EQ(manager.AllConstraints().size(), 2); + + // All constraints satisfy this, so no change. + glop::BasisState state; + state.statuses.resize(glop::ColIndex(2)); // Content is not relevant. + EXPECT_FALSE(manager.ChangeLp(&state)); + EXPECT_FALSE(manager.ChangeLp(&state)); + + SetLpValue(x, -1.0, &model); + EXPECT_TRUE(manager.ChangeLp(&state)); + EXPECT_THAT(manager.LpConstraints(), + UnorderedElementsAre(ConstraintIndex(0))); + EXPECT_EQ(state.statuses.size(), glop::ColIndex(3)); // State was resized. + EXPECT_EQ(state.statuses[glop::ColIndex(2)], glop::VariableStatus::BASIC); + + // Note that we keep the first constraint even if the value of 4.0 make it + // satisfied. + SetLpValue(x, 4.0, &model); + EXPECT_TRUE(manager.ChangeLp(&state)); + EXPECT_THAT(manager.LpConstraints(), + UnorderedElementsAre(ConstraintIndex(0), ConstraintIndex(1))); + EXPECT_EQ(state.statuses.size(), glop::ColIndex(4)); // State was resized. 
+ EXPECT_EQ(state.statuses[glop::ColIndex(3)], glop::VariableStatus::BASIC); +} + +TEST(LinearConstraintManagerTest, OnlyAddOrthogonalConstraints) { + Model model; + model.GetOrCreate()->set_min_orthogonality_for_lp_constraints( + 0.8); + LinearConstraintManager manager(&model); + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + SetLpValue(x, 1.0, &model); + SetLpValue(y, 1.0, &model); + SetLpValue(z, 1.0, &model); + + LinearConstraintBuilder ct_one(IntegerValue(0), IntegerValue(11)); + ct_one.AddTerm(x, IntegerValue(3)); + ct_one.AddTerm(y, IntegerValue(-4)); + manager.Add(ct_one.Build()); + + LinearConstraintBuilder ct_two(IntegerValue(-4), IntegerValue(2)); + ct_two.AddTerm(z, IntegerValue(-5)); + manager.Add(ct_two.Build()); + + LinearConstraintBuilder ct_three(IntegerValue(0), IntegerValue(14)); + ct_three.AddTerm(x, IntegerValue(5)); + ct_three.AddTerm(y, IntegerValue(5)); + ct_three.AddTerm(z, IntegerValue(5)); + manager.Add(ct_three.Build()); + + EXPECT_TRUE(manager.LpConstraints().empty()); + EXPECT_EQ(manager.AllConstraints().size(), 3); + + // First Call. Last constraint does not satisfy the orthogonality criteria. + glop::BasisState state; + EXPECT_TRUE(manager.ChangeLp(&state)); + EXPECT_THAT(manager.LpConstraints(), + UnorderedElementsAre(ConstraintIndex(0), ConstraintIndex(1))); + + // Second Call. Only the last constraint is considered. The other two + // constraints are already added. 
+ EXPECT_TRUE(manager.ChangeLp(&state)); + EXPECT_THAT(manager.LpConstraints(), + UnorderedElementsAre(ConstraintIndex(0), ConstraintIndex(1), + ConstraintIndex(2))); +} + +TEST(LinearConstraintManagerTest, RemoveIneffectiveCuts) { + Model model; + model.GetOrCreate()->set_max_consecutive_inactive_count(0); + + LinearConstraintManager manager(&model); + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + SetLpValue(x, 1.0, &model); + SetLpValue(y, 1.0, &model); + + LinearConstraintBuilder ct_one(IntegerValue(0), IntegerValue(11)); + ct_one.AddTerm(x, IntegerValue(3)); + ct_one.AddTerm(y, IntegerValue(-4)); + manager.AddCut(ct_one.Build(), "Cut"); + + EXPECT_TRUE(manager.LpConstraints().empty()); + EXPECT_EQ(manager.AllConstraints().size(), 1); + + // First Call. The constraint is added to LP. + glop::BasisState state; + EXPECT_TRUE(manager.ChangeLp(&state)); + EXPECT_THAT(manager.LpConstraints(), + UnorderedElementsAre(ConstraintIndex(0))); + + // Second Call. Constraint is inactive and hence removed. 
+ state.statuses.resize(glop::ColIndex(2 + manager.LpConstraints().size())); + state.statuses[glop::ColIndex(2)] = glop::VariableStatus::BASIC; + EXPECT_TRUE(manager.ChangeLp(&state)); + EXPECT_TRUE(manager.LpConstraints().empty()); + EXPECT_EQ(state.statuses.size(), glop::ColIndex(2)); +} + +TEST(LinearConstraintManagerTest, ObjectiveParallelism) { + Model model; + LinearConstraintManager manager(&model); + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + SetLpValue(x, 1.0, &model); + SetLpValue(y, 1.0, &model); + SetLpValue(z, 1.0, &model); + + manager.SetObjectiveCoefficient(x, IntegerValue(1)); + manager.SetObjectiveCoefficient(y, IntegerValue(1)); + + LinearConstraintBuilder ct_one(IntegerValue(0), IntegerValue(0)); + ct_one.AddTerm(z, IntegerValue(-1)); + manager.Add(ct_one.Build()); + + LinearConstraintBuilder ct_two(IntegerValue(0), IntegerValue(2)); + ct_two.AddTerm(x, IntegerValue(1)); + ct_two.AddTerm(y, IntegerValue(1)); + ct_two.AddTerm(z, IntegerValue(1)); + manager.Add(ct_two.Build()); + + EXPECT_TRUE(manager.LpConstraints().empty()); + EXPECT_EQ(manager.AllConstraints().size(), 2); + + // Last constraint is more parallel to the objective. 
+ glop::BasisState state; + EXPECT_TRUE(manager.ChangeLp(&state)); + // scores: efficacy, orthogonality, obj_para, total + // ct_one: 1, 1, 0, 2 + // ct_two: 0.5774, 1, 0.8165, 2.394 + + EXPECT_THAT(manager.LpConstraints(), + ElementsAre(ConstraintIndex(1), ConstraintIndex(0))); +} + +TEST(LinearConstraintManagerTest, SimplificationRemoveFixedVariable) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 5)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + SetLpValue(x, 0.0, &model); + SetLpValue(y, 0.0, &model); + SetLpValue(z, 0.0, &model); + + LinearConstraintManager manager(&model); + + { + LinearConstraintBuilder ct(IntegerValue(0), IntegerValue(11)); + ct.AddTerm(x, IntegerValue(3)); + ct.AddTerm(y, IntegerValue(-4)); + ct.AddTerm(z, IntegerValue(7)); + manager.Add(ct.Build()); + } + + const LinearConstraintManager::ConstraintIndex index(0); + EXPECT_EQ("0 <= 3*X0 -4*X1 7*X2 <= 11", + manager.AllConstraints()[index].constraint.DebugString()); + + // ChangeLp will trigger the simplification. + EXPECT_TRUE(model.GetOrCreate()->Enqueue( + IntegerLiteral::GreaterOrEqual(y, IntegerValue(5)), {}, {})); + glop::BasisState state; + EXPECT_TRUE(manager.ChangeLp(&state)); + EXPECT_EQ(1, manager.num_shortened_constraints()); + EXPECT_EQ("20 <= 3*X0 7*X2 <= 31", + manager.AllConstraints()[index].constraint.DebugString()); + + // We also test that the constraint equivalence work with the change. + // Adding a constraint equiv to the new one is detected. 
+ { + LinearConstraintBuilder ct(IntegerValue(0), IntegerValue(21)); + ct.AddTerm(x, IntegerValue(3)); + ct.AddTerm(z, IntegerValue(7)); + manager.Add(ct.Build()); + } + EXPECT_EQ(manager.AllConstraints().size(), 1); + EXPECT_EQ("20 <= 3*X0 7*X2 <= 21", + manager.AllConstraints()[index].constraint.DebugString()); +} + +TEST(LinearConstraintManagerTest, SimplificationStrenghtenUb) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + LinearConstraintManager manager(&model); + + LinearConstraintBuilder ct(IntegerValue(-100), IntegerValue(30 + 70 - 5)); + ct.AddTerm(x, IntegerValue(3)); + ct.AddTerm(y, IntegerValue(-8)); + ct.AddTerm(z, IntegerValue(7)); + manager.Add(ct.Build()); + + const LinearConstraintManager::ConstraintIndex index(0); + EXPECT_EQ(2, manager.num_coeff_strenghtening()); + EXPECT_THAT(manager.AllConstraints()[index].constraint.DebugString(), + EndsWith("3*X0 -5*X1 5*X2 <= 75")); +} + +TEST(LinearConstraintManagerTest, SimplificationStrenghtenLb) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + LinearConstraintManager manager(&model); + + LinearConstraintBuilder ct(IntegerValue(-75), IntegerValue(1000)); + ct.AddTerm(x, IntegerValue(3)); + ct.AddTerm(y, IntegerValue(-8)); + ct.AddTerm(z, IntegerValue(7)); + manager.Add(ct.Build()); + + const LinearConstraintManager::ConstraintIndex index(0); + EXPECT_EQ(2, manager.num_coeff_strenghtening()); + EXPECT_THAT(manager.AllConstraints()[index].constraint.DebugString(), + StartsWith("-45 <= 3*X0 -5*X1 5*X2")); +} + +TEST(LinearConstraintManagerTest, AdvancedStrenghtening1) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = 
model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + LinearConstraintManager manager(&model); + + LinearConstraintBuilder ct(IntegerValue(16), IntegerValue(1000)); + ct.AddTerm(x, IntegerValue(15)); + ct.AddTerm(y, IntegerValue(9)); + ct.AddTerm(z, IntegerValue(14)); + manager.Add(ct.Build()); + + const LinearConstraintManager::ConstraintIndex index(0); + EXPECT_EQ(3, manager.num_coeff_strenghtening()); + EXPECT_THAT(manager.AllConstraints()[index].constraint.DebugString(), + StartsWith("2 <= 1*X0 1*X1 1*X2")); +} + +TEST(LinearConstraintManagerTest, AdvancedStrenghtening2) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + LinearConstraintManager manager(&model); + + LinearConstraintBuilder ct(IntegerValue(16), IntegerValue(1000)); + ct.AddTerm(x, IntegerValue(15)); + ct.AddTerm(y, IntegerValue(7)); + ct.AddTerm(z, IntegerValue(14)); + manager.Add(ct.Build()); + + const LinearConstraintManager::ConstraintIndex index(0); + EXPECT_EQ(2, manager.num_coeff_strenghtening()); + EXPECT_THAT(manager.AllConstraints()[index].constraint.DebugString(), + StartsWith("16 <= 9*X0 7*X1 9*X2")); +} + +TEST(LinearConstraintManagerTest, AdvancedStrenghtening3) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + LinearConstraintManager manager(&model); + + LinearConstraintBuilder ct(IntegerValue(5), IntegerValue(1000)); + ct.AddTerm(x, IntegerValue(5)); + ct.AddTerm(y, IntegerValue(5)); + ct.AddTerm(z, IntegerValue(4)); + manager.Add(ct.Build()); + + // TODO(user): Technically, because the 5 are "enforcement" the inner + // constraint is 4*X2 >= 5 which can be rewriten and X2 >= 2, and we could + // instead 
have 2X0 + 2X1 + X2 >= 2 which should be tighter. + const LinearConstraintManager::ConstraintIndex index(0); + EXPECT_EQ(1, manager.num_coeff_strenghtening()); + EXPECT_THAT(manager.AllConstraints()[index].constraint.DebugString(), + StartsWith("5 <= 5*X0 5*X1 3*X2")); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/linear_constraint_test.cc b/ortools/sat/linear_constraint_test.cc new file mode 100644 index 0000000000..6c6dbc4ada --- /dev/null +++ b/ortools/sat/linear_constraint_test.cc @@ -0,0 +1,480 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/linear_constraint.h" + +#include +#include +#include +#include +#include + +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/strong_vector.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::ElementsAre; + +TEST(ComputeActivityTest, BasicBehavior) { + // The bounds are not useful for this test. 
+ LinearConstraintBuilder ct(IntegerValue(0), IntegerValue(0)); + + ct.AddTerm(IntegerVariable(0), IntegerValue(1)); + ct.AddTerm(IntegerVariable(2), IntegerValue(-2)); + ct.AddTerm(IntegerVariable(4), IntegerValue(3)); + + util_intops::StrongVector values = {0.5, 0.0, 1.4, + 0.0, -2.1, 0.0}; + EXPECT_EQ(ComputeActivity(ct.Build(), values), 1 * 0.5 - 2 * 1.4 - 3 * 2.1); +} + +TEST(ComputeActivityTest, EmptyConstraint) { + // The bounds are not useful for this test. + LinearConstraintBuilder ct(IntegerValue(-10), IntegerValue(10)); + util_intops::StrongVector values; + EXPECT_EQ(ComputeActivity(ct.Build(), values), 0.0); +} + +TEST(ComputeInfinityNormTest, BasicTest) { + IntegerVariable x(0); + IntegerVariable y(2); + IntegerVariable z(4); + { + LinearConstraint constraint; + EXPECT_EQ(IntegerValue(0), ComputeInfinityNorm(constraint)); + } + { + LinearConstraintBuilder constraint; + constraint.AddTerm(x, IntegerValue(3)); + constraint.AddTerm(y, IntegerValue(-4)); + constraint.AddTerm(z, IntegerValue(1)); + EXPECT_EQ(IntegerValue(4), ComputeInfinityNorm(constraint.Build())); + } + { + LinearConstraintBuilder constraint; + constraint.AddTerm(y, IntegerValue(std::numeric_limits::max())); + EXPECT_EQ(IntegerValue(std::numeric_limits::max()), + ComputeInfinityNorm(constraint.Build())); + } +} + +TEST(ComputeL2NormTest, BasicTest) { + IntegerVariable x(0); + IntegerVariable y(2); + IntegerVariable z(4); + { + LinearConstraint constraint; + EXPECT_EQ(0.0, ComputeL2Norm(constraint)); + } + { + LinearConstraintBuilder constraint; + constraint.AddTerm(x, IntegerValue(3)); + constraint.AddTerm(y, IntegerValue(-4)); + constraint.AddTerm(z, IntegerValue(12)); + EXPECT_EQ(13.0, ComputeL2Norm(constraint.Build())); + } + { + LinearConstraintBuilder constraint; + constraint.AddTerm(x, kMaxIntegerValue); + constraint.AddTerm(y, kMaxIntegerValue); + EXPECT_EQ(std::numeric_limits::infinity(), + ComputeL2Norm(constraint.Build())); + } + { + LinearConstraintBuilder constraint; + 
constraint.AddTerm(x, IntegerValue(1LL << 60)); + constraint.AddTerm(y, IntegerValue(1LL << 60)); + EXPECT_NEAR(1.6304772e+18, ComputeL2Norm(constraint.Build()), 1e+16); + } +} + +TEST(ScalarProductTest, BasicTest) { + IntegerVariable x(0); + IntegerVariable y(2); + IntegerVariable z(4); + + LinearConstraintBuilder ct_one(IntegerValue(0), IntegerValue(11)); + ct_one.AddTerm(x, IntegerValue(3)); + ct_one.AddTerm(y, IntegerValue(-4)); + + LinearConstraintBuilder ct_two(IntegerValue(1), IntegerValue(2)); + ct_two.AddTerm(z, IntegerValue(-1)); + + LinearConstraintBuilder ct_three(IntegerValue(0), IntegerValue(2)); + ct_three.AddTerm(x, IntegerValue(1)); + ct_three.AddTerm(y, IntegerValue(1)); + ct_three.AddTerm(z, IntegerValue(1)); + + EXPECT_EQ(0.0, ScalarProduct(ct_one.Build(), ct_two.Build())); + EXPECT_EQ(-1.0, ScalarProduct(ct_one.Build(), ct_three.Build())); + EXPECT_EQ(-1.0, ScalarProduct(ct_two.Build(), ct_three.Build())); +} + +namespace { + +// Creates an upper bounded LinearConstraintBuilder from a dense representation. 
+LinearConstraint CreateUbConstraintForTest(
+    absl::Span<const int64_t> dense_coeffs, int64_t upper_bound) {
+  LinearConstraint result;
+  result.resize(dense_coeffs.size());
+  int new_size = 0;
+  for (int i = 0; i < dense_coeffs.size(); ++i) {
+    if (dense_coeffs[i] != 0) {
+      result.vars[new_size] = IntegerVariable(i);
+      result.coeffs[new_size] = dense_coeffs[i];
+      ++new_size;
+    }
+  }
+  result.resize(new_size);
+  result.lb = kMinIntegerValue;
+  result.ub = upper_bound;
+  return result;
+}
+
+}  // namespace
+
+TEST(DivideByGCDTest, BasicBehaviorWithoughLowerBound) {
+  LinearConstraint ct = CreateUbConstraintForTest({2, 4, -8}, 11);
+  DivideByGCD(&ct);
+  const LinearConstraint expected = CreateUbConstraintForTest({1, 2, -4}, 5);
+  EXPECT_EQ(ct, expected);
+}
+
+TEST(DivideByGCDTest, BasicBehaviorWithLowerBound) {
+  LinearConstraint ct = CreateUbConstraintForTest({2, 4, -8}, 11);
+  ct.lb = IntegerValue(-3);
+  DivideByGCD(&ct);
+  LinearConstraint expected = CreateUbConstraintForTest({1, 2, -4}, 5);
+  expected.lb = IntegerValue(-1);
+  EXPECT_EQ(ct, expected);
+}
+
+TEST(RemoveZeroTermsTest, BasicBehavior) {
+  LinearConstraint ct = CreateUbConstraintForTest({2, 4, -8}, 11);
+  ct.coeffs[1] = IntegerValue(0);
+  RemoveZeroTerms(&ct);
+  EXPECT_EQ(ct, CreateUbConstraintForTest({2, 0, -8}, 11));
+}
+
+TEST(MakeAllCoefficientsPositiveTest, BasicBehavior) {
+  // Note that this relies on the fact that the negation of an IntegerVariable
+  // var is the one with IntegerVariable(var.value() ^ 1);
+  LinearConstraint ct = CreateUbConstraintForTest({-2, 0, -7, 0}, 10);
+  MakeAllCoefficientsPositive(&ct);
+  EXPECT_EQ(ct, CreateUbConstraintForTest({0, 2, 0, 7}, 10));
+}
+
+TEST(LinearConstraintBuilderTest, DuplicateCoefficient) {
+  Model model;
+  model.GetOrCreate();
+  LinearConstraintBuilder builder(&model, kMinIntegerValue, IntegerValue(10));
+
+  // Note that internally, positive variables have an even index, so we only
+  // use those so that we don't remap a negated variable.
+ builder.AddTerm(IntegerVariable(0), IntegerValue(100)); + builder.AddTerm(IntegerVariable(2), IntegerValue(10)); + builder.AddTerm(IntegerVariable(4), IntegerValue(7)); + builder.AddTerm(IntegerVariable(0), IntegerValue(-10)); + builder.AddTerm(IntegerVariable(2), IntegerValue(1)); + builder.AddTerm(IntegerVariable(4), IntegerValue(-7)); + builder.AddTerm(IntegerVariable(2), IntegerValue(3)); + + EXPECT_EQ(builder.Build(), CreateUbConstraintForTest({90, 0, 14}, 10)); +} + +TEST(LinearConstraintBuilderTest, AffineExpression) { + Model model; + model.GetOrCreate(); + LinearConstraintBuilder builder(&model, kMinIntegerValue, IntegerValue(10)); + + // Note that internally, positive variable have an even index, so we only + // use those so that we don't remap a negated variable. + const IntegerVariable var(0); + builder.AddTerm(AffineExpression(var, IntegerValue(3), IntegerValue(2)), + IntegerValue(100)); + builder.AddTerm(AffineExpression(var, IntegerValue(-2), IntegerValue(1)), + IntegerValue(70)); + + // Coeff is 3*100 - 2 * 70, ub is 10 - 2*100 - 1*70 + EXPECT_EQ(builder.Build(), CreateUbConstraintForTest({160}, -260)) + << builder.Build().DebugString(); +} + +TEST(LinearConstraintBuilderTest, AddLiterals) { + Model model; + model.GetOrCreate(); + const BooleanVariable b = model.Add(NewBooleanVariable()); + const BooleanVariable c = model.Add(NewBooleanVariable()); + const BooleanVariable d = model.Add(NewBooleanVariable()); + + // Create integer views. + model.Add(NewIntegerVariableFromLiteral(Literal(b, true))); // X0 + model.Add(NewIntegerVariableFromLiteral(Literal(b, false))); // X1 + model.Add(NewIntegerVariableFromLiteral(Literal(c, false))); // X2 + model.Add(NewIntegerVariableFromLiteral(Literal(d, false))); // X3 + model.Add(NewIntegerVariableFromLiteral(Literal(d, true))); // X4 + + // When we have both view, we use the lowest IntegerVariable. 
+ { + LinearConstraintBuilder builder(&model, kMinIntegerValue, IntegerValue(1)); + EXPECT_TRUE(builder.AddLiteralTerm(Literal(b, true), IntegerValue(1))); + EXPECT_EQ(builder.Build().DebugString(), "1*X0 <= 1"); + } + { + LinearConstraintBuilder builder(&model, kMinIntegerValue, IntegerValue(1)); + EXPECT_TRUE(builder.AddLiteralTerm(Literal(b, false), IntegerValue(1))); + EXPECT_EQ(builder.Build().DebugString(), "-1*X0 <= 0"); + } + { + LinearConstraintBuilder builder(&model, kMinIntegerValue, IntegerValue(1)); + EXPECT_TRUE(builder.AddLiteralTerm(Literal(d, true), IntegerValue(1))); + EXPECT_EQ(builder.Build().DebugString(), "-1*X3 <= 0"); + } + { + LinearConstraintBuilder builder(&model, kMinIntegerValue, IntegerValue(1)); + EXPECT_TRUE(builder.AddLiteralTerm(Literal(d, false), IntegerValue(1))); + EXPECT_EQ(builder.Build().DebugString(), "1*X3 <= 1"); + } + + // When we have just one view, we use the one we have. + { + LinearConstraintBuilder builder(&model, kMinIntegerValue, IntegerValue(1)); + EXPECT_TRUE(builder.AddLiteralTerm(Literal(c, true), IntegerValue(1))); + EXPECT_EQ(builder.Build().DebugString(), "-1*X2 <= 0"); + } + { + LinearConstraintBuilder builder(&model, kMinIntegerValue, IntegerValue(1)); + EXPECT_TRUE(builder.AddLiteralTerm(Literal(c, false), IntegerValue(1))); + EXPECT_EQ(builder.Build().DebugString(), "1*X2 <= 1"); + } +} + +TEST(LinearConstraintBuilderTest, AddConstant) { + Model model; + model.GetOrCreate(); + LinearConstraintBuilder builder1(&model, kMinIntegerValue, IntegerValue(10)); + builder1.AddTerm(IntegerVariable(0), IntegerValue(5)); + builder1.AddTerm(IntegerVariable(2), IntegerValue(10)); + builder1.AddConstant(IntegerValue(3)); + EXPECT_EQ(builder1.Build().DebugString(), "5*X0 10*X1 <= 7"); + + LinearConstraintBuilder builder2(&model, IntegerValue(4), kMaxIntegerValue); + builder2.AddTerm(IntegerVariable(0), IntegerValue(5)); + builder2.AddTerm(IntegerVariable(2), IntegerValue(10)); + builder2.AddConstant(IntegerValue(-3)); + 
EXPECT_EQ(builder2.Build().DebugString(), "7 <= 5*X0 10*X1");
+
+  LinearConstraintBuilder builder3(&model, kMinIntegerValue, IntegerValue(10));
+  builder3.AddTerm(IntegerVariable(0), IntegerValue(5));
+  builder3.AddTerm(IntegerVariable(2), IntegerValue(10));
+  builder3.AddConstant(IntegerValue(-3));
+  EXPECT_EQ(builder3.Build().DebugString(), "5*X0 10*X1 <= 13");
+
+  LinearConstraintBuilder builder4(&model, IntegerValue(4), kMaxIntegerValue);
+  builder4.AddTerm(IntegerVariable(0), IntegerValue(5));
+  builder4.AddTerm(IntegerVariable(2), IntegerValue(10));
+  builder4.AddConstant(IntegerValue(3));
+  EXPECT_EQ(builder4.Build().DebugString(), "1 <= 5*X0 10*X1");
+
+  LinearConstraintBuilder builder5(&model, IntegerValue(4), IntegerValue(10));
+  builder5.AddTerm(IntegerVariable(0), IntegerValue(5));
+  builder5.AddTerm(IntegerVariable(2), IntegerValue(10));
+  builder5.AddConstant(IntegerValue(3));
+  EXPECT_EQ(builder5.Build().DebugString(), "1 <= 5*X0 10*X1 <= 7");
+}
+
+TEST(CleanTermsAndFillConstraintTest, VarAndItsNegation) {
+  std::vector<std::pair<IntegerVariable, IntegerValue>> terms;
+  terms.push_back({IntegerVariable(4), IntegerValue(7)});
+  terms.push_back({IntegerVariable(5), IntegerValue(4)});
+  LinearConstraint constraint;
+  CleanTermsAndFillConstraint(&terms, &constraint);
+  EXPECT_EQ(constraint.DebugString(), "0 <= 3*X2 <= 0");
+}
+
+TEST(LinearConstraintBuilderTest, AddQuadraticLowerBound) {
+  Model model;
+  model.GetOrCreate();
+  IntegerTrail* integer_trail = model.GetOrCreate<IntegerTrail>();
+  IntegerVariable x0 = model.Add(NewIntegerVariable(2, 5));
+  IntegerVariable x1 = model.Add(NewIntegerVariable(3, 6));
+  LinearConstraintBuilder builder1(&model, kMinIntegerValue, IntegerValue(10));
+  AffineExpression a0(x0, IntegerValue(3), IntegerValue(2));  // 3 * x0 + 2.
+ builder1.AddQuadraticLowerBound(a0, x1, integer_trail); + EXPECT_EQ(builder1.Build().DebugString(), "9*X0 8*X1 <= 28"); +} + +TEST(LinearConstraintBuilderTest, AddQuadraticLowerBoundAffineIsVar) { + Model model; + model.GetOrCreate(); + IntegerTrail* integer_trail = model.GetOrCreate(); + IntegerVariable x0 = model.Add(NewIntegerVariable(2, 5)); + IntegerVariable x1 = model.Add(NewIntegerVariable(3, 6)); + LinearConstraintBuilder builder1(&model, kMinIntegerValue, IntegerValue(10)); + builder1.AddQuadraticLowerBound(x0, x1, integer_trail); + EXPECT_EQ(builder1.Build().DebugString(), "3*X0 2*X1 <= 16"); +} + +TEST(LinearConstraintBuilderTest, AddQuadraticLowerBoundAffineIsConstant) { + Model model; + model.GetOrCreate(); + IntegerTrail* integer_trail = model.GetOrCreate(); + IntegerVariable x0 = model.Add(NewIntegerVariable(2, 5)); + LinearConstraintBuilder builder1(&model, kMinIntegerValue, IntegerValue(10)); + builder1.AddQuadraticLowerBound(IntegerValue(4), x0, integer_trail); + EXPECT_EQ(builder1.Build().DebugString(), "4*X0 <= 10"); +} + +TEST(LinExprTest, Bounds) { + Model model; + std::vector vars{model.Add(NewIntegerVariable(1, 2)), + model.Add(NewIntegerVariable(0, 3)), + model.Add(NewIntegerVariable(-2, 4))}; + IntegerTrail* integer_trail = model.GetOrCreate(); + LinearExpression expr1; // 2x0 + 3x1 - 5 + expr1.vars = {vars[0], vars[1]}; + expr1.coeffs = {IntegerValue(2), IntegerValue(3)}; + expr1.offset = IntegerValue(-5); + expr1 = CanonicalizeExpr(expr1); + EXPECT_EQ(IntegerValue(-3), expr1.Min(*integer_trail)); + EXPECT_EQ(IntegerValue(8), expr1.Max(*integer_trail)); + + LinearExpression expr2; // 2x1 - 5x2 + 6 + expr2.vars = {vars[1], vars[2]}; + expr2.coeffs = {IntegerValue(2), IntegerValue(-5)}; + expr2.offset = IntegerValue(6); + expr2 = CanonicalizeExpr(expr2); + EXPECT_EQ(IntegerValue(-14), expr2.Min(*integer_trail)); + EXPECT_EQ(IntegerValue(22), expr2.Max(*integer_trail)); + + LinearExpression expr3; // 2x0 + 3x2 + expr3.vars = {vars[0], 
vars[2]}; + expr3.coeffs = {IntegerValue(2), IntegerValue(3)}; + expr3 = CanonicalizeExpr(expr3); + EXPECT_EQ(IntegerValue(-4), expr3.Min(*integer_trail)); + EXPECT_EQ(IntegerValue(16), expr3.Max(*integer_trail)); +} + +TEST(LinExprTest, Canonicalization) { + Model model; + std::vector vars{model.Add(NewIntegerVariable(1, 2)), + model.Add(NewIntegerVariable(0, 3))}; + LinearExpression expr; // 2x0 - 3x1 - 5 + expr.vars = vars; + expr.coeffs = {IntegerValue(2), IntegerValue(-3)}; + expr.offset = IntegerValue(-5); + + LinearExpression canonical_expr = CanonicalizeExpr(expr); + EXPECT_THAT(canonical_expr.vars, ElementsAre(vars[0], NegationOf(vars[1]))); + EXPECT_THAT(canonical_expr.coeffs, + ElementsAre(IntegerValue(2), IntegerValue(3))); + EXPECT_EQ(canonical_expr.offset, IntegerValue(-5)); +} + +TEST(NoDuplicateVariable, BasicBehavior) { + LinearConstraint ct; + ct.lb = kMinIntegerValue; + ct.ub = IntegerValue(10); + + ct.resize(3); + ct.num_terms = 1; + ct.vars[0] = IntegerVariable(4); + ct.coeffs[0] = IntegerValue(1); + EXPECT_TRUE(NoDuplicateVariable(ct)); + + ct.num_terms = 2; + ct.vars[1] = IntegerVariable(2); + ct.coeffs[1] = IntegerValue(5); + EXPECT_TRUE(NoDuplicateVariable(ct)); + + ct.num_terms = 3; + ct.vars[2] = IntegerVariable(4); + ct.coeffs[2] = IntegerValue(1); + EXPECT_FALSE(NoDuplicateVariable(ct)); +} + +TEST(NoDuplicateVariable, BasicBehaviorNegativeVar) { + LinearConstraint ct; + + ct.lb = kMinIntegerValue; + ct.ub = IntegerValue(10); + + ct.resize(3); + ct.num_terms = 1; + ct.vars[0] = IntegerVariable(4); + ct.coeffs[0] = IntegerValue(1); + EXPECT_TRUE(NoDuplicateVariable(ct)); + + ct.num_terms = 2; + ct.vars[1] = IntegerVariable(2); + ct.coeffs[1] = IntegerValue(5); + EXPECT_TRUE(NoDuplicateVariable(ct)); + + ct.num_terms = 3; + ct.vars[2] = IntegerVariable(5); + ct.coeffs[2] = IntegerValue(1); + EXPECT_FALSE(NoDuplicateVariable(ct)); +} + +TEST(PositiveVarExpr, BasicBehaviorNegativeVar) { + LinearExpression ct; + ct.offset = IntegerValue(10); 
+ ct.vars.push_back(IntegerVariable(4)); + ct.coeffs.push_back(IntegerValue(1)); + + ct.vars.push_back(IntegerVariable(1)); + ct.coeffs.push_back(IntegerValue(5)); + + LinearExpression positive_var_expr = PositiveVarExpr(ct); + EXPECT_THAT(positive_var_expr.vars, + ElementsAre(ct.vars[0], NegationOf(ct.vars[1]))); + EXPECT_THAT(positive_var_expr.coeffs, + ElementsAre(ct.coeffs[0], -ct.coeffs[1])); + EXPECT_EQ(positive_var_expr.offset, ct.offset); +} + +TEST(GetCoefficient, BasicBehavior) { + LinearExpression ct; + ct.offset = IntegerValue(10); + ct.vars.push_back(IntegerVariable(4)); + ct.coeffs.push_back(IntegerValue(2)); + + EXPECT_EQ(IntegerValue(2), GetCoefficient(IntegerVariable(4), ct)); + EXPECT_EQ(IntegerValue(-2), GetCoefficient(IntegerVariable(5), ct)); + EXPECT_EQ(IntegerValue(0), GetCoefficient(IntegerVariable(2), ct)); +} + +TEST(GetCoefficientOfPositiveVar, BasicBehavior) { + LinearExpression ct; + ct.offset = IntegerValue(10); + ct.vars.push_back(IntegerVariable(4)); + ct.coeffs.push_back(IntegerValue(2)); + + EXPECT_EQ(IntegerValue(2), + GetCoefficientOfPositiveVar(IntegerVariable(4), ct)); + EXPECT_EQ(IntegerValue(0), + GetCoefficientOfPositiveVar(IntegerVariable(2), ct)); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/linear_propagation_test.cc b/ortools/sat/linear_propagation_test.cc new file mode 100644 index 0000000000..aeda248a41 --- /dev/null +++ b/ortools/sat/linear_propagation_test.cc @@ -0,0 +1,321 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "ortools/sat/linear_propagation.h"
+
+#include <stdint.h>
+
+#include <utility>
+#include <vector>
+
+#include "absl/log/check.h"
+#include "absl/types/span.h"
+#include "gtest/gtest.h"
+#include "ortools/base/gmock.h"
+#include "ortools/sat/integer.h"
+#include "ortools/sat/model.h"
+#include "ortools/sat/sat_base.h"
+#include "ortools/sat/sat_solver.h"
+#include "ortools/util/strong_integers.h"
+
+namespace operations_research {
+namespace sat {
+namespace {
+
+using ::testing::ElementsAre;
+
+TEST(EnforcementPropagatorTest, BasicTest) {
+  Model model;
+  auto* sat_solver = model.GetOrCreate<SatSolver>();
+  auto* trail = model.GetOrCreate<Trail>();
+  auto* propag = model.GetOrCreate<EnforcementPropagator>();
+  sat_solver->SetNumVariables(10);
+
+  const EnforcementId id1 = propag->Register(Literals({+1}));
+  const EnforcementId id2 = propag->Register(Literals({+1, +2}));
+  const EnforcementId id3 = propag->Register(Literals({-2}));
+
+  EXPECT_TRUE(propag->Propagate(trail));
+  EXPECT_EQ(propag->Status(id1), EnforcementStatus::CAN_PROPAGATE);
+  EXPECT_EQ(propag->Status(id2), EnforcementStatus::CANNOT_PROPAGATE);
+  EXPECT_EQ(propag->Status(id3), EnforcementStatus::CAN_PROPAGATE);
+
+  sat_solver->EnqueueDecisionIfNotConflicting(Literal(+1));
+  EXPECT_TRUE(propag->Propagate(trail));
+  EXPECT_EQ(propag->Status(id1), EnforcementStatus::IS_ENFORCED);
+  EXPECT_EQ(propag->Status(id2), EnforcementStatus::CAN_PROPAGATE);
+  EXPECT_EQ(propag->Status(id3), EnforcementStatus::CAN_PROPAGATE);
+
+  sat_solver->EnqueueDecisionIfNotConflicting(Literal(+2));
+  EXPECT_EQ(propag->Status(id1), EnforcementStatus::IS_ENFORCED);
+  EXPECT_EQ(propag->Status(id2), EnforcementStatus::IS_ENFORCED);
+  EXPECT_EQ(propag->Status(id3), EnforcementStatus::IS_FALSE);
+
+  CHECK(sat_solver->ResetToLevelZero());
+  EXPECT_EQ(propag->Status(id1), EnforcementStatus::CAN_PROPAGATE);
+  EXPECT_EQ(propag->Status(id2), EnforcementStatus::CANNOT_PROPAGATE);
+
EXPECT_EQ(propag->Status(id3), EnforcementStatus::CAN_PROPAGATE); +} + +TEST(EnforcementPropagatorTest, UntrailWork) { + Model model; + auto* sat_solver = model.GetOrCreate(); + auto* trail = model.GetOrCreate(); + auto* propag = model.GetOrCreate(); + sat_solver->SetNumVariables(10); + + const EnforcementId id1 = propag->Register(Literals({+1})); + const EnforcementId id2 = propag->Register(Literals({+2})); + const EnforcementId id3 = propag->Register(Literals({+3})); + + EXPECT_TRUE(propag->Propagate(trail)); + EXPECT_EQ(propag->Status(id1), EnforcementStatus::CAN_PROPAGATE); + EXPECT_EQ(propag->Status(id2), EnforcementStatus::CAN_PROPAGATE); + EXPECT_EQ(propag->Status(id3), EnforcementStatus::CAN_PROPAGATE); + + sat_solver->EnqueueDecisionIfNotConflicting(Literal(+1)); + EXPECT_TRUE(propag->Propagate(trail)); + EXPECT_EQ(propag->Status(id1), EnforcementStatus::IS_ENFORCED); + EXPECT_EQ(propag->Status(id2), EnforcementStatus::CAN_PROPAGATE); + EXPECT_EQ(propag->Status(id3), EnforcementStatus::CAN_PROPAGATE); + + sat_solver->EnqueueDecisionIfNotConflicting(Literal(+2)); + EXPECT_TRUE(propag->Propagate(trail)); + EXPECT_EQ(propag->Status(id1), EnforcementStatus::IS_ENFORCED); + EXPECT_EQ(propag->Status(id2), EnforcementStatus::IS_ENFORCED); + EXPECT_EQ(propag->Status(id3), EnforcementStatus::CAN_PROPAGATE); + const int level = sat_solver->CurrentDecisionLevel(); + + sat_solver->EnqueueDecisionIfNotConflicting(Literal(+3)); + EXPECT_TRUE(propag->Propagate(trail)); + EXPECT_EQ(propag->Status(id1), EnforcementStatus::IS_ENFORCED); + EXPECT_EQ(propag->Status(id2), EnforcementStatus::IS_ENFORCED); + EXPECT_EQ(propag->Status(id3), EnforcementStatus::IS_ENFORCED); + + sat_solver->Backtrack(level); + EXPECT_EQ(propag->Status(id1), EnforcementStatus::IS_ENFORCED); + EXPECT_EQ(propag->Status(id2), EnforcementStatus::IS_ENFORCED); + EXPECT_EQ(propag->Status(id3), EnforcementStatus::CAN_PROPAGATE); +} + +TEST(EnforcementPropagatorTest, AddingAtPositiveLevelTrue) { + Model 
model; + auto* sat_solver = model.GetOrCreate(); + auto* trail = model.GetOrCreate(); + auto* propag = model.GetOrCreate(); + sat_solver->SetNumVariables(10); + + EXPECT_TRUE(propag->Propagate(trail)); + sat_solver->EnqueueDecisionIfNotConflicting(Literal(+1)); + EXPECT_TRUE(propag->Propagate(trail)); + + const EnforcementId id = propag->Register(std::vector{+1}); + EXPECT_EQ(propag->Status(id), EnforcementStatus::IS_ENFORCED); + + sat_solver->Backtrack(0); + EXPECT_TRUE(propag->Propagate(trail)); + EXPECT_EQ(propag->Status(id), EnforcementStatus::CAN_PROPAGATE); +} + +TEST(EnforcementPropagatorTest, AddingAtPositiveLevelFalse) { + Model model; + auto* sat_solver = model.GetOrCreate(); + auto* trail = model.GetOrCreate(); + auto* propag = model.GetOrCreate(); + sat_solver->SetNumVariables(10); + + EXPECT_TRUE(propag->Propagate(trail)); + sat_solver->EnqueueDecisionIfNotConflicting(Literal(-1)); + EXPECT_TRUE(propag->Propagate(trail)); + + const EnforcementId id = propag->Register(std::vector{+1}); + EXPECT_EQ(propag->Status(id), EnforcementStatus::IS_FALSE); + + sat_solver->Backtrack(0); + EXPECT_TRUE(propag->Propagate(trail)); + EXPECT_EQ(propag->Status(id), EnforcementStatus::CAN_PROPAGATE); +} + +// TEST copied from integer_expr test with little modif to use the new propag. 
+IntegerVariable AddWeightedSum(const absl::Span vars, + const absl::Span coeffs, + Model* model) { + IntegerVariable sum = model->Add(NewIntegerVariable(-10000, 10000)); + std::vector c; + std::vector v; + for (int i = 0; i < coeffs.size(); ++i) { + c.push_back(IntegerValue(coeffs[i])); + v.push_back(vars[i]); + } + c.push_back(IntegerValue(-1)); + v.push_back(sum); + + // <= sum + auto* propag = model->GetOrCreate(); + propag->AddConstraint({}, v, c, IntegerValue(0)); + + // >= sum + for (IntegerValue& ref : c) ref = -ref; + propag->AddConstraint({}, v, c, IntegerValue(0)); + + return sum; +} + +void AddWeightedSumLowerOrEqual(const absl::Span vars, + const absl::Span coeffs, int64_t rhs, + Model* model) { + std::vector c; + std::vector v; + for (int i = 0; i < coeffs.size(); ++i) { + c.push_back(IntegerValue(coeffs[i])); + v.push_back(vars[i]); + } + auto* propag = model->GetOrCreate(); + propag->AddConstraint({}, v, c, IntegerValue(rhs)); +} + +void AddWeightedSumLowerOrEqualReified( + Literal equiv, const absl::Span vars, + const absl::Span coeffs, int64_t rhs, Model* model) { + std::vector c; + std::vector v; + for (int i = 0; i < coeffs.size(); ++i) { + c.push_back(IntegerValue(coeffs[i])); + v.push_back(vars[i]); + } + auto* propag = model->GetOrCreate(); + propag->AddConstraint({equiv}, v, c, IntegerValue(rhs)); + + for (IntegerValue& ref : c) ref = -ref; + propag->AddConstraint({equiv.Negated()}, v, c, IntegerValue(-rhs) - 1); +} + +// A simple macro to make the code more readable. 
+#define EXPECT_BOUNDS_EQ(var, lb, ub) \ + EXPECT_EQ(model.Get(LowerBound(var)), lb); \ + EXPECT_EQ(model.Get(UpperBound(var)), ub) + +TEST(WeightedSumTest, LevelZeroPropagation) { + Model model; + std::vector vars{model.Add(NewIntegerVariable(4, 9)), + model.Add(NewIntegerVariable(-7, -2)), + model.Add(NewIntegerVariable(3, 8))}; + + const IntegerVariable sum = AddWeightedSum(vars, {1, -2, 3}, &model); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_EQ(model.Get(LowerBound(sum)), 4 + 2 * 2 + 3 * 3); + EXPECT_EQ(model.Get(UpperBound(sum)), 9 + 2 * 7 + 3 * 8); + + // Setting this leave only a slack of 2. + model.Add(LowerOrEqual(sum, 19)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(vars[0], 4, 6); // coeff = 1, slack = 2 + EXPECT_BOUNDS_EQ(vars[1], -3, -2); // coeff = 2, slack = 1 + EXPECT_BOUNDS_EQ(vars[2], 3, 3); // coeff = 3, slack = 0 +} + +// This one used to fail before CL 139204507. +TEST(WeightedSumTest, LevelZeroPropagationWithNegativeNumbers) { + Model model; + std::vector vars{model.Add(NewIntegerVariable(-5, 0)), + model.Add(NewIntegerVariable(-6, 0)), + model.Add(NewIntegerVariable(-4, 0))}; + + const IntegerVariable sum = AddWeightedSum(vars, {3, 3, 3}, &model); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_EQ(model.Get(LowerBound(sum)), -15 * 3); + EXPECT_EQ(model.Get(UpperBound(sum)), 0); + + // Setting this leave only a slack of 5 which is not an exact multiple of 3. 
+ model.Add(LowerOrEqual(sum, -40)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(vars[0], -5, -4); + EXPECT_BOUNDS_EQ(vars[1], -6, -5); + EXPECT_BOUNDS_EQ(vars[2], -4, -3); +} + +TEST(WeightedSumLowerOrEqualTest, UnaryRounding) { + Model model; + IntegerVariable var = model.Add(NewIntegerVariable(0, 10)); + const std::vector coeffs = {-100}; + AddWeightedSumLowerOrEqual({var}, coeffs, -320, &model); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_EQ(model.Get(LowerBound(var)), 4); +} + +TEST(ReifiedWeightedSumLeTest, ReifToBoundPropagation) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(4, 9)); + AddWeightedSumLowerOrEqualReified(r, {var}, {1}, 6, &model); + EXPECT_EQ( + SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions({r})); + EXPECT_BOUNDS_EQ(var, 4, 6); + EXPECT_EQ(SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions( + {r.Negated()})); + EXPECT_BOUNDS_EQ(var, 7, 9); // The associated literal (x <= 6) is false. +} + +TEST(ReifiedWeightedSumLeTest, ReifToBoundPropagationWithNegatedCoeff) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(-9, 9)); + AddWeightedSumLowerOrEqualReified(r, {var}, {-3}, 7, &model); + EXPECT_EQ( + SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions({r})); + EXPECT_BOUNDS_EQ(var, -2, 9); + EXPECT_EQ(SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions( + {r.Negated()})); + EXPECT_BOUNDS_EQ(var, -9, -3); // The associated literal (x >= -2) is false. 
+} + +TEST(ReifiedWeightedSumGeTest, ReifToBoundPropagation) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(4, 9)); + AddWeightedSumLowerOrEqualReified(r, {var}, {-1}, -6, &model); + EXPECT_EQ( + SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions({r})); + EXPECT_BOUNDS_EQ(var, 6, 9); + EXPECT_EQ(SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions( + {r.Negated()})); + EXPECT_BOUNDS_EQ(var, 4, 5); +} + +TEST(ReifiedWeightedSumTest, BoundToReifTrueLe) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(4, 9)); + AddWeightedSumLowerOrEqualReified(r, {var}, {1}, 9, &model); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_TRUE(model.Get(Value(r))); +} + +TEST(ReifiedWeightedSumTest, BoundToReifFalseLe) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(4, 9)); + AddWeightedSumLowerOrEqualReified(r, {var}, {1}, 3, &model); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_FALSE(model.Get(Value(r))); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/model_test.cc b/ortools/sat/model_test.cc new file mode 100644 index 0000000000..b3862ff5d4 --- /dev/null +++ b/ortools/sat/model_test.cc @@ -0,0 +1,92 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/model.h" + +#include + +#include "gtest/gtest.h" + +namespace operations_research { +namespace sat { +namespace { + +struct A { + A() = default; + explicit A(Model* model) {} + std::string name; +}; + +class B { + public: + explicit B(A* a) : a_(a) {} + explicit B(Model* model) : a_(model->GetOrCreate()) {} + + std::string name() const { return a_->name; } + + private: + A* a_; +}; + +TEST(ModelTest, RecursiveCreationTest) { + Model model; + B* b = model.GetOrCreate(); + model.GetOrCreate()->name = "test"; + EXPECT_EQ("test", b->name()); +} + +struct C1 { + C1() = default; +}; +struct C2 { + explicit C2(Model* model) {} +}; +struct C3 { + C3() : name("no_arg") {} + explicit C3(Model*) : name("model") {} + std::string name; +}; + +TEST(ModelTest, DefaultConstructorFallback) { + Model model; + model.GetOrCreate(); + model.GetOrCreate(); + EXPECT_EQ(model.GetOrCreate()->name, "model"); +} + +TEST(ModelTest, Register) { + Model model; + C3 c3; + c3.name = "Shared struct"; + model.Register(&c3); + EXPECT_EQ(model.GetOrCreate()->name, c3.name); +} + +TEST(ModelTest, RegisterDeathTest) { + Model model; + C3 c3; + model.Register(&c3); + C3 c3_2; + EXPECT_DEATH(model.Register(&c3_2), ""); +} + +TEST(ModelTest, RegisterDeathTest2) { + Model model; + model.GetOrCreate(); + C3 c3; + EXPECT_DEATH(model.Register(&c3), ""); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/optimization_test.cc b/ortools/sat/optimization_test.cc new file mode 100644 index 0000000000..0401dc74fa --- 
/dev/null +++ b/ortools/sat/optimization_test.cc @@ -0,0 +1,172 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/optimization.h" + +#include + +#include +#include +#include +#include + +#include "absl/log/check.h" +#include "absl/random/bit_gen_ref.h" +#include "absl/random/distributions.h" +#include "absl/strings/str_format.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/sat/boolean_problem.h" +#include "ortools/sat/boolean_problem.pb.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/integer_search.h" +#include "ortools/sat/model.h" +#include "ortools/sat/pb_constraint.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/sat_solver.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::ElementsAre; + +// Test the lazy encoding logic on a trivial problem. 
+TEST(MinimizeIntegerVariableWithLinearScanAndLazyEncodingTest, BasicProblem) { + Model model; + IntegerVariable var = model.Add(NewIntegerVariable(-5, 10)); + model.GetOrCreate()->fixed_search = + FirstUnassignedVarAtItsMinHeuristic({var}, &model); + ConfigureSearchHeuristics(&model); + int num_feasible_solution = 0; + SatSolver::Status status = + MinimizeIntegerVariableWithLinearScanAndLazyEncoding( + var, + /*feasible_solution_observer=*/ + [var, &num_feasible_solution, &model]() { + ++num_feasible_solution; + EXPECT_EQ(model.Get(Value(var)), -5); + }, + + &model); + EXPECT_EQ(num_feasible_solution, 1); + EXPECT_EQ(status, SatSolver::Status::INFEASIBLE); // Search done. +} + +TEST(MinimizeIntegerVariableWithLinearScanAndLazyEncodingTest, + BasicProblemWithSolutionLimit) { + Model model; + SatParameters* parameters = model.GetOrCreate(); + parameters->set_stop_after_first_solution(true); + IntegerVariable var = model.Add(NewIntegerVariable(-5, 10)); + model.GetOrCreate()->fixed_search = + FirstUnassignedVarAtItsMinHeuristic({var}, &model); + ConfigureSearchHeuristics(&model); + + SatSolver::Status status = + MinimizeIntegerVariableWithLinearScanAndLazyEncoding( + var, + /*feasible_solution_observer=*/ + [var, &model]() { EXPECT_EQ(model.Get(Value(var)), -5); }, &model); + EXPECT_EQ(status, SatSolver::Status::LIMIT_REACHED); +} + +TEST(MinimizeIntegerVariableWithLinearScanAndLazyEncodingTest, + BasicProblemWithBadHeuristic) { + Model model; + IntegerVariable var = model.Add(NewIntegerVariable(-5, 10)); + int expected_value = 10; + int num_feasible_solution = 0; + + model.GetOrCreate()->fixed_search = + FirstUnassignedVarAtItsMinHeuristic({NegationOf(var)}, &model); + ConfigureSearchHeuristics(&model); + + SatSolver::Status status = + MinimizeIntegerVariableWithLinearScanAndLazyEncoding( + var, + /*feasible_solution_observer=*/ + [&]() { + ++num_feasible_solution; + EXPECT_EQ(model.Get(Value(var)), expected_value--); + }, + &model); + 
EXPECT_EQ(num_feasible_solution, 16); + EXPECT_EQ(status, SatSolver::Status::INFEASIBLE); // Search done. +} + +// TODO(user): The core find the best solution right away here, so it doesn't +// really exercise the solution limit... +TEST(MinimizeWithCoreAndLazyEncodingTest, BasicProblemWithSolutionLimit) { + Model model; + SatParameters* parameters = model.GetOrCreate(); + parameters->set_stop_after_first_solution(true); + IntegerVariable var = model.Add(NewIntegerVariable(-5, 10)); + std::vector vars = {var}; + std::vector coeffs = {IntegerValue(1)}; + + model.GetOrCreate()->fixed_search = + FirstUnassignedVarAtItsMinHeuristic({var}, &model); + ConfigureSearchHeuristics(&model); + + int num_solutions = 0; + CoreBasedOptimizer core( + var, vars, coeffs, + /*feasible_solution_observer=*/ + [var, &model, &num_solutions]() { + ++num_solutions; + EXPECT_EQ(model.Get(Value(var)), -5); + }, + &model); + SatSolver::Status status = core.Optimize(); + EXPECT_EQ(status, SatSolver::Status::INFEASIBLE); // i.e. optimal. 
+ EXPECT_EQ(1, num_solutions); +} + +TEST(PresolveBooleanLinearExpressionTest, NegateCoeff) { + Coefficient offset(0); + std::vector literals = Literals({+1}); + std::vector coefficients = {Coefficient(-3)}; + PresolveBooleanLinearExpression(&literals, &coefficients, &offset); + EXPECT_THAT(literals, ElementsAre(Literal(-1))); + EXPECT_THAT(coefficients, ElementsAre(Coefficient(3))); + EXPECT_EQ(offset, -3); +} + +TEST(PresolveBooleanLinearExpressionTest, Duplicate) { + Coefficient offset(0); + std::vector literals = Literals({+1, -4, +1}); + std::vector coefficients = {Coefficient(-3), Coefficient(7), + Coefficient(5)}; + PresolveBooleanLinearExpression(&literals, &coefficients, &offset); + EXPECT_THAT(literals, ElementsAre(Literal(+1), Literal(-4))); + EXPECT_THAT(coefficients, ElementsAre(Coefficient(2), Coefficient(7))); + EXPECT_EQ(offset, 0); +} + +TEST(PresolveBooleanLinearExpressionTest, NegatedLiterals) { + Coefficient offset(0); + std::vector literals = Literals({+1, -4, -1}); + std::vector coefficients = {Coefficient(-3), Coefficient(7), + Coefficient(-5)}; + PresolveBooleanLinearExpression(&literals, &coefficients, &offset); + EXPECT_THAT(literals, ElementsAre(Literal(+1), Literal(-4))); + EXPECT_THAT(coefficients, ElementsAre(Coefficient(2), Coefficient(7))); + EXPECT_EQ(offset, -5); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/parameters_validation_test.cc b/ortools/sat/parameters_validation_test.cc new file mode 100644 index 0000000000..3c039f3e8c --- /dev/null +++ b/ortools/sat/parameters_validation_test.cc @@ -0,0 +1,125 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/parameters_validation.h" + +#include +#include + +#include "google/protobuf/descriptor.h" +#include "google/protobuf/message.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/sat/sat_parameters.pb.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::HasSubstr; +using ::testing::IsEmpty; + +TEST(ValidateParameters, MaxTimeInSeconds) { + SatParameters params; + params.set_max_time_in_seconds(-1); + EXPECT_THAT(ValidateParameters(params), HasSubstr("non-negative")); +} + +TEST(ValidateParameters, ParametersInRange) { + SatParameters params; + params.set_mip_max_bound(-1); + EXPECT_THAT(ValidateParameters(params), + HasSubstr("'mip_max_bound' should be in")); +} + +TEST(ValidateParameters, NumWorkers) { + SatParameters params; + params.set_num_workers(-1); + EXPECT_THAT(ValidateParameters(params), HasSubstr("should be in [0,10000]")); +} + +TEST(ValidateParameters, NumSearchWorkers) { + SatParameters params; + params.set_num_search_workers(-1); + EXPECT_THAT(ValidateParameters(params), HasSubstr("should be in [0,10000]")); +} + +TEST(ValidateParameters, LinearizationLevel) { + SatParameters params; + params.set_linearization_level(-1); + EXPECT_THAT(ValidateParameters(params), HasSubstr("non-negative")); +} + +TEST(ValidateParameters, NumSharedTreeSearchWorkers) { + SatParameters params; + params.set_shared_tree_num_workers(-1); + EXPECT_THAT(ValidateParameters(params), HasSubstr("should be in [0,10000]")); +} + +TEST(ValidateParameters, 
SharedTreeSearchMaxNodesPerWorker) { + SatParameters params; + params.set_shared_tree_max_nodes_per_worker(0); + EXPECT_THAT(ValidateParameters(params), HasSubstr("positive")); +} + +TEST(ValidateParameters, SharedTreeSearchOpenLeavesPerWorker) { + SatParameters params; + params.set_shared_tree_open_leaves_per_worker(0.0); + EXPECT_THAT(ValidateParameters(params), HasSubstr("should be in [1,10000]")); +} + +TEST(ValidateParameters, UseSharedTreeSearch) { + SatParameters params; + params.set_use_shared_tree_search(true); + EXPECT_THAT(ValidateParameters(params), HasSubstr("only be set on workers")); +} + +TEST(ValidateParameters, NaNs) { + const google::protobuf::Descriptor& descriptor = *SatParameters::descriptor(); + const google::protobuf::Reflection& reflection = + *SatParameters::GetReflection(); + for (int i = 0; i < descriptor.field_count(); ++i) { + const google::protobuf::FieldDescriptor* const field = descriptor.field(i); + SCOPED_TRACE(field->name()); + + SatParameters params; + switch (field->type()) { + case google::protobuf::FieldDescriptor::TYPE_DOUBLE: + reflection.SetDouble(¶ms, field, + std::numeric_limits::quiet_NaN()); + break; + case google::protobuf::FieldDescriptor::TYPE_FLOAT: + reflection.SetFloat(¶ms, field, + std::numeric_limits::quiet_NaN()); + break; + default: + continue; + } + + EXPECT_THAT(ValidateParameters(params), + AllOf(HasSubstr(field->name()), HasSubstr("NaN"))); + } +} + +TEST(ValidateParameters, ValidateSubsolvers) { + SatParameters params; + params.add_extra_subsolvers("not_defined"); + EXPECT_THAT(ValidateParameters(params), HasSubstr("is not valid")); + + params.add_subsolver_params()->set_name("not_defined"); + EXPECT_THAT(ValidateParameters(params), IsEmpty()); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/pb_constraint_test.cc b/ortools/sat/pb_constraint_test.cc new file mode 100644 index 0000000000..5b3d7d91be --- /dev/null +++ b/ortools/sat/pb_constraint_test.cc 
@@ -0,0 +1,673 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/pb_constraint.h" + +#include +#include +#include +#include + +#include "absl/container/flat_hash_map.h" +#include "absl/log/check.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/strong_vector.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::ContainerEq; + +template +auto LiteralsAre(Args... 
literals) { + return ::testing::ElementsAre(Literal(literals)...); +} + +std::vector MakePb( + absl::Span> input) { + std::vector result; + result.reserve(input.size()); + for (const auto p : input) { + result.push_back({Literal(p.first), p.second}); + } + return result; +} + +TEST(ComputeBooleanLinearExpressionCanonicalForm, RemoveZeroCoefficient) { + Coefficient bound_shift, max_value; + auto cst = MakePb({{+1, 4}, {+2, 0}, {+3, 4}, {+5, 0}}); + const auto result = MakePb({{+1, 4}, {+3, 4}}); + EXPECT_TRUE(ComputeBooleanLinearExpressionCanonicalForm(&cst, &bound_shift, + &max_value)); + EXPECT_THAT(cst, ContainerEq(result)); + EXPECT_EQ(bound_shift, 0); + EXPECT_EQ(max_value, 8); +} + +TEST(ComputeBooleanLinearExpressionCanonicalForm, MakeAllCoefficientPositive) { + Coefficient bound_shift, max_value; + auto cst = MakePb({{+1, 4}, {+2, -3}, {+3, 4}, {+5, -1}}); + const auto result = MakePb({{-5, 1}, {-2, 3}, {+1, 4}, {+3, 4}}); + EXPECT_TRUE(ComputeBooleanLinearExpressionCanonicalForm(&cst, &bound_shift, + &max_value)); + EXPECT_THAT(cst, ContainerEq(result)); + EXPECT_EQ(bound_shift, 4); + EXPECT_EQ(max_value, 12); +} + +TEST(ComputeBooleanLinearExpressionCanonicalForm, MergeSameVariableCase1) { + Coefficient bound_shift, max_value; + // 4x -3(1-x) +4(1-x) -x is the same as to 2x + 1 + auto cst = MakePb({{+1, 4}, {-1, -3}, {-1, 4}, {+1, -1}}); + const auto result = MakePb({{+1, 2}}); + EXPECT_TRUE(ComputeBooleanLinearExpressionCanonicalForm(&cst, &bound_shift, + &max_value)); + EXPECT_THAT(cst, ContainerEq(result)); + EXPECT_EQ(bound_shift, -1); + EXPECT_EQ(max_value, 2); +} + +TEST(ComputeBooleanLinearExpressionCanonicalForm, MergeSameVariableCase2) { + Coefficient bound_shift, max_value; + // 4x -3(1-x) +4(1-x) -5x is the same as to -2x + 1 + // which is expressed as 2(1-x) -2 +1 + auto cst = MakePb({{+1, 4}, {-1, -3}, {-1, 4}, {+1, -5}}); + const auto result = MakePb({{-1, 2}}); + EXPECT_TRUE(ComputeBooleanLinearExpressionCanonicalForm(&cst, &bound_shift, + 
&max_value)); + EXPECT_THAT(cst, ContainerEq(result)); + EXPECT_EQ(bound_shift, 1); + EXPECT_EQ(max_value, 2); +} + +TEST(ComputeBooleanLinearExpressionCanonicalForm, MergeSameVariableCase3) { + Coefficient bound_shift, max_value; + // Here the last variable will disappear completely + auto cst = MakePb({{+1, 4}, {+2, -3}, {+2, 4}, {+2, -1}}); + const auto result = MakePb({{+1, 4}}); + EXPECT_TRUE(ComputeBooleanLinearExpressionCanonicalForm(&cst, &bound_shift, + &max_value)); + EXPECT_THAT(cst, ContainerEq(result)); + EXPECT_EQ(bound_shift, 0); + EXPECT_EQ(max_value, 4); +} + +TEST(ComputeBooleanLinearExpressionCanonicalForm, Overflow) { + Coefficient bound_shift, max_value; + auto cst = MakePb({{+1, -kCoefficientMax}, {+2, -kCoefficientMax}}); + EXPECT_FALSE(ComputeBooleanLinearExpressionCanonicalForm(&cst, &bound_shift, + &max_value)); +} + +TEST(ComputeBooleanLinearExpressionCanonicalForm, BigIntCase) { + Coefficient bound_shift, max_value; + auto cst = MakePb({{+1, -kCoefficientMax}, {-1, -kCoefficientMax}}); + const auto result = MakePb({}); + EXPECT_TRUE(ComputeBooleanLinearExpressionCanonicalForm(&cst, &bound_shift, + &max_value)); + EXPECT_THAT(cst, ContainerEq(result)); + EXPECT_EQ(bound_shift, kCoefficientMax); + EXPECT_EQ(max_value, 0); +} + +TEST(ApplyLiteralMappingTest, BasicTest) { + Coefficient bound_shift, max_value; + + // This is needed to initizalize the ITIVector below. + std::vector temp{ + kTrueLiteralIndex, kFalseLiteralIndex, // var1 fixed to true. 
+ Literal(-1).Index(), Literal(+1).Index(), // var2 mapped to not(var1) + Literal(+2).Index(), Literal(-2).Index(), // var3 mapped to var2 + kFalseLiteralIndex, kTrueLiteralIndex, // var4 fixed to false + Literal(+2).Index(), Literal(-2).Index()}; // var5 mapped to var2 + util_intops::StrongVector mapping(temp.begin(), + temp.end()); + + auto cst = MakePb({{+1, 4}, {+3, -3}, {+2, 4}, {+4, 7}, {+5, 5}}); + EXPECT_TRUE(ApplyLiteralMapping(mapping, &cst, &bound_shift, &max_value)); + const auto result = MakePb({{+2, 2}, {-1, 4}}); + EXPECT_THAT(cst, ContainerEq(result)); + EXPECT_EQ(bound_shift, -4); + EXPECT_EQ(max_value, 6); +} + +TEST(SimplifyCanonicalBooleanLinearConstraint, CoefficientsLargerThanRhs) { + auto cst = MakePb({{+1, 4}, {+2, 5}, {+3, 6}, {-4, 7}}); + Coefficient rhs(10); + SimplifyCanonicalBooleanLinearConstraint(&cst, &rhs); + EXPECT_THAT(cst, ContainerEq(cst)); + rhs = Coefficient(5); + SimplifyCanonicalBooleanLinearConstraint(&cst, &rhs); + const auto result = MakePb({{+1, 4}, {+2, 5}, {+3, 6}, {-4, 6}}); + EXPECT_THAT(cst, ContainerEq(result)); +} + +TEST(CanonicalBooleanLinearProblem, BasicTest) { + auto cst = MakePb({{+1, 4}, {+2, -5}, {+3, 6}, {-4, 7}}); + CanonicalBooleanLinearProblem problem; + problem.AddLinearConstraint(true, Coefficient(-5), true, Coefficient(5), + &cst); + + // We have just one constraint because the >= -5 is always true. + EXPECT_EQ(1, problem.NumConstraints()); + const auto result0 = MakePb({{+1, 4}, {-2, 5}, {+3, 6}, {-4, 7}}); + EXPECT_EQ(problem.Rhs(0), 10); + EXPECT_THAT(problem.Constraint(0), ContainerEq(result0)); + + // So lets restrict it and only use the lower bound + // Note that the API destroy the input so we have to reconstruct it. + cst = MakePb({{+1, 4}, {+2, -5}, {+3, 6}, {-4, 7}}); + problem.AddLinearConstraint(true, Coefficient(-4), false, + /*unused*/ Coefficient(-10), &cst); + + // Now we have another constraint corresponding to the >= -4 constraint. 
+ EXPECT_EQ(2, problem.NumConstraints()); + const auto result1 = MakePb({{-1, 4}, {+2, 5}, {-3, 6}, {+4, 7}}); + EXPECT_EQ(problem.Rhs(1), 21); + EXPECT_THAT(problem.Constraint(1), ContainerEq(result1)); +} + +TEST(CanonicalBooleanLinearProblem, BasicTest2) { + auto cst = MakePb({{+1, 1}, {+2, 2}}); + CanonicalBooleanLinearProblem problem; + problem.AddLinearConstraint(true, Coefficient(2), false, + /*unused*/ Coefficient(0), &cst); + + EXPECT_EQ(1, problem.NumConstraints()); + const auto result = MakePb({{-1, 1}, {-2, 2}}); + EXPECT_EQ(problem.Rhs(0), 1); + EXPECT_THAT(problem.Constraint(0), ContainerEq(result)); +} + +TEST(CanonicalBooleanLinearProblem, OverflowCases) { + auto cst = MakePb({}); + CanonicalBooleanLinearProblem problem; + for (int i = 0; i < 2; ++i) { + std::vector reference; + if (i == 0) { + // This is a constraint with a "bound shift" of 10. + reference = MakePb({{+1, -10}, {+2, 10}}); + } else { + // This is a constraint with a "bound shift" of -10 since its domain value + // is actually [10, 10]. + reference = MakePb({{+1, 10}, {-1, 10}}); + } + + // All These constraint are trivially satisfiables, so no new constraints + // should be added. + cst = reference; + EXPECT_TRUE(problem.AddLinearConstraint(true, -kCoefficientMax, true, + kCoefficientMax, &cst)); + cst = reference; + EXPECT_TRUE(problem.AddLinearConstraint(true, -kCoefficientMax - 1, true, + kCoefficientMax, &cst)); + cst = reference; + EXPECT_TRUE(problem.AddLinearConstraint(true, Coefficient(-10), true, + Coefficient(10), &cst)); + + // These are trivially unsat, and all AddLinearConstraint() should return + // false. 
+ cst = reference; + EXPECT_FALSE(problem.AddLinearConstraint(true, kCoefficientMax, true, + kCoefficientMax, &cst)); + cst = reference; + EXPECT_FALSE(problem.AddLinearConstraint(true, -kCoefficientMax, true, + -kCoefficientMax, &cst)); + cst = reference; + EXPECT_FALSE(problem.AddLinearConstraint( + true, -kCoefficientMax, true, -kCoefficientMax - Coefficient(1), &cst)); + } + + // No constraints were actually added. + EXPECT_EQ(problem.NumConstraints(), 0); +} + +// Constructs a vector from the current trail, so we can use LiteralsAre(). +std::vector TrailToVector(const Trail& trail) { + std::vector output; + for (int i = 0; i < trail.Index(); ++i) output.push_back(trail[i]); + return output; +} + +TEST(UpperBoundedLinearConstraintTest, ConstructionAndBasicPropagation) { + Coefficient threshold; + PbConstraintsEnqueueHelper helper; + helper.reasons.resize(10); + Trail trail; + trail.Resize(10); + + UpperBoundedLinearConstraint cst( + MakePb({{+1, 4}, {+2, 4}, {-3, 5}, {+4, 10}})); + cst.InitializeRhs(Coefficient(7), 0, &threshold, &trail, &helper); + EXPECT_EQ(threshold, 2); + EXPECT_THAT(TrailToVector(trail), LiteralsAre(-4)); + + trail.Enqueue(Literal(-3), AssignmentType::kSearchDecision); + threshold -= 5; // The coeff of -3 in cst. + EXPECT_TRUE(cst.Propagate(trail.Info(Literal(-3).Variable()).trail_index, + &threshold, &trail, &helper)); + EXPECT_EQ(threshold, 2); + EXPECT_THAT(TrailToVector(trail), LiteralsAre(-4, -3, -1, -2)); + + // Untrail. + trail.Untrail(0); + threshold += 5; + cst.Untrail(&threshold, 0); + EXPECT_EQ(threshold, 2); +} + +TEST(UpperBoundedLinearConstraintTest, Conflict) { + Coefficient threshold; + Trail trail; + trail.Resize(10); + PbConstraintsEnqueueHelper helper; + helper.reasons.resize(10); + + // At most one constraint. 
+ UpperBoundedLinearConstraint cst( + MakePb({{+1, 1}, {+2, 1}, {+3, 1}, {+4, 1}})); + cst.InitializeRhs(Coefficient(1), 0, &threshold, &trail, &helper); + EXPECT_EQ(threshold, 0); + + // Two assignment from other part of the solver. + trail.SetDecisionLevel(1); + trail.Enqueue(Literal(+1), AssignmentType::kSearchDecision); + trail.SetDecisionLevel(2); + trail.Enqueue(Literal(+2), AssignmentType::kSearchDecision); + + // We propagate only +1. + threshold -= 1; + EXPECT_FALSE(cst.Propagate(trail.Info(Literal(+1).Variable()).trail_index, + &threshold, &trail, &helper)); + EXPECT_THAT(helper.conflict, LiteralsAre(-1, -2)); +} + +TEST(UpperBoundedLinearConstraintTest, CompactReason) { + Coefficient threshold; + Trail trail; + trail.Resize(10); + PbConstraintsEnqueueHelper helper; + helper.reasons.resize(10); + + // At most one constraint. + UpperBoundedLinearConstraint cst( + MakePb({{+1, 1}, {+2, 2}, {+3, 3}, {+4, 4}})); + cst.InitializeRhs(Coefficient(7), 0, &threshold, &trail, &helper); + EXPECT_EQ(threshold, 3); + + // Two assignment from other part of the solver. + trail.SetDecisionLevel(1); + trail.Enqueue(Literal(+1), AssignmentType::kSearchDecision); + trail.SetDecisionLevel(2); + trail.Enqueue(Literal(+2), AssignmentType::kSearchDecision); + trail.SetDecisionLevel(3); + trail.Enqueue(Literal(+3), AssignmentType::kSearchDecision); + + // We propagate when +3 is processed. + threshold = -3; + const int source_trail_index = trail.Info(Literal(+3).Variable()).trail_index; + EXPECT_TRUE(cst.Propagate(source_trail_index, &threshold, &trail, &helper)); + EXPECT_EQ(trail.Index(), 4); + EXPECT_EQ(trail[3], Literal(-4)); + + // -1 do not need to be in the reason since {-3, -2} propagates exactly + // the same way. 
+ cst.FillReason(trail, source_trail_index, Literal(-4).Variable(), + &helper.conflict); + EXPECT_THAT(helper.conflict, LiteralsAre(-3, -2)); +} + +TEST(PbConstraintsTest, Duplicates) { + Model model; + PbConstraints& csts = *(model.GetOrCreate()); + Trail& trail = *(model.GetOrCreate()); + + trail.Resize(10); + csts.Resize(10); + + CHECK_EQ(csts.NumberOfConstraints(), 0); + csts.AddConstraint(MakePb({{-1, 7}, {-2, 7}, {+3, 7}}), Coefficient(20), + &trail); + csts.AddConstraint(MakePb({{-1, 1}, {-2, 3}, {+3, 7}}), Coefficient(20), + &trail); + CHECK_EQ(csts.NumberOfConstraints(), 2); + + // Adding the same constraints will do nothing. + csts.AddConstraint(MakePb({{-1, 7}, {-2, 7}, {+3, 7}}), Coefficient(20), + &trail); + CHECK_EQ(csts.NumberOfConstraints(), 2); + CHECK_EQ(trail.Index(), 0); + + // Over constraining it will fix the 3 literals. + csts.AddConstraint(MakePb({{-1, 7}, {-2, 7}, {+3, 7}}), Coefficient(6), + &trail); + CHECK_EQ(csts.NumberOfConstraints(), 2); + EXPECT_THAT(TrailToVector(trail), LiteralsAre(+1, +2, -3)); +} + +TEST(PbConstraintsTest, BasicPropagation) { + Model model; + PbConstraints& csts = *(model.GetOrCreate()); + Trail& trail = *(model.GetOrCreate()); + + trail.Resize(10); + trail.SetDecisionLevel(1); + trail.Enqueue(Literal(-1), AssignmentType::kSearchDecision); + + csts.Resize(10); + csts.AddConstraint(MakePb({{-1, 1}, {+2, 1}}), Coefficient(1), &trail); + csts.AddConstraint(MakePb({{-1, 7}, {-2, 7}, {+3, 7}}), Coefficient(20), + &trail); + csts.AddConstraint(MakePb({{-1, 1}, {-2, 1}, {-3, 1}, {+4, 1}}), + Coefficient(3), &trail); + + EXPECT_THAT(TrailToVector(trail), LiteralsAre(-1, -2)); + while (!csts.PropagationIsDone(trail)) EXPECT_TRUE(csts.Propagate(&trail)); + EXPECT_THAT(TrailToVector(trail), LiteralsAre(-1, -2, -3, -4)); + + // Test the reason for each assignment. 
+ EXPECT_THAT(trail.Reason(Literal(-2).Variable()), LiteralsAre(+1)); + EXPECT_THAT(trail.Reason(Literal(-3).Variable()), LiteralsAre(+2, +1)); + EXPECT_THAT(trail.Reason(Literal(-4).Variable()), LiteralsAre(+3, +2, +1)); + + // Untrail, and repropagate everything. + csts.Untrail(trail, 0); + trail.Untrail(0); + trail.Enqueue(Literal(-1), AssignmentType::kSearchDecision); + while (!csts.PropagationIsDone(trail)) EXPECT_TRUE(csts.Propagate(&trail)); + EXPECT_THAT(TrailToVector(trail), LiteralsAre(-1, -2, -3, -4)); +} + +TEST(PbConstraintsTest, BasicDeletion) { + Model model; + PbConstraints& csts = *(model.GetOrCreate()); + Trail& trail = *(model.GetOrCreate()); + + PbConstraintsEnqueueHelper helper; + helper.reasons.resize(10); + trail.Resize(10); + trail.SetDecisionLevel(0); + csts.Resize(10); + csts.AddConstraint(MakePb({{-1, 1}, {+2, 1}}), Coefficient(1), &trail); + csts.AddConstraint(MakePb({{-1, 7}, {-2, 7}, {+3, 7}}), Coefficient(20), + &trail); + csts.AddConstraint(MakePb({{-1, 1}, {-2, 1}, {-3, 1}, {+4, 1}}), + Coefficient(3), &trail); + + // Delete the first constraint. + EXPECT_EQ(3, csts.NumberOfConstraints()); + csts.DeleteConstraint(0); + EXPECT_EQ(2, csts.NumberOfConstraints()); + + // The constraint 1 is deleted, so enqueuing -1 shouldn't propagate. + trail.Enqueue(Literal(-1), AssignmentType::kSearchDecision); + while (!csts.PropagationIsDone(trail)) EXPECT_TRUE(csts.Propagate(&trail)); + EXPECT_EQ("-1", trail.DebugString()); + + // But also enqueing -2 should. + trail.Enqueue(Literal(-2), AssignmentType::kSearchDecision); + while (!csts.PropagationIsDone(trail)) EXPECT_TRUE(csts.Propagate(&trail)); + EXPECT_EQ("-1 -2 -3 -4", trail.DebugString()); + + // Let's bactrack. + trail.Untrail(1); + csts.Untrail(trail, 1); + + // Let's delete one more constraint. + csts.DeleteConstraint(0); + EXPECT_EQ(1, csts.NumberOfConstraints()); + + // Now, if we enqueue -2 again, nothing is propagated. 
+ trail.Enqueue(Literal(-2), AssignmentType::kSearchDecision); + while (!csts.PropagationIsDone(trail)) EXPECT_TRUE(csts.Propagate(&trail)); + EXPECT_EQ("-1 -2", trail.DebugString()); + + // We need to also enqueue -3 for -4 to be propagated. + trail.Enqueue(Literal(-3), AssignmentType::kSearchDecision); + while (!csts.PropagationIsDone(trail)) EXPECT_TRUE(csts.Propagate(&trail)); + EXPECT_EQ("-1 -2 -3 -4", trail.DebugString()); + + // Deleting everything doesn't crash. + csts.DeleteConstraint(0); + EXPECT_EQ(0, csts.NumberOfConstraints()); +} + +TEST(PbConstraintsTest, UnsatAtConstruction) { + Model model; + PbConstraints& csts = *(model.GetOrCreate()); + Trail& trail = *(model.GetOrCreate()); + + trail.Resize(10); + trail.SetDecisionLevel(1); + trail.Enqueue(Literal(+1), AssignmentType::kUnitReason); + trail.Enqueue(Literal(+2), AssignmentType::kUnitReason); + trail.Enqueue(Literal(+3), AssignmentType::kUnitReason); + + csts.Resize(10); + + EXPECT_TRUE( + csts.AddConstraint(MakePb({{+1, 1}, {+2, 1}}), Coefficient(2), &trail)); + while (!csts.PropagationIsDone(trail)) EXPECT_TRUE(csts.Propagate(&trail)); + + // We need to propagate before adding this constraint for the AddConstraint() + // to notice that it is unsat. Otherwise, it will be noticed at propagation + // time. 
+ EXPECT_FALSE(csts.AddConstraint(MakePb({{+1, 1}, {+2, 1}, {+3, 1}}), + Coefficient(2), &trail)); + EXPECT_TRUE(csts.AddConstraint(MakePb({{+1, 1}, {+2, 1}, {+4, 1}}), + Coefficient(2), &trail)); +} + +TEST(PbConstraintsTest, AddConstraintWithLevel0Propagation) { + Model model; + PbConstraints& csts = *(model.GetOrCreate()); + Trail& trail = *(model.GetOrCreate()); + + trail.Resize(10); + trail.SetDecisionLevel(0); + csts.Resize(10); + + EXPECT_TRUE(csts.AddConstraint(MakePb({{+1, 1}, {+2, 3}, {+3, 7}}), + Coefficient(2), &trail)); + EXPECT_EQ(trail.Index(), 2); + EXPECT_EQ(trail[0], Literal(-2)); + EXPECT_EQ(trail[1], Literal(-3)); +} + +TEST(PbConstraintsTest, AddConstraintUMR) { + const auto cst = MakePb({{+3, 7}}); + UpperBoundedLinearConstraint c(cst); + // Calling hashing on c generates an UMR that is triggered during the hash_map + // lookup below. + const uint64_t ct_hash = c.hash(); + absl::flat_hash_map> store; + std::vector& vec = store[ct_hash]; + EXPECT_EQ(vec.size(), 0); +} + +TEST(PbConstraintsDeathTest, AddConstraintWithLevel0PropagationInSearch) { + Model model; + PbConstraints& csts = *(model.GetOrCreate()); + Trail& trail = *(model.GetOrCreate()); + + trail.Resize(10); + trail.SetDecisionLevel(10); + csts.Resize(10); + + // If the decision level is not 0, this will fail. 
+  ASSERT_DEATH(csts.AddConstraint(MakePb({{+1, 1}, {+2, 3}, {+3, 7}}),
+                                  Coefficient(2), &trail),
+               "var should have been propagated at an earlier level.");
+}
+
+TEST(PbConstraintsDeathTest, AddConstraintPrecondition) {
+  Model model;
+  PbConstraints& csts = *(model.GetOrCreate<PbConstraints>());
+  Trail& trail = *(model.GetOrCreate<Trail>());
+
+  trail.Resize(10);
+  trail.SetDecisionLevel(1);
+  trail.Enqueue(Literal(+1), AssignmentType::kSearchDecision);
+  trail.Enqueue(Literal(+2), AssignmentType::kUnitReason);
+  trail.SetDecisionLevel(2);
+  trail.Enqueue(Literal(+3), AssignmentType::kSearchDecision);
+  csts.Resize(10);
+
+  // We can't add this constraint since it is conflicting under the current
+  // assignment.
+  EXPECT_FALSE(csts.AddConstraint(MakePb({{+1, 1}, {+2, 1}, {+3, 1}}),
+                                  Coefficient(2), &trail));
+
+  trail.Untrail(trail.Index() - 1);  // Remove the +3.
+  EXPECT_EQ(trail.Index(), 2);
+  csts.Untrail(trail, 2);
+
+  // Adding this one at a decision level of 2 will also fail because it will
+  // propagate 3 from decision level 1.
+  ASSERT_DEATH(csts.AddConstraint(MakePb({{+1, 1}, {+2, 1}, {+3, 2}}),
+                                  Coefficient(3), &trail),
+               "var should have been propagated at an earlier level.");
+
+  // However, adding the same constraint while the decision level is 1 is ok.
+  // It will propagate -3 at the correct decision level.
+  trail.SetDecisionLevel(1);
+  EXPECT_TRUE(csts.AddConstraint(MakePb({{+1, 1}, {+2, 1}, {+3, 2}}),
+                                 Coefficient(3), &trail));
+  EXPECT_EQ(trail.Index(), 3);
+  EXPECT_EQ(trail[2], Literal(-3));
+}
+
+TEST(MutableUpperBoundedLinearConstraintTest, LinearAddition) {
+  MutableUpperBoundedLinearConstraint cst_a;
+  cst_a.ClearAndResize(5);
+  cst_a.AddTerm(Literal(+1), Coefficient(3));
+  cst_a.AddTerm(Literal(+2), Coefficient(4));
+  cst_a.AddTerm(Literal(+3), Coefficient(5));
+  cst_a.AddTerm(Literal(+4), Coefficient(1));
+  cst_a.AddTerm(Literal(+5), Coefficient(1));
+  cst_a.AddToRhs(Coefficient(10));
+
+  // The result of cst_a + cst_b is described in the comments.
+ MutableUpperBoundedLinearConstraint cst_b; + cst_b.ClearAndResize(5); + cst_b.AddTerm(Literal(+1), Coefficient(3)); // 3x + 3x = 6x + cst_b.AddTerm(Literal(-2), Coefficient(3)); // 4x + 3(1-x) = x + 3 + cst_b.AddTerm(Literal(+3), Coefficient(3)); // 5x + 3x = 8x + cst_b.AddTerm(Literal(-4), Coefficient(6)); // x + 6(1-x) = 5(1-x) + 1 + cst_b.AddTerm(Literal(+5), Coefficient(5)); // x + 5x = 6x + cst_b.AddToRhs(Coefficient(10)); + + for (BooleanVariable var : cst_b.PossibleNonZeros()) { + cst_a.AddTerm(cst_b.GetLiteral(var), cst_b.GetCoefficient(var)); + } + cst_a.AddToRhs(cst_b.Rhs()); + + EXPECT_EQ(cst_a.DebugString(), "6[+1] + 1[+2] + 8[+3] + 5[-4] + 6[+5] <= 16"); +} + +TEST(MutableUpperBoundedLinearConstraintTest, ReduceCoefficients) { + MutableUpperBoundedLinearConstraint cst; + cst.ClearAndResize(100); + Coefficient max_value(0); + for (int i = 1; i <= 10; ++i) { + max_value += Coefficient(i); + cst.AddTerm(Literal(BooleanVariable(i), true), Coefficient(i)); + } + cst.AddToRhs(max_value - 3); + + // The constraint is equivalent to sum i * Literal(i, false) >= 3, + // So we can reduce any coeff > 3 to 3 and change the rhs accordingly. 
+ cst.ReduceCoefficients(); + for (BooleanVariable var : cst.PossibleNonZeros()) { + EXPECT_LE(cst.GetCoefficient(var), 3); + } + EXPECT_EQ(cst.Rhs(), 1 + 2 + 3 * 8 - 3); +} + +TEST(MutableUpperBoundedLinearConstraintTest, ComputeSlackForTrailPrefix) { + MutableUpperBoundedLinearConstraint cst; + cst.ClearAndResize(100); + cst.AddTerm(Literal(+1), Coefficient(3)); + cst.AddTerm(Literal(+2), Coefficient(4)); + cst.AddTerm(Literal(+3), Coefficient(5)); + cst.AddTerm(Literal(+4), Coefficient(6)); + cst.AddTerm(Literal(+5), Coefficient(7)); + cst.AddToRhs(Coefficient(10)); + + Trail trail; + trail.Resize(10); + trail.Enqueue(Literal(+1), AssignmentType::kSearchDecision); + trail.Enqueue(Literal(-2), AssignmentType::kUnitReason); + trail.Enqueue(Literal(+3), AssignmentType::kSearchDecision); + trail.Enqueue(Literal(-5), AssignmentType::kSearchDecision); + trail.Enqueue(Literal(+4), AssignmentType::kSearchDecision); + + EXPECT_EQ(Coefficient(10), cst.ComputeSlackForTrailPrefix(trail, 0)); + EXPECT_EQ(Coefficient(10 - 3), cst.ComputeSlackForTrailPrefix(trail, 1)); + EXPECT_EQ(Coefficient(10 - 3), cst.ComputeSlackForTrailPrefix(trail, 2)); + EXPECT_EQ(Coefficient(10 - 3 - 5), cst.ComputeSlackForTrailPrefix(trail, 3)); + EXPECT_EQ(Coefficient(10 - 3 - 5), cst.ComputeSlackForTrailPrefix(trail, 4)); + EXPECT_EQ(Coefficient(10 - 14), cst.ComputeSlackForTrailPrefix(trail, 5)); + EXPECT_EQ(Coefficient(10 - 14), cst.ComputeSlackForTrailPrefix(trail, 50)); +} + +TEST(MutableUpperBoundedLinearConstraintTest, ReduceSlackToZero) { + MutableUpperBoundedLinearConstraint cst; + cst.ClearAndResize(100); + cst.AddTerm(Literal(+1), Coefficient(3)); + cst.AddTerm(Literal(+2), Coefficient(1)); + cst.AddTerm(Literal(+3), Coefficient(5)); + cst.AddTerm(Literal(+4), Coefficient(6)); + cst.AddTerm(Literal(+5), Coefficient(7)); + cst.AddToRhs(Coefficient(10)); + + Trail trail; + trail.Resize(10); + trail.Enqueue(Literal(+1), AssignmentType::kSearchDecision); + trail.Enqueue(Literal(-2), 
AssignmentType::kUnitReason); + trail.Enqueue(Literal(+3), AssignmentType::kSearchDecision); + trail.Enqueue(Literal(+5), AssignmentType::kSearchDecision); + trail.Enqueue(Literal(+4), AssignmentType::kSearchDecision); + + // +1, -2 and +3 gives a slack of 2. + EXPECT_EQ(Coefficient(2), cst.ComputeSlackForTrailPrefix(trail, 3)); + + // It also propagate -4 and -5, to have the same propagation but with a slack + // of zero, we can call ReduceSlackToZero(). + cst.ReduceSlackTo(trail, 3, Coefficient(2), Coefficient(0)); + + // +1 and +3 have the same coeff. + EXPECT_EQ(cst.GetCoefficient(BooleanVariable(0)), Coefficient(3)); + EXPECT_EQ(cst.GetCoefficient(BooleanVariable(2)), Coefficient(5)); + + // the variable 1 disappeared. + EXPECT_EQ(cst.GetCoefficient(BooleanVariable(1)), Coefficient(0)); + + // The propagated variable coeff has been reduced by the slack. + EXPECT_EQ(cst.GetCoefficient(BooleanVariable(3)), Coefficient(6 - 2)); + EXPECT_EQ(cst.GetCoefficient(BooleanVariable(4)), Coefficient(7 - 2)); + + // The rhs has been reduced by slack, and the slack is now 0. + EXPECT_EQ(cst.Rhs(), Coefficient(10 - 2)); + EXPECT_EQ(Coefficient(0), cst.ComputeSlackForTrailPrefix(trail, 3)); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/precedences_test.cc b/ortools/sat/precedences_test.cc new file mode 100644 index 0000000000..a18cfe2698 --- /dev/null +++ b/ortools/sat/precedences_test.cc @@ -0,0 +1,592 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/precedences.h" + +#include +#include + +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/integer_search.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_solver.h" +#include "ortools/util/sorted_interval_list.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::ElementsAre; +using ::testing::Pair; +using ::testing::UnorderedElementsAre; + +// A simple macro to make the code more readable. +// TODO(user): move that in a common place. test_utils? +#define EXPECT_BOUNDS_EQ(var, lb, ub) \ + EXPECT_EQ(integer_trail->LowerBound(var), lb); \ + EXPECT_EQ(integer_trail->UpperBound(var), ub) + +// All the tests here uses 10 integer variables initially in [0, 100]. +std::vector AddVariables(IntegerTrail* integer_trail) { + std::vector vars; + const int num_variables = 10; + const IntegerValue lower_bound(0); + const IntegerValue upper_bound(100); + for (int i = 0; i < num_variables; ++i) { + vars.push_back(integer_trail->AddIntegerVariable(lower_bound, upper_bound)); + } + return vars; +} + +TEST(PrecedenceRelationsTest, BasicAPI) { + Model model; + IntegerTrail* integer_trail = model.GetOrCreate(); + const std::vector vars = AddVariables(integer_trail); + + // Note that odd indices are for the negation. 
+ IntegerVariable a(0), b(2), c(4), d(6); + + PrecedenceRelations precedences(&model); + precedences.Add(a, b, 10); + precedences.Add(d, c, 7); + precedences.Add(b, d, 5); + + precedences.Build(); + EXPECT_EQ(precedences.GetOffset(a, b), 10); + EXPECT_EQ(precedences.GetOffset(NegationOf(b), NegationOf(a)), 10); + EXPECT_EQ(precedences.GetOffset(a, c), 22); + EXPECT_EQ(precedences.GetOffset(NegationOf(c), NegationOf(a)), 22); + EXPECT_EQ(precedences.GetOffset(a, d), 15); + EXPECT_EQ(precedences.GetOffset(NegationOf(d), NegationOf(a)), 15); + EXPECT_EQ(precedences.GetOffset(d, a), kMinIntegerValue); + + // Once built, we can update the offsets. + // Note however that this would not propagate through the precedence graphs. + precedences.Add(a, b, 15); + EXPECT_EQ(precedences.GetOffset(a, b), 15); + EXPECT_EQ(precedences.GetOffset(NegationOf(b), NegationOf(a)), 15); +} + +TEST(PrecedenceRelationsTest, CornerCase1) { + Model model; + IntegerTrail* integer_trail = model.GetOrCreate(); + const std::vector vars = AddVariables(integer_trail); + + // Note that odd indices are for the negation. + IntegerVariable a(0), b(2), c(4), d(6); + + PrecedenceRelations precedences(&model); + precedences.Add(a, b, 10); + precedences.Add(b, c, 7); + precedences.Add(b, d, 5); + precedences.Add(NegationOf(b), a, 5); + + precedences.Build(); + EXPECT_EQ(precedences.GetOffset(NegationOf(b), a), 5); + EXPECT_EQ(precedences.GetOffset(NegationOf(b), b), 15); + EXPECT_EQ(precedences.GetOffset(NegationOf(b), c), 22); + EXPECT_EQ(precedences.GetOffset(NegationOf(b), d), 20); +} + +TEST(PrecedenceRelationsTest, CornerCase2) { + Model model; + IntegerTrail* integer_trail = model.GetOrCreate(); + const std::vector vars = AddVariables(integer_trail); + + // Note that odd indices are for the negation. 
+ IntegerVariable a(0), b(2), c(4), d(6); + + PrecedenceRelations precedences(&model); + precedences.Add(NegationOf(a), a, 10); + precedences.Add(a, b, 7); + precedences.Add(a, c, 5); + precedences.Add(a, d, 2); + + precedences.Build(); + EXPECT_EQ(precedences.GetOffset(NegationOf(a), a), 10); + EXPECT_EQ(precedences.GetOffset(NegationOf(a), b), 17); + EXPECT_EQ(precedences.GetOffset(NegationOf(a), c), 15); + EXPECT_EQ(precedences.GetOffset(NegationOf(a), d), 12); +} + +TEST(PrecedenceRelationsTest, ConditionalRelations) { + Model model; + auto* sat_solver = model.GetOrCreate(); + auto* integer_trail = model.GetOrCreate(); + const std::vector vars = AddVariables(integer_trail); + + const Literal l(model.Add(NewBooleanVariable()), true); + EXPECT_TRUE(sat_solver->EnqueueDecisionIfNotConflicting(l)); + + // Note that odd indices are for the negation. + IntegerVariable a(0), b(2); + PrecedenceRelations precedences(&model); + precedences.PushConditionalRelation({l}, a, b, 15); + precedences.PushConditionalRelation({l}, a, b, 20); + + // We only keep the best one. + EXPECT_EQ(precedences.GetConditionalOffset(a, NegationOf(b)), -15); + EXPECT_THAT(precedences.GetConditionalEnforcements(a, NegationOf(b)), + ElementsAre(l)); + + // Backtrack works. 
+ EXPECT_TRUE(sat_solver->ResetToLevelZero()); + EXPECT_EQ(precedences.GetConditionalOffset(a, NegationOf(b)), + kMinIntegerValue); + EXPECT_THAT(precedences.GetConditionalEnforcements(a, NegationOf(b)), + ElementsAre()); +} + +TEST(PrecedencesPropagatorTest, Empty) { + Model model; + Trail* trail = model.GetOrCreate(); + PrecedencesPropagator* propagator = + model.GetOrCreate(); + EXPECT_TRUE(propagator->Propagate(trail)); + EXPECT_TRUE(propagator->Propagate(trail)); + propagator->Untrail(*trail, 0); +} + +TEST(PrecedencesPropagatorTest, BasicPropagationTest) { + Model model; + Trail* trail = model.GetOrCreate(); + IntegerTrail* integer_trail = model.GetOrCreate(); + PrecedencesPropagator* propagator = + model.GetOrCreate(); + + std::vector vars = AddVariables(integer_trail); + propagator->AddPrecedenceWithOffset(vars[0], vars[1], IntegerValue(4)); + propagator->AddPrecedenceWithOffset(vars[0], vars[2], IntegerValue(8)); + propagator->AddPrecedenceWithOffset(vars[1], vars[2], IntegerValue(10)); + + EXPECT_TRUE(propagator->Propagate(trail)); + EXPECT_BOUNDS_EQ(vars[0], 0, 86); + EXPECT_BOUNDS_EQ(vars[1], 4, 90); + EXPECT_BOUNDS_EQ(vars[2], 14, 100); + + // Lets now move vars[1] lower bound. 
+ std::vector lr; + std::vector ir; + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(vars[1], IntegerValue(20)), lr, ir)); + + EXPECT_TRUE(propagator->Propagate(trail)); + EXPECT_BOUNDS_EQ(vars[1], 20, 90); + EXPECT_BOUNDS_EQ(vars[2], 30, 100); +} + +TEST(PrecedencesPropagatorTest, PropagationTestWithVariableOffset) { + Model model; + Trail* trail = model.GetOrCreate(); + IntegerTrail* integer_trail = model.GetOrCreate(); + PrecedencesPropagator* propagator = + model.GetOrCreate(); + + std::vector vars = AddVariables(integer_trail); + propagator->AddPrecedenceWithVariableOffset(vars[0], vars[1], vars[2]); + + // Make var[2] >= 10 and propagate + std::vector lr; + std::vector ir; + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(vars[2], IntegerValue(10)), lr, ir)); + EXPECT_TRUE(propagator->Propagate(trail)); + EXPECT_BOUNDS_EQ(vars[0], 0, 90); + EXPECT_BOUNDS_EQ(vars[1], 10, 100); + + // Change the lower bound to 40 and propagate again. + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(vars[2], IntegerValue(40)), lr, ir)); + EXPECT_TRUE(propagator->Propagate(trail)); + EXPECT_BOUNDS_EQ(vars[0], 0, 60); + EXPECT_BOUNDS_EQ(vars[1], 40, 100); +} + +TEST(PrecedencesPropagatorTest, BasicPropagation) { + Model model; + Trail* trail = model.GetOrCreate(); + IntegerTrail* integer_trail = model.GetOrCreate(); + PrecedencesPropagator* propagator = + model.GetOrCreate(); + trail->Resize(10); + + std::vector vars = AddVariables(integer_trail); + propagator->AddPrecedenceWithOffset(vars[0], vars[1], IntegerValue(4)); + propagator->AddPrecedenceWithOffset(vars[1], vars[2], IntegerValue(8)); + propagator->AddPrecedenceWithOffset(vars[0], vars[3], IntegerValue(90)); + + // These arcs are not possible, because the upper bound of vars[0] is 10. 
+  propagator->AddConditionalPrecedenceWithOffset(vars[1], vars[0],
+                                                 IntegerValue(7), Literal(+1));
+  propagator->AddConditionalPrecedenceWithOffset(vars[2], vars[0],
+                                                 IntegerValue(-1), Literal(+2));
+
+  // These ones are ok.
+  propagator->AddConditionalPrecedenceWithOffset(vars[1], vars[0],
+                                                 IntegerValue(6), Literal(+3));
+  propagator->AddConditionalPrecedenceWithOffset(vars[2], vars[0],
+                                                 IntegerValue(-2), Literal(+4));
+
+  EXPECT_TRUE(propagator->Propagate(trail));
+  EXPECT_TRUE(trail->Assignment().LiteralIsFalse(Literal(+1)));
+  EXPECT_TRUE(trail->Assignment().LiteralIsFalse(Literal(+2)));
+  EXPECT_FALSE(trail->Assignment().VariableIsAssigned(Literal(+3).Variable()));
+  EXPECT_FALSE(trail->Assignment().VariableIsAssigned(Literal(+4).Variable()));
+}
+
+TEST(PrecedencesPropagatorTest, PropagateOnVariableOffset) {
+  Model model;
+  Trail* trail = model.GetOrCreate<Trail>();
+  IntegerTrail* integer_trail = model.GetOrCreate<IntegerTrail>();
+  PrecedencesPropagator* propagator =
+      model.GetOrCreate<PrecedencesPropagator>();
+  trail->Resize(10);
+
+  std::vector<IntegerVariable> vars = AddVariables(integer_trail);
+  propagator->AddPrecedenceWithVariableOffset(vars[0], vars[1], vars[2]);
+  propagator->AddPrecedenceWithOffset(vars[1], vars[3], IntegerValue(50));
+
+  EXPECT_TRUE(propagator->Propagate(trail));
+  EXPECT_BOUNDS_EQ(vars[0], 0, 50);
+  EXPECT_BOUNDS_EQ(vars[1], 0, 50);
+  EXPECT_BOUNDS_EQ(vars[2], 0, 50);
+}
+
+TEST(PrecedencesPropagatorTest, Cycles) {
+  Model model;
+  Trail* trail = model.GetOrCreate<Trail>();
+  IntegerTrail* integer_trail = model.GetOrCreate<IntegerTrail>();
+  PrecedencesPropagator* propagator =
+      model.GetOrCreate<PrecedencesPropagator>();
+  trail->Resize(10);
+
+  std::vector<IntegerVariable> vars = AddVariables(integer_trail);
+  propagator->AddPrecedenceWithOffset(vars[0], vars[1], IntegerValue(4));
+  propagator->AddPrecedenceWithOffset(vars[1], vars[2], IntegerValue(8));
+  propagator->AddConditionalPrecedenceWithOffset(
+      vars[2], vars[3], IntegerValue(-10), Literal(+1));
+  propagator->AddConditionalPrecedenceWithOffset(vars[3], vars[0],
+                                                 IntegerValue(-2),
Literal(+2)); + propagator->AddConditionalPrecedence(vars[3], vars[0], Literal(+3)); + + // This one will force the upper bound of vars[0] to be 50, so we can + // check that the cycle is detected before the lower bound of var[0] crosses + // this bound. + propagator->AddConditionalPrecedenceWithOffset(vars[0], vars[4], + IntegerValue(50), Literal(+4)); + + // If we add this one, the cycle will be detected using the integer bound and + // not the graph cycle. TODO(user): Maybe this is a bad thing? but it seems + // difficult to avoid it without extra computations. + propagator->AddConditionalPrecedenceWithOffset(vars[0], vars[4], + IntegerValue(99), Literal(+5)); + + EXPECT_TRUE(propagator->Propagate(trail)); + + // Cycle of weight zero is fine. + trail->SetDecisionLevel(1); + EXPECT_TRUE(integer_trail->Propagate(trail)); + trail->Enqueue(Literal(+1), AssignmentType::kUnitReason); + trail->Enqueue(Literal(+2), AssignmentType::kUnitReason); + trail->Enqueue(Literal(+4), AssignmentType::kUnitReason); + EXPECT_TRUE(propagator->Propagate(trail)); + + // But a cycle of positive length is not! + trail->Enqueue(Literal(+3), AssignmentType::kUnitReason); + EXPECT_FALSE(propagator->Propagate(trail)); + EXPECT_THAT(trail->FailingClause(), + UnorderedElementsAre(Literal(-1), Literal(-3))); + + // Test the untrail. + trail->SetDecisionLevel(0); + integer_trail->Untrail(*trail, 0); + propagator->Untrail(*trail, 0); + trail->Untrail(0); + EXPECT_TRUE(propagator->Propagate(trail)); + + // Still fine here. + trail->SetDecisionLevel(1); + EXPECT_TRUE(integer_trail->Propagate(trail)); + trail->Enqueue(Literal(+5), AssignmentType::kUnitReason); + EXPECT_TRUE(propagator->Propagate(trail)); + + // But fail there with a different and longer reason. 
+  trail->Enqueue(Literal(+1), AssignmentType::kUnitReason);
+  trail->Enqueue(Literal(+3), AssignmentType::kUnitReason);
+  EXPECT_FALSE(propagator->Propagate(trail));
+  EXPECT_THAT(trail->FailingClause(),
+              UnorderedElementsAre(Literal(-1), Literal(-3), Literal(-5)));
+}
+
+// This tests a tricky situation:
+//
+//   vars[0] + (offset = vars[2]) <= var[1]
+//   vars[1] <= vars[2]  !!
+TEST(PrecedencesPropagatorTest, TrickyCycle) {
+  Model model;
+  Trail* trail = model.GetOrCreate<Trail>();
+  IntegerTrail* integer_trail = model.GetOrCreate<IntegerTrail>();
+  PrecedencesPropagator* propagator =
+      model.GetOrCreate<PrecedencesPropagator>();
+  trail->Resize(10);
+
+  std::vector<IntegerVariable> vars = AddVariables(integer_trail);
+  propagator->AddPrecedenceWithVariableOffset(vars[0], vars[1], vars[2]);
+  propagator->AddPrecedence(vars[1], vars[2]);
+
+  // This will cause an infinite cycle.
+  propagator->AddConditionalPrecedenceWithOffset(vars[3], vars[0],
+                                                 IntegerValue(1), Literal(+1));
+
+  // So far so good.
+  EXPECT_TRUE(propagator->Propagate(trail));
+  trail->SetDecisionLevel(1);
+  EXPECT_TRUE(integer_trail->Propagate(trail));
+
+  // Conflict.
+  trail->Enqueue(Literal(+1), AssignmentType::kUnitReason);
+  EXPECT_FALSE(propagator->Propagate(trail));
+  EXPECT_THAT(trail->FailingClause(), ElementsAre(Literal(-1)));
+
+  // Test that the code detected properly a positive cycle in the dependency
+  // graph instead of just pushing the bounds until the upper bound is reached.
+  EXPECT_LT(integer_trail->num_enqueues(), 10);
+}
+
+TEST(PrecedencesPropagatorTest, ZeroWeightCycleOnDiscreteDomain) {
+  Model model;
+  IntegerVariable a = model.Add(
+      NewIntegerVariable(Domain::FromValues({2, 5, 7, 15, 16, 17, 20, 32})));
+  IntegerVariable b = model.Add(
+      NewIntegerVariable(Domain::FromValues({3, 6, 9, 14, 16, 18, 20, 35})));
+
+  // Add the fact that a == b with two inequalities.
+  model.Add(LowerOrEqual(a, b));
+  model.Add(LowerOrEqual(b, a));
+
+  // After propagation, we should detect that the only common values fall in
+  // [16, 20].
+ EXPECT_TRUE(model.GetOrCreate()->Propagate()); + + // The integer_trail is only used in the macros below. + IntegerTrail* integer_trail = model.GetOrCreate(); + EXPECT_BOUNDS_EQ(a, 16, 20); + EXPECT_BOUNDS_EQ(b, 16, 20); +} + +// This was failing before CL 135903015. +TEST(PrecedencesPropagatorTest, ConditionalPrecedencesOnFixedLiteral) { + Model model; + + // To trigger the old bug, we need to add some precedences. + IntegerVariable x = model.Add(NewIntegerVariable(0, 100)); + IntegerVariable y = model.Add(NewIntegerVariable(50, 100)); + model.Add(LowerOrEqual(x, y)); + + // We then add a Boolean variable and fix it. + // This will trigger a propagation. + BooleanVariable b = model.Add(NewBooleanVariable()); + model.Add(ClauseConstraint({Literal(b, true)})); // Fix b To true. + + // We now add a conditional precedences using the fixed variable. + // This used to not be taken into account. + model.Add(ConditionalLowerOrEqualWithOffset(y, x, 0, Literal(b, true))); + + EXPECT_EQ(SatSolver::FEASIBLE, SolveIntegerProblemWithLazyEncoding(&model)); + EXPECT_EQ(model.Get(Value(x)), model.Get(Value(y))); +} + +#undef EXPECT_BOUNDS_EQ + +TEST(PrecedenceRelationsTest, CollectPrecedences) { + Model model; + auto* integer_trail = model.GetOrCreate(); + auto* relations = model.GetOrCreate(); + + std::vector vars = AddVariables(integer_trail); + relations->Add(vars[0], vars[2], IntegerValue(1)); + relations->Add(vars[0], vars[5], IntegerValue(1)); + relations->Add(vars[1], vars[2], IntegerValue(1)); + relations->Add(vars[2], vars[4], IntegerValue(1)); + relations->Add(vars[3], vars[4], IntegerValue(1)); + relations->Add(vars[4], vars[5], IntegerValue(1)); + + std::vector p; + relations->CollectPrecedences({vars[0], vars[2], vars[3]}, &p); + + // Note that we do not return precedences with just one variable. 
+ std::vector indices; + std::vector variables; + for (const auto precedence : p) { + indices.push_back(precedence.index); + variables.push_back(precedence.var); + } + EXPECT_EQ(indices, (std::vector{1, 2})); + EXPECT_EQ(variables, (std::vector{vars[4], vars[4]})); + + // Same with NegationOf() and also test that p is cleared. + relations->CollectPrecedences({NegationOf(vars[0]), NegationOf(vars[4])}, &p); + EXPECT_TRUE(p.empty()); +} + +TEST(GreaterThanAtLeastOneOfDetectorTest, AddGreaterThanAtLeastOneOf) { + Model model; + const IntegerVariable a = model.Add(NewIntegerVariable(2, 10)); + const IntegerVariable b = model.Add(NewIntegerVariable(5, 10)); + const IntegerVariable c = model.Add(NewIntegerVariable(3, 10)); + const IntegerVariable d = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); + const Literal lit_b = Literal(model.Add(NewBooleanVariable()), true); + const Literal lit_c = Literal(model.Add(NewBooleanVariable()), true); + model.Add(ClauseConstraint({lit_a, lit_b, lit_c})); + + auto* detector = model.GetOrCreate(); + detector->Add(lit_a, {a, -1}, {d, 1}, 2, 1000); // d >= a + 2 + detector->Add(lit_b, {b, -1}, {d, 1}, -1, 1000); // d >= b -1 + detector->Add(lit_c, {c, -1}, {d, 1}, 0, 1000); // d >= c + + auto* solver = model.GetOrCreate(); + EXPECT_TRUE(solver->Propagate()); + EXPECT_EQ(model.Get(LowerBound(d)), 0); + + EXPECT_EQ(1, detector->AddGreaterThanAtLeastOneOfConstraints(&model)); + EXPECT_TRUE(solver->Propagate()); + EXPECT_EQ(model.Get(LowerBound(d)), std::min({2 + 2, 5 - 1, 3 + 0})); +} + +TEST(GreaterThanAtLeastOneOfDetectorTest, + AddGreaterThanAtLeastOneOfWithAutoDetect) { + Model model; + const IntegerVariable a = model.Add(NewIntegerVariable(2, 10)); + const IntegerVariable b = model.Add(NewIntegerVariable(5, 10)); + const IntegerVariable c = model.Add(NewIntegerVariable(3, 10)); + const IntegerVariable d = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = 
Literal(model.Add(NewBooleanVariable()), true); + const Literal lit_b = Literal(model.Add(NewBooleanVariable()), true); + const Literal lit_c = Literal(model.Add(NewBooleanVariable()), true); + model.Add(ClauseConstraint({lit_a, lit_b, lit_c})); + + auto* detector = model.GetOrCreate(); + detector->Add(lit_a, {a, -1}, {d, 1}, 2, 1000); // d >= a + 2 + detector->Add(lit_b, {b, -1}, {d, 1}, -1, 1000); // d >= b -1 + detector->Add(lit_c, {c, -1}, {d, 1}, 0, 1000); // d >= c + + auto* solver = model.GetOrCreate(); + EXPECT_TRUE(solver->Propagate()); + EXPECT_EQ(model.Get(LowerBound(d)), 0); + + EXPECT_EQ(1, detector->AddGreaterThanAtLeastOneOfConstraints( + &model, /*auto_detect_clauses=*/true)); + EXPECT_TRUE(solver->Propagate()); + EXPECT_EQ(model.Get(LowerBound(d)), std::min({2 + 2, 5 - 1, 3 + 0})); +} + +TEST(PrecedencesPropagatorTest, ComputeFullPrecedencesIfCycle) { + Model model; + std::vector vars(10); + for (int i = 0; i < vars.size(); ++i) { + vars[i] = model.Add(NewIntegerVariable(0, 10)); + } + + // Even if the weight are compatible, we will fail here. 
+ model.Add(LowerOrEqualWithOffset(vars[0], vars[1], 2)); + model.Add(LowerOrEqualWithOffset(vars[1], vars[2], 2)); + model.Add(LowerOrEqualWithOffset(vars[2], vars[1], -10)); + model.Add(LowerOrEqualWithOffset(vars[0], vars[2], 5)); + + std::vector precedences; + model.GetOrCreate()->ComputeFullPrecedences( + {vars[0], vars[1]}, &precedences); + EXPECT_TRUE(precedences.empty()); +} + +TEST(PrecedencesPropagatorTest, BasicFiltering) { + Model model; + std::vector vars(10); + for (int i = 0; i < vars.size(); ++i) { + vars[i] = model.Add(NewIntegerVariable(0, 10)); + } + + // 1 + // / \ + // 0 2 -- 4 + // \ / + // 3 + model.Add(LowerOrEqualWithOffset(vars[0], vars[1], 2)); + model.Add(LowerOrEqualWithOffset(vars[1], vars[2], 2)); + model.Add(LowerOrEqualWithOffset(vars[0], vars[3], 1)); + model.Add(LowerOrEqualWithOffset(vars[3], vars[2], 2)); + model.Add(LowerOrEqualWithOffset(vars[2], vars[4], 2)); + + std::vector precedences; + model.GetOrCreate()->ComputeFullPrecedences( + {vars[0], vars[1], vars[3]}, &precedences); + + // We only output size at least 2, and "relevant" precedences. + // So here only vars[2]. 
+ ASSERT_EQ(precedences.size(), 1); + EXPECT_EQ(precedences[0].var, vars[2]); + EXPECT_THAT(precedences[0].offsets, ElementsAre(4, 2, 2)); + EXPECT_THAT(precedences[0].indices, ElementsAre(0, 1, 2)); +} + +TEST(PrecedencesPropagatorTest, BasicFiltering2) { + Model model; + std::vector vars(10); + for (int i = 0; i < vars.size(); ++i) { + vars[i] = model.Add(NewIntegerVariable(0, 10)); + } + + // 1 + // / \ + // 0 2 -- 4 + // \ / / + // 3 5 + model.Add(LowerOrEqualWithOffset(vars[0], vars[1], 2)); + model.Add(LowerOrEqualWithOffset(vars[1], vars[2], 2)); + model.Add(LowerOrEqualWithOffset(vars[0], vars[3], 1)); + model.Add(LowerOrEqualWithOffset(vars[3], vars[2], 2)); + model.Add(LowerOrEqualWithOffset(vars[2], vars[4], 2)); + model.Add(LowerOrEqualWithOffset(vars[5], vars[4], 7)); + + std::vector precedences; + model.GetOrCreate()->ComputeFullPrecedences( + {vars[0], vars[1], vars[3]}, &precedences); + + // Same as before here. + ASSERT_EQ(precedences.size(), 1); + EXPECT_EQ(precedences[0].var, vars[2]); + EXPECT_THAT(precedences[0].offsets, ElementsAre(4, 2, 2)); + EXPECT_THAT(precedences[0].indices, ElementsAre(0, 1, 2)); + + // But if we ask for 5, we will get two results. 
+ precedences.clear(); + model.GetOrCreate()->ComputeFullPrecedences( + {vars[0], vars[1], vars[3], vars[5]}, &precedences); + ASSERT_EQ(precedences.size(), 2); + EXPECT_EQ(precedences[0].var, vars[2]); + EXPECT_THAT(precedences[0].offsets, ElementsAre(4, 2, 2)); + EXPECT_THAT(precedences[0].indices, ElementsAre(0, 1, 2)); + EXPECT_EQ(precedences[1].var, vars[4]); + EXPECT_THAT(precedences[1].offsets, ElementsAre(6, 4, 4, 7)); + EXPECT_THAT(precedences[1].indices, ElementsAre(0, 1, 2, 3)); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/probing_test.cc b/ortools/sat/probing_test.cc new file mode 100644 index 0000000000..d57e474447 --- /dev/null +++ b/ortools/sat/probing_test.cc @@ -0,0 +1,80 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/probing.h" + +#include + +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_solver.h" +#include "ortools/util/sorted_interval_list.h" + +namespace operations_research { +namespace sat { +namespace { + +TEST(ProbeBooleanVariablesTest, IntegerBoundInference) { + Model model; + const BooleanVariable a = model.Add(NewBooleanVariable()); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable c = model.Add(NewIntegerVariable(0, 10)); + + // Bound restriction. + model.Add(Implication({Literal(a, true)}, + IntegerLiteral::GreaterOrEqual(b, IntegerValue(2)))); + model.Add(Implication({Literal(a, false)}, + IntegerLiteral::GreaterOrEqual(b, IntegerValue(3)))); + model.Add(Implication({Literal(a, true)}, + IntegerLiteral::LowerOrEqual(b, IntegerValue(7)))); + model.Add(Implication({Literal(a, false)}, + IntegerLiteral::LowerOrEqual(b, IntegerValue(9)))); + + // Hole. + model.Add(Implication({Literal(a, true)}, + IntegerLiteral::GreaterOrEqual(c, IntegerValue(7)))); + model.Add(Implication({Literal(a, false)}, + IntegerLiteral::LowerOrEqual(c, IntegerValue(4)))); + + Prober* prober = model.GetOrCreate(); + prober->ProbeBooleanVariables(/*deterministic_time_limit=*/1.0); + auto* integer_trail = model.GetOrCreate(); + EXPECT_EQ("[2,9]", integer_trail->InitialVariableDomain(b).ToString()); + EXPECT_EQ("[0,4][7,10]", integer_trail->InitialVariableDomain(c).ToString()); +} + +TEST(FailedLiteralProbingRoundTest, TrivialExample) { + Model model; + const Literal a(model.Add(NewBooleanVariable()), true); + const Literal b(model.Add(NewBooleanVariable()), true); + const Literal c(model.Add(NewBooleanVariable()), true); + + // Setting a to false will result in a constradiction, so a must be true. 
+ model.Add(ClauseConstraint({a, b, c})); + model.Add(Implication(a.Negated(), b.Negated())); + model.Add(Implication(c, a)); + + auto* sat_soler = model.GetOrCreate(); + EXPECT_TRUE(sat_soler->Propagate()); + EXPECT_FALSE(sat_soler->Assignment().LiteralIsAssigned(a)); + + EXPECT_TRUE(FailedLiteralProbingRound(ProbingOptions(), &model)); + EXPECT_TRUE(sat_soler->Assignment().LiteralIsTrue(a)); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/pseudo_costs_test.cc b/ortools/sat/pseudo_costs_test.cc new file mode 100644 index 0000000000..fc0b98ad9d --- /dev/null +++ b/ortools/sat/pseudo_costs_test.cc @@ -0,0 +1,263 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/pseudo_costs.h" + +#include +#include + +#include "gtest/gtest.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/sat_solver.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +TEST(GetBoundChangeTest, LowerBoundChange) { + Model model; + auto* encoder = model.GetOrCreate(); + + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const Literal decision = encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(x, IntegerValue(3))); + + PseudoCosts pseudo_costs(&model); + pseudo_costs.SaveBoundChanges(decision, {}); + auto& bound_changes = pseudo_costs.BoundChanges(); + EXPECT_EQ(1, bound_changes.size()); + PseudoCosts::VariableBoundChange bound_change = bound_changes[0]; + EXPECT_EQ(bound_change.var, x); + EXPECT_EQ(bound_change.lower_bound_change, IntegerValue(3)); +} + +TEST(GetBoundChangeTest, UpperBoundChange) { + Model model; + auto* encoder = model.GetOrCreate(); + + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const Literal decision = encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::LowerOrEqual(x, IntegerValue(7))); + + PseudoCosts pseudo_costs(&model); + pseudo_costs.SaveBoundChanges(decision, {}); + auto& bound_changes = pseudo_costs.BoundChanges(); + EXPECT_EQ(1, bound_changes.size()); + PseudoCosts::VariableBoundChange bound_change = bound_changes[0]; + EXPECT_EQ(bound_change.var, NegationOf(x)); + EXPECT_EQ(bound_change.lower_bound_change, IntegerValue(3)); +} + +TEST(GetBoundChangeTest, EqualityDecision) { + Model model; + auto* encoder = model.GetOrCreate(); + + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + Literal decision(model.GetOrCreate()->NewBooleanVariable(), true); + encoder->AssociateToIntegerEqualValue(decision, x, IntegerValue(6)); + + PseudoCosts 
pseudo_costs(&model); + pseudo_costs.SaveBoundChanges(decision, {}); + auto& bound_changes = pseudo_costs.BoundChanges(); + EXPECT_EQ(2, bound_changes.size()); + PseudoCosts::VariableBoundChange lower_bound_change = bound_changes[0]; + EXPECT_EQ(lower_bound_change.var, x); + EXPECT_EQ(lower_bound_change.lower_bound_change, IntegerValue(6)); + PseudoCosts::VariableBoundChange upper_bound_change = bound_changes[1]; + EXPECT_EQ(upper_bound_change.var, NegationOf(x)); + EXPECT_EQ(upper_bound_change.lower_bound_change, IntegerValue(4)); +} + +TEST(PseudoCosts, Initialize) { + Model model; + SatParameters* parameters = model.GetOrCreate(); + parameters->set_pseudo_cost_reliability_threshold(1); + + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + + PseudoCosts pseudo_costs(&model); + + EXPECT_EQ(0.0, pseudo_costs.GetCost(x)); + EXPECT_EQ(0.0, pseudo_costs.GetCost(NegationOf(x))); + EXPECT_EQ(0.0, pseudo_costs.GetCost(y)); + EXPECT_EQ(0.0, pseudo_costs.GetCost(NegationOf(y))); + EXPECT_EQ(0, pseudo_costs.GetNumRecords(x)); + EXPECT_EQ(0, pseudo_costs.GetNumRecords(NegationOf(x))); + EXPECT_EQ(0, pseudo_costs.GetNumRecords(y)); + EXPECT_EQ(0, pseudo_costs.GetNumRecords(NegationOf(y))); +} + +namespace { +void SimulateDecision(Literal decision, IntegerValue obj_delta, Model* model) { + const IntegerVariable objective_var = + model->GetOrCreate()->objective_var; + auto* integer_trail = model->GetOrCreate(); + auto* pseudo_costs = model->GetOrCreate(); + + pseudo_costs->BeforeTakingDecision(decision); + const IntegerValue lb = integer_trail->LowerBound(objective_var); + EXPECT_TRUE(integer_trail->Enqueue( + IntegerLiteral::GreaterOrEqual(objective_var, lb + obj_delta), {}, {})); + pseudo_costs->AfterTakingDecision(); +} +} // namespace + +TEST(PseudoCosts, UpdateCostOfNewVar) { + Model model; + auto* encoder = model.GetOrCreate(); + SatParameters* parameters = model.GetOrCreate(); + 
parameters->set_pseudo_cost_reliability_threshold(1); + + const IntegerVariable objective_var = model.Add(NewIntegerVariable(0, 100)); + model.GetOrCreate()->objective_var = objective_var; + + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + auto* pseudo_costs = model.GetOrCreate(); + + SimulateDecision(encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(x, IntegerValue(3))), + IntegerValue(6), &model); + + EXPECT_EQ(2.0, pseudo_costs->GetCost(x)); + EXPECT_EQ(0.0, pseudo_costs->GetCost(NegationOf(x))); + EXPECT_EQ(1, pseudo_costs->GetNumRecords(x)); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(NegationOf(x))); + + SimulateDecision(encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::LowerOrEqual(y, IntegerValue(8))), + IntegerValue(6), &model); + + EXPECT_EQ(2.0, pseudo_costs->GetCost(x)); + EXPECT_EQ(0.0, pseudo_costs->GetCost(NegationOf(x))); + EXPECT_EQ(0.0, pseudo_costs->GetCost(y)); + EXPECT_EQ(3.0, pseudo_costs->GetCost(NegationOf(y))); + EXPECT_EQ(1, pseudo_costs->GetNumRecords(x)); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(NegationOf(x))); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(y)); + EXPECT_EQ(1, pseudo_costs->GetNumRecords(NegationOf(y))); +} + +TEST(PseudoCosts, BasicCostUpdate) { + Model model; + auto* encoder = model.GetOrCreate(); + SatParameters* parameters = model.GetOrCreate(); + parameters->set_pseudo_cost_reliability_threshold(1); + + const IntegerVariable objective_var = model.Add(NewIntegerVariable(0, 100)); + model.GetOrCreate()->objective_var = objective_var; + + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + auto* pseudo_costs = model.GetOrCreate(); + + SimulateDecision(encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::GreaterOrEqual(x, IntegerValue(3))), + IntegerValue(6), 
&model); + + EXPECT_EQ(2.0, pseudo_costs->GetCost(x)); + EXPECT_EQ(0.0, pseudo_costs->GetCost(NegationOf(x))); + EXPECT_EQ(0.0, pseudo_costs->GetCost(y)); + EXPECT_EQ(0.0, pseudo_costs->GetCost(NegationOf(y))); + EXPECT_EQ(0.0, pseudo_costs->GetCost(z)); + EXPECT_EQ(0.0, pseudo_costs->GetCost(NegationOf(z))); + EXPECT_EQ(1, pseudo_costs->GetNumRecords(x)); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(NegationOf(x))); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(y)); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(NegationOf(y))); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(z)); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(NegationOf(z))); + + SimulateDecision(encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::LowerOrEqual(y, IntegerValue(8))), + IntegerValue(6), &model); + + EXPECT_EQ(2.0, pseudo_costs->GetCost(x)); + EXPECT_EQ(0.0, pseudo_costs->GetCost(NegationOf(x))); + EXPECT_EQ(0.0, pseudo_costs->GetCost(y)); + EXPECT_EQ(3.0, pseudo_costs->GetCost(NegationOf(y))); + EXPECT_EQ(0.0, pseudo_costs->GetCost(z)); + EXPECT_EQ(0.0, pseudo_costs->GetCost(NegationOf(z))); + EXPECT_EQ(1, pseudo_costs->GetNumRecords(x)); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(NegationOf(x))); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(y)); + EXPECT_EQ(1, pseudo_costs->GetNumRecords(NegationOf(y))); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(z)); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(NegationOf(z))); +} + +TEST(PseudoCosts, PseudoCostReliabilityTest) { + Model model; + auto* encoder = model.GetOrCreate(); + SatParameters* parameters = model.GetOrCreate(); + parameters->set_pseudo_cost_reliability_threshold(2); + + const IntegerVariable objective_var = model.Add(NewIntegerVariable(0, 100)); + model.GetOrCreate()->objective_var = objective_var; + + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + auto* pseudo_costs = model.GetOrCreate(); + + SimulateDecision(encoder->GetOrCreateAssociatedLiteral( + 
IntegerLiteral::GreaterOrEqual(x, IntegerValue(3))), + IntegerValue(6), &model); + + EXPECT_EQ(2.0, pseudo_costs->GetCost(x)); + EXPECT_EQ(0.0, pseudo_costs->GetCost(NegationOf(x))); + EXPECT_EQ(0.0, pseudo_costs->GetCost(y)); + EXPECT_EQ(0.0, pseudo_costs->GetCost(NegationOf(y))); + EXPECT_EQ(1, pseudo_costs->GetNumRecords(x)); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(NegationOf(x))); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(y)); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(NegationOf(y))); + EXPECT_EQ(kNoIntegerVariable, pseudo_costs->GetBestDecisionVar()); + + SimulateDecision(encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::LowerOrEqual(y, IntegerValue(8))), + IntegerValue(14), &model); + + EXPECT_EQ(2.0, pseudo_costs->GetCost(x)); + EXPECT_EQ(0.0, pseudo_costs->GetCost(NegationOf(x))); + EXPECT_EQ(0.0, pseudo_costs->GetCost(y)); + EXPECT_EQ(7.0, pseudo_costs->GetCost(NegationOf(y))); + EXPECT_EQ(1, pseudo_costs->GetNumRecords(x)); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(NegationOf(x))); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(y)); + EXPECT_EQ(1, pseudo_costs->GetNumRecords(NegationOf(y))); + EXPECT_EQ(kNoIntegerVariable, pseudo_costs->GetBestDecisionVar()); + + SimulateDecision(encoder->GetOrCreateAssociatedLiteral( + IntegerLiteral::LowerOrEqual(x, IntegerValue(8))), + IntegerValue(6), &model); + + EXPECT_EQ(2.0, pseudo_costs->GetCost(x)); + EXPECT_EQ(3.0, pseudo_costs->GetCost(NegationOf(x))); + EXPECT_EQ(0.0, pseudo_costs->GetCost(y)); + EXPECT_EQ(7.0, pseudo_costs->GetCost(NegationOf(y))); + EXPECT_EQ(1, pseudo_costs->GetNumRecords(x)); + EXPECT_EQ(1, pseudo_costs->GetNumRecords(NegationOf(x))); + EXPECT_EQ(0, pseudo_costs->GetNumRecords(y)); + EXPECT_EQ(1, pseudo_costs->GetNumRecords(NegationOf(y))); + EXPECT_EQ(NegationOf(x), pseudo_costs->GetBestDecisionVar()); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/restart_test.cc b/ortools/sat/restart_test.cc new file mode 100644 index 
0000000000..fd319ed33d --- /dev/null +++ b/ortools/sat/restart_test.cc @@ -0,0 +1,86 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/restart.h" + +#include + +#include "absl/base/macros.h" +#include "gtest/gtest.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_parameters.pb.h" + +namespace operations_research { +namespace sat { +namespace { + +TEST(SUnivTest, Luby) { + const int kSUniv[] = {1, 1, 2, 1, 1, 2, 4, 1, 1, 2, 1, 1, 2, 4, 8, 1}; + for (int i = 0; i < ABSL_ARRAYSIZE(kSUniv); ++i) { + EXPECT_EQ(kSUniv[i], SUniv(i + 1)); + } +} + +TEST(RestartPolicyTest, BasicRunningAverageTest) { + Model model; + RestartPolicy* restart = model.GetOrCreate(); + SatParameters* params = model.GetOrCreate(); + + // The parameters for this test. + params->clear_restart_algorithms(); + params->add_restart_algorithms(SatParameters::DL_MOVING_AVERAGE_RESTART); + params->set_use_blocking_restart(false); + params->set_restart_dl_average_ratio(1.0); + params->set_restart_running_window_size(10); + restart->Reset(); + + EXPECT_FALSE(restart->ShouldRestart()); + int i = 0; + for (; i < 100; ++i) { + const int unused = 0; + const int decision_level = i; + if (restart->ShouldRestart()) break; + restart->OnConflict(unused, decision_level, unused); + } + + // Increasing decision levels, so as soon as we have 11 conflicts and 10 in + // the window, the window average is > global average. 
+ EXPECT_EQ(i, 11); + + // Now the window is reset, but not the global average. So as soon as we have + // 10 conflicts, we restart. + i = 0; + for (; i < 100; ++i) { + const int unused = 0; + const int decision_level = 1000 - i; + if (restart->ShouldRestart()) break; + restart->OnConflict(unused, decision_level, unused); + } + EXPECT_EQ(i, 10); + + // If we call Reset() the global average is reset, so if we have conflicts at + // a decreasing decision level, we never restart. + restart->Reset(); + i = 0; + for (; i < 1000; ++i) { + const int unused = 0; + const int decision_level = 1000 - i; + if (restart->ShouldRestart()) break; + restart->OnConflict(unused, decision_level, unused); + } + EXPECT_EQ(i, 1000); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/routing_cuts_test.cc b/ortools/sat/routing_cuts_test.cc new file mode 100644 index 0000000000..e2adb9da48 --- /dev/null +++ b/ortools/sat/routing_cuts_test.cc @@ -0,0 +1,422 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/routing_cuts.h" + +#include +#include +#include +#include +#include + +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/strong_vector.h" +#include "ortools/graph/max_flow.h" +#include "ortools/sat/cuts.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/linear_constraint.h" +#include "ortools/sat/linear_constraint_manager.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::ElementsAre; + +// Test on a simple tree: +// 3 +// / \ \ +// 1 0 5 +// / \ +// 2 4 +TEST(ExtractAllSubsetsFromForestTest, Basic) { + std::vector parents = {3, 3, 1, 3, 1, 3}; + + std::vector buffer; + std::vector> subsets; + ExtractAllSubsetsFromForest(parents, &buffer, &subsets); + + // Post order but we explore high number first. + // Alternatively, we could use unordered here, but the order is stable. + EXPECT_THAT(buffer, ElementsAre(5, 4, 2, 1, 0, 3)); + EXPECT_THAT(subsets, + ElementsAre(ElementsAre(5), ElementsAre(4), ElementsAre(2), + ElementsAre(4, 2, 1), ElementsAre(0), + ElementsAre(5, 4, 2, 1, 0, 3))); +} + +// +// 0 3 4 +// / \ | +// 1 2 5 +TEST(ExtractAllSubsetsFromForestTest, BasicForest) { + std::vector parents = {0, 0, 0, 3, 4, 4}; + + std::vector buffer; + std::vector> subsets; + ExtractAllSubsetsFromForest(parents, &buffer, &subsets); + + // Post order but we explore high number first. + // Alternatively, we could use unordered here, but the order is stable. + EXPECT_THAT(buffer, ElementsAre(2, 1, 0, 3, 5, 4)); + EXPECT_THAT(subsets, + ElementsAre(ElementsAre(2), ElementsAre(1), ElementsAre(2, 1, 0), + ElementsAre(3), ElementsAre(5), ElementsAre(5, 4))); +} + +TEST(ExtractAllSubsetsFromForestTest, Random) { + const int num_nodes = 20; + absl::BitGen random; + + // Create a random tree rooted at zero. 
+ std::vector parents(num_nodes, 0); + for (int i = 2; i < num_nodes; ++i) { + parents[i] = absl::Uniform(random, 0, i); // in [0, i - 1]. + } + + std::vector buffer; + std::vector> subsets; + ExtractAllSubsetsFromForest(parents, &buffer, &subsets); + + // We don't test that we are exhaustive, but we check basic property. + std::vector in_subset(num_nodes, false); + for (const auto subset : subsets) { + for (const int n : subset) in_subset[n] = true; + + // There should be at most one out edge. + int root = -1; + for (const int n : subset) { + if (in_subset[parents[n]]) continue; + if (root != -1) EXPECT_EQ(parents[n], root); + root = parents[n]; + } + + // No node outside should point inside. + for (int n = 0; n < num_nodes; ++n) { + if (in_subset[n]) continue; + EXPECT_TRUE(!in_subset[parents[n]]); + } + + for (const int n : subset) in_subset[n] = false; + } +} + +TEST(SymmetrizeArcsTest, BasicTest) { + std::vector arcs{{.tail = 0, .head = 1, .lp_value = 0.5}, + {.tail = 2, .head = 0, .lp_value = 0.5}, + {.tail = 1, .head = 0, .lp_value = 0.5}}; + SymmetrizeArcs(&arcs); + EXPECT_THAT( + arcs, ElementsAre(ArcWithLpValue{.tail = 0, .head = 1, .lp_value = 1.0}, + ArcWithLpValue{.tail = 0, .head = 2, .lp_value = 0.5})); +} + +TEST(ComputeGomoryHuTreeTest, Random) { + absl::BitGen random; + + // Lets generate a random graph on a small number of nodes. + const int num_nodes = 10; + const int num_arcs = 100; + std::vector arcs; + for (int i = 0; i < num_arcs; ++i) { + const int tail = absl::Uniform(random, 0, num_nodes); + const int head = absl::Uniform(random, 0, num_nodes); + if (tail == head) continue; + const double lp_value = absl::Uniform(random, 0, 1); + arcs.push_back({tail, head, lp_value}); + } + + // Get all cut from Gomory-Hu tree. + const std::vector parents = ComputeGomoryHuTree(num_nodes, arcs); + std::vector buffer; + std::vector> subsets; + ExtractAllSubsetsFromForest(parents, &buffer, &subsets); + + // Compute the cost of entering (resp. 
leaving) each subset. + // TODO(user): We need the same scaling as in ComputeGomoryHu(), not super + // clean. We might want an integer input to the function, but ok for now. + std::vector in_subset(num_nodes, false); + std::vector out_costs(subsets.size(), 0); + std::vector in_costs(subsets.size(), 0); + for (int i = 0; i < subsets.size(); ++i) { + for (const int n : subsets[i]) in_subset[n] = true; + for (const auto& arc : arcs) { + if (in_subset[arc.tail] && !in_subset[arc.head]) { + out_costs[i] += std::round(1.0e6 * arc.lp_value); + } + if (!in_subset[arc.tail] && in_subset[arc.head]) { + in_costs[i] += std::round(1.0e6 * arc.lp_value); + } + } + for (const int n : subsets[i]) in_subset[n] = false; + } + + // We will test with an exhaustive comparison. We are in n ^ 3 ! + // For all (s,t) pair, get the actual max-flow on the scaled graph. + // Check than one of the cuts separate s and t, with this exact weight. + SimpleMaxFlow max_flow; + for (const auto& [tail, head, lp_value] : arcs) { + // TODO(user): the algo only seems to work on an undirected graph, or + // equivalently when we always have a reverse arc with the same weight. + // Note that you can see below that we compute "min" cut for the sum of + // outgoing + incoming arcs this way. 
+ max_flow.AddArcWithCapacity(tail, head, std::round(1.0e6 * lp_value)); + max_flow.AddArcWithCapacity(head, tail, std::round(1.0e6 * lp_value)); + } + for (int s = 0; s < num_nodes; ++s) { + for (int t = s + 1; t < num_nodes; ++t) { + ASSERT_EQ(max_flow.Solve(s, t), SimpleMaxFlow::OPTIMAL); + const int64_t flow = max_flow.OptimalFlow(); + bool found = false; + for (int i = 0; i < subsets.size(); ++i) { + bool s_out = true; + bool t_out = true; + for (const int n : subsets[i]) { + if (n == s) s_out = false; + if (n == t) t_out = false; + } + if (!s_out && t_out && out_costs[i] + in_costs[i] == flow) found = true; + if (s_out && !t_out && in_costs[i] + out_costs[i] == flow) found = true; + if (found) break; + } + + // Debug. + if (!found) { + LOG(INFO) << s << " -> " << t << " flow= " << flow; + for (int i = 0; i < subsets.size(); ++i) { + bool s_out = true; + bool t_out = true; + for (const int n : subsets[i]) { + if (n == s) s_out = false; + if (n == t) t_out = false; + } + if (!s_out && t_out) { + LOG(INFO) << i << " out= " << out_costs[i] + in_costs[i]; + } + if (s_out && !t_out) { + LOG(INFO) << i << " in= " << in_costs[i] + out_costs[i]; + } + } + } + ASSERT_TRUE(found); + } + } +} + +TEST(CreateStronglyConnectedGraphCutGeneratorTest, BasicExample) { + Model model; + + // Lets create a simple square graph with arcs in both directions: + // + // 0 ---- 1 + // | | + // | | + // 2 ---- 3 + const int num_nodes = 4; + std::vector tails{0, 1, 1, 3, 3, 2, 2, 0}; + std::vector heads{1, 0, 3, 1, 2, 3, 0, 2}; + std::vector literals; + std::vector vars; + for (int i = 0; i < 2 * num_nodes; ++i) { + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + vars.push_back(model.Add(NewIntegerVariableFromLiteral(literals.back()))); + } + + CutGenerator generator = CreateStronglyConnectedGraphCutGenerator( + num_nodes, tails, heads, literals, &model); + + // Suppose only 0-1 and 2-3 are in the lp solution (values do not matter). 
+ auto& lp_values = *model.GetOrCreate(); + lp_values.resize(16, 0.0); + lp_values[vars[0]] = 0.5; + lp_values[vars[1]] = 0.5; + lp_values[vars[4]] = 1.0; + lp_values[vars[5]] = 1.0; + LinearConstraintManager manager(&model); + generator.generate_cuts(&manager); + + // We should get two cuts. + EXPECT_EQ(manager.num_cuts(), 2); + EXPECT_THAT(manager.AllConstraints().front().constraint.VarsAsSpan(), + ElementsAre(vars[3], vars[6])); + EXPECT_THAT(manager.AllConstraints().back().constraint.VarsAsSpan(), + ElementsAre(vars[2], vars[7])); +} + +TEST(CreateStronglyConnectedGraphCutGeneratorTest, AnotherExample) { + // This time, the graph is fully connected, but we still detect that {1, 2, 3} + // do not have enough outgoing flow: + // + // 0.5 + // 0 <--> 1 + // ^ | 0.5 + // 0.5 | | 1 and 2 ----> 1 + // v v + // 2 <--- 3 + // 1 + const int num_nodes = 4; + std::vector tails{0, 1, 0, 2, 1, 3, 2}; + std::vector heads{1, 0, 2, 0, 3, 2, 1}; + std::vector values{0.5, 0.0, 0.5, 0.0, 1.0, 1.0, 0.5}; + + Model model; + std::vector literals; + auto& lp_values = *model.GetOrCreate(); + lp_values.resize(16, 0.0); + for (int i = 0; i < values.size(); ++i) { + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + lp_values[model.Add(NewIntegerVariableFromLiteral(literals.back()))] = + values[i]; + } + + CutGenerator generator = CreateStronglyConnectedGraphCutGenerator( + num_nodes, tails, heads, literals, &model); + + LinearConstraintManager manager(&model); + generator.generate_cuts(&manager); + + // The sets {2, 3} and {1, 2, 3} will generate cuts. + // However as an heuristic, we will wait another round to generate {1, 2 ,3}. 
+ EXPECT_EQ(manager.num_cuts(), 1); + EXPECT_THAT(manager.AllConstraints().back().constraint.DebugString(), + ::testing::StartsWith("1 <= 1*X3 1*X6")); +} + +TEST(GenerateInterestingSubsetsTest, BasicExample) { + const int num_nodes = 6; + const std::vector> arcs = {{0, 5}, {2, 3}, {3, 4}}; + + // Note that the order is not important, but is currently fixed. + // This document the actual order. + std::vector subset_data; + std::vector> subsets; + GenerateInterestingSubsets(num_nodes, arcs, + /*stop_at_num_components=*/2, &subset_data, + &subsets); + EXPECT_THAT( + subsets, + ElementsAre(ElementsAre(1), ElementsAre(5), ElementsAre(0), + ElementsAre(5, 0), ElementsAre(3), ElementsAre(2), + ElementsAre(3, 2), ElementsAre(4), ElementsAre(3, 2, 4))); + + // We can call it more than once. + GenerateInterestingSubsets(num_nodes, arcs, + /*stop_at_num_components=*/2, &subset_data, + &subsets); + EXPECT_THAT( + subsets, + ElementsAre(ElementsAre(1), ElementsAre(5), ElementsAre(0), + ElementsAre(5, 0), ElementsAre(3), ElementsAre(2), + ElementsAre(3, 2), ElementsAre(4), ElementsAre(3, 2, 4))); +} + +TEST(CreateFlowCutGeneratorTest, BasicExample) { + // + // /---> 2 + // 0 ---> 1 ^ + // \---> 3 + // + // With a flow of 2 leaving 0 and a flow of 1 requested at 2 and 3. + // On each arc the flow <= max_flow * arc_indicator where max_flow = 2. 
+ const int num_nodes = 4; + std::vector tails{0, 1, 1, 3}; + std::vector heads{1, 2, 3, 2}; + std::vector values{1.0, 0.5, 0.5, 0.0}; + + Model model; + std::vector capacities; + auto& lp_values = *model.GetOrCreate(); + lp_values.resize(16, 0.0); + for (int i = 0; i < values.size(); ++i) { + AffineExpression expr; + expr.var = model.Add(NewIntegerVariable(0, 1)); + expr.coeff = 2; + expr.constant = 0; + capacities.emplace_back(expr); + lp_values[capacities.back().var] = values[i]; + } + + const auto get_flows = [](const std::vector& in_subset, + IntegerValue* min_incoming_flow, + IntegerValue* min_outgoing_flow) { + IntegerValue demand(0); + if (in_subset[0]) demand -= 2; + if (in_subset[2]) demand += 1; + if (in_subset[3]) demand += 1; + *min_incoming_flow = std::max(IntegerValue(0), demand); + *min_outgoing_flow = std::max(IntegerValue(0), -demand); + }; + const CutGenerator generator = CreateFlowCutGenerator( + num_nodes, tails, heads, capacities, get_flows, &model); + + LinearConstraintManager manager(&model); + generator.generate_cuts(&manager); + + // The sets {2} and {3} will generate incoming flow cuts. 
+ EXPECT_EQ(manager.num_cuts(), 2); + EXPECT_THAT(manager.AllConstraints().front().constraint.DebugString(), + ::testing::StartsWith("1 <= 1*X2")); + EXPECT_THAT(manager.AllConstraints().back().constraint.DebugString(), + ::testing::StartsWith("1 <= 1*X1 1*X3")); +} + +TEST(CreateFlowCutGeneratorTest, WithMinusOneArcs) { + // 0 ---> 1 --> + // | + // \ --> + const int num_nodes = 2; + std::vector tails{0, 1, 1}; + std::vector heads{1, -1, -1}; + std::vector values{1.0, 0.5, 0.0}; + + Model model; + std::vector capacities; + auto& lp_values = *model.GetOrCreate(); + lp_values.resize(16, 0.0); + for (int i = 0; i < values.size(); ++i) { + AffineExpression expr; + expr.var = model.Add(NewIntegerVariable(0, 1)); + expr.coeff = 2; + expr.constant = 0; + capacities.emplace_back(expr); + lp_values[capacities.back().var] = values[i]; + } + + const auto get_flows = [](const std::vector& in_subset, + IntegerValue* min_incoming_flow, + IntegerValue* min_outgoing_flow) { + IntegerValue demand(0); + if (in_subset[0]) demand -= 2; + *min_incoming_flow = std::max(IntegerValue(0), demand); + *min_outgoing_flow = std::max(IntegerValue(0), -demand); + }; + const CutGenerator generator = CreateFlowCutGenerator( + num_nodes, tails, heads, capacities, get_flows, &model); + + LinearConstraintManager manager(&model); + generator.generate_cuts(&manager); + + // We artificially put bad LP values so that {1} generate outgoing flow cut. 
+ EXPECT_EQ(manager.num_cuts(), 1); + EXPECT_THAT(manager.AllConstraints().front().constraint.DebugString(), + ::testing::StartsWith("1 <= 1*X1 1*X2")); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/sat_base_test.cc b/ortools/sat/sat_base_test.cc new file mode 100644 index 0000000000..391efb1d09 --- /dev/null +++ b/ortools/sat/sat_base_test.cc @@ -0,0 +1,74 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/sat_base.h" + +#include + +#include "gtest/gtest.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +TEST(BooleanVariableTest, Api) { + BooleanVariable var1(1); + BooleanVariable var2(2); + BooleanVariable var3(2); + EXPECT_NE(var1, var2); + EXPECT_EQ(var2, var3); +} + +TEST(LiteralTest, Api) { + BooleanVariable var1(1); + BooleanVariable var2(2); + Literal l1(var1, true); + Literal l2(var2, false); + Literal l3 = l2.Negated(); + EXPECT_EQ(l1.Variable(), var1); + EXPECT_EQ(l2.Variable(), var2); + EXPECT_EQ(l3.Variable(), var2); + EXPECT_TRUE(l1.IsPositive()); + EXPECT_TRUE(l2.IsNegative()); + EXPECT_TRUE(l3.IsPositive()); +} + +TEST(VariablesAssignmentTest, Api) { + BooleanVariable var0(0); + BooleanVariable var1(1); + BooleanVariable var2(2); + + VariablesAssignment assignment; + assignment.Resize(3); + assignment.AssignFromTrueLiteral(Literal(var0, true)); + assignment.AssignFromTrueLiteral(Literal(var1, false)); + + EXPECT_TRUE(assignment.LiteralIsTrue(Literal(var0, true))); + EXPECT_TRUE(assignment.LiteralIsFalse(Literal(var0, false))); + EXPECT_TRUE(assignment.LiteralIsTrue(Literal(var1, false))); + EXPECT_FALSE(assignment.VariableIsAssigned(var2)); + + assignment.UnassignLiteral(Literal(var0, true)); + EXPECT_FALSE(assignment.VariableIsAssigned(var0)); + + assignment.AssignFromTrueLiteral(Literal(var2, false)); + EXPECT_TRUE(assignment.LiteralIsTrue(Literal(var2, false))); + EXPECT_FALSE(assignment.LiteralIsTrue(Literal(var2, true))); + EXPECT_TRUE(assignment.LiteralIsFalse(Literal(var2, true))); + EXPECT_FALSE(assignment.LiteralIsFalse(Literal(var2, false))); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/sat_inprocessing_test.cc b/ortools/sat/sat_inprocessing_test.cc new file mode 100644 index 0000000000..291ca0aff9 --- /dev/null +++ b/ortools/sat/sat_inprocessing_test.cc @@ -0,0 +1,287 @@ +// Copyright 2010-2024 
Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/sat_inprocessing.h" + +#include +#include + +#include "absl/container/inlined_vector.h" +#include "absl/log/check.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/sat/clause.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_solver.h" + +namespace operations_research { +namespace sat { +namespace { + +TEST(InprocessingTest, ClauseCleanupWithFixedVariables) { + Model model; + auto* sat_solver = model.GetOrCreate(); + auto* clause_manager = model.GetOrCreate(); + auto* inprocessing = model.GetOrCreate(); + + // Lets add some clauses. + sat_solver->SetNumVariables(100); + EXPECT_TRUE(clause_manager->AddClause(Literals({+1, +2, +3, +4}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+1, -2, -3, +5}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+2, -2, -3, +1, +1}))); + + // Nothing fixed, we don't even look at the clause. + const bool log_info = true; + EXPECT_TRUE(inprocessing->DetectEquivalencesAndStamp(false, log_info)); + EXPECT_TRUE(inprocessing->RemoveFixedAndEquivalentVariables(log_info)); + { + const auto& all_clauses = clause_manager->AllClausesInCreationOrder(); + EXPECT_EQ(all_clauses.size(), 3); + EXPECT_EQ(all_clauses[2]->AsSpan(), Literals({+2, -2, -3, +1, +1})); + } + + // Lets fix 3. 
+ CHECK(sat_solver->AddUnitClause(Literal(+3))); + EXPECT_TRUE(sat_solver->FinishPropagation()); + EXPECT_TRUE(inprocessing->DetectEquivalencesAndStamp(false, log_info)); + EXPECT_TRUE(inprocessing->RemoveFixedAndEquivalentVariables(log_info)); + { + const auto& all_clauses = clause_manager->AllClausesInCreationOrder(); + EXPECT_EQ(all_clauses.size(), 3); + EXPECT_EQ(all_clauses[0]->AsSpan(), Literals({})); // +3 true. + EXPECT_EQ(all_clauses[1]->AsSpan(), Literals({+1, -2, +5})); + EXPECT_EQ(all_clauses[2]->AsSpan(), Literals({})); // trivially true. + } +} + +TEST(InprocessingTest, ClauseCleanupWithEquivalence) { + Model model; + auto* sat_solver = model.GetOrCreate(); + auto* clause_manager = model.GetOrCreate(); + auto* implication_graph = model.GetOrCreate(); + auto* inprocessing = model.GetOrCreate(); + + // Lets add some clauses. + sat_solver->SetNumVariables(100); + EXPECT_TRUE(clause_manager->AddClause(Literals({+1, +2, +5, +4}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+1, -2, -3, +5}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+2, +6, -3, +1, +1}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+2, +6, -3, +1, -5}))); + + // Lets make 3 and 5 equivalent. + implication_graph->AddBinaryClause(Literal(-3), Literal(+5)); + implication_graph->AddBinaryClause(Literal(+3), Literal(-5)); + + const bool log_info = true; + EXPECT_TRUE(inprocessing->DetectEquivalencesAndStamp(false, log_info)); + EXPECT_TRUE(inprocessing->RemoveFixedAndEquivalentVariables(log_info)); + { + const auto& all_clauses = clause_manager->AllClausesInCreationOrder(); + EXPECT_EQ(all_clauses.size(), 4); + EXPECT_EQ(all_clauses[0]->AsSpan(), Literals({+1, +2, +3, +4})); + EXPECT_EQ(all_clauses[1]->AsSpan(), Literals({})); + EXPECT_EQ(all_clauses[3]->AsSpan(), Literals({+2, +6, -3, +1})); + + // Note that the +1 +1 is not simplified because this clause do not + // need to be rewritten otherwise and we assume initial simplification. 
+ EXPECT_EQ(all_clauses[2]->AsSpan(), Literals({+2, +6, -3, +1, +1})); + } +} + +TEST(InprocessingTest, ClauseSubsumptionAndStrengthening) { + Model model; + auto* sat_solver = model.GetOrCreate(); + auto* clause_manager = model.GetOrCreate(); + auto* inprocessing = model.GetOrCreate(); + + // Lets add some clauses. + // Note that the order currently matter for what is left. + // + // Note that currently the binary clauses are not reprocessed. + // TODO(user): Maybe we should so that we always end up with a reduced set. + sat_solver->SetNumVariables(100); + EXPECT_TRUE(clause_manager->AddClause(Literals({+1, +3, +2}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+1, +2, -3}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+1, +3, +2}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+1, -2, -3}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+2, +6, -3, +1, +1}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({-3, +6, +2, +1, -5}))); + + const bool log_info = true; + EXPECT_TRUE(inprocessing->DetectEquivalencesAndStamp(false, log_info)); + EXPECT_TRUE(inprocessing->SubsumeAndStrenghtenRound(log_info)); + { + // This function remove empty clauses. + const auto& all_clauses = clause_manager->AllClausesInCreationOrder(); + + // Depending on the order in which clauses are processed (which can + // change as we rely on std::sort()), we have a few cases. + if (all_clauses.size() == 1) { + EXPECT_EQ(all_clauses[0]->AsSpan(), Literals({+1, +2, -3})); + + // We added {+1, +2} and {+1, -3} there. + // TODO(user): make sure we don't add twice the implications. 
+ auto* implication_graph = model.GetOrCreate(); + EXPECT_EQ(implication_graph->num_implications(), 6); + EXPECT_EQ(implication_graph->Implications(Literal(-1)).size(), 3); + EXPECT_THAT(implication_graph->Implications(Literal(-1)), + ::testing::UnorderedElementsAre(Literal(+2), Literal(+2), + Literal(-3))); + } else { + EXPECT_GE(all_clauses.size(), 3); + EXPECT_LE(all_clauses.size(), 4); + EXPECT_EQ(all_clauses[0]->AsSpan(), Literals({+1, +3, +2})); + EXPECT_EQ(all_clauses[1]->AsSpan(), Literals({+1, -2, -3})); + + // Depending on the implication added, we don't get the same clauses. + auto* implication_graph = model.GetOrCreate(); + EXPECT_EQ(implication_graph->num_implications(), 2); + EXPECT_EQ(implication_graph->Implications(Literal(-1)).size(), 1); + if (implication_graph->Implications(Literal(-1))[0] == Literal(+2)) { + EXPECT_EQ(all_clauses[2]->AsSpan(), Literals({+2, +6, +1, +1})); + if (all_clauses.size() == 4) { + EXPECT_EQ(all_clauses[3]->AsSpan(), Literals({+6, +2, +1, -5})); + } + } else { + EXPECT_EQ(implication_graph->Implications(Literal(-1))[0], Literal(-3)); + EXPECT_EQ(all_clauses[2]->AsSpan(), Literals({+6, -3, +1, +1})); + if (all_clauses.size() == 4) { + EXPECT_EQ(all_clauses[3]->AsSpan(), Literals({-3, +6, +1, -5})); + } + } + } + } +} + +TEST(StampingSimplifierTest, StampConstruction) { + Model model; + auto* sat_solver = model.GetOrCreate(); + auto* implication_graph = model.GetOrCreate(); + auto* simplifier = model.GetOrCreate(); + + // Lets add some clauses. + // Note that the order currently matter for what is left. 
+ sat_solver->SetNumVariables(100); + implication_graph->AddImplication(Literal(+1), Literal(+2)); + implication_graph->AddImplication(Literal(+1), Literal(+3)); + implication_graph->AddImplication(Literal(+1), Literal(+4)); + implication_graph->AddImplication(Literal(+2), Literal(+5)); + implication_graph->AddImplication(Literal(+2), Literal(+6)); + implication_graph->AddImplication(Literal(+3), Literal(+7)); + implication_graph->AddImplication(Literal(+4), Literal(+6)); + + EXPECT_TRUE(implication_graph->DetectEquivalences(true)); + + // Lets test some implications. + simplifier->SampleTreeAndFillParent(); + simplifier->ComputeStamps(); + EXPECT_TRUE(simplifier->ImplicationIsInTree(Literal(+1), Literal(+2))); + EXPECT_TRUE(simplifier->ImplicationIsInTree(Literal(+1), Literal(+5))); + EXPECT_TRUE(simplifier->ImplicationIsInTree(Literal(+1), Literal(+6))); + EXPECT_TRUE(simplifier->ImplicationIsInTree(Literal(+1), Literal(+7))); + EXPECT_TRUE(simplifier->ImplicationIsInTree(Literal(-7), Literal(-3))); +} + +TEST(StampingSimplifierTest, BasicSimplification) { + Model model; + auto* sat_solver = model.GetOrCreate(); + auto* clause_manager = model.GetOrCreate(); + auto* implication_graph = model.GetOrCreate(); + auto* simplifier = model.GetOrCreate(); + + // Lets add some clauses. + // Note that the order currently matter for what is left. 
+ sat_solver->SetNumVariables(100); + implication_graph->AddImplication(Literal(+1), Literal(+2)); + implication_graph->AddImplication(Literal(+1), Literal(+3)); + implication_graph->AddImplication(Literal(+1), Literal(+4)); + implication_graph->AddImplication(Literal(+2), Literal(+5)); + implication_graph->AddImplication(Literal(+2), Literal(+6)); + implication_graph->AddImplication(Literal(+3), Literal(+7)); + implication_graph->AddImplication(Literal(+4), Literal(+6)); + + EXPECT_TRUE(implication_graph->DetectEquivalences(true)); + + // Lets add some clause that should be simplifiable + EXPECT_TRUE(clause_manager->AddClause(Literals({+1, +7, +8, +9}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+1, -6, +8, +9}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({-3, -7, +8, +9}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({-3, +7, +8, +9}))); + + // Lets test some implications. + EXPECT_TRUE(simplifier->DoOneRound(/*log_info=*/true)); + + // Results. I cover all 4 possibilities, 2 strenghtening for clause 0 and 2, + // one subsumption for clause 3 and nothing for clause 1. + const auto& all_clauses = clause_manager->AllClausesInCreationOrder(); + EXPECT_EQ(all_clauses.size(), 4); + EXPECT_EQ(all_clauses[0]->AsSpan(), Literals({+7, +8, +9})); + EXPECT_EQ(all_clauses[1]->AsSpan(), Literals({+1, -6, +8, +9})); + EXPECT_EQ(all_clauses[2]->AsSpan(), Literals({-3, +8, +9})); + EXPECT_EQ(all_clauses[3]->AsSpan(), Literals({})); +} + +TEST(BlockedClauseSimplifierTest, BasicSimplification) { + Model model; + auto* sat_solver = model.GetOrCreate(); + auto* clause_manager = model.GetOrCreate(); + auto* implication_graph = model.GetOrCreate(); + auto* simplifier = model.GetOrCreate(); + + // Lets add some clauses. + // Note that the order currently matter for what is left. 
+ sat_solver->SetNumVariables(100); + implication_graph->AddImplication(Literal(+1), Literal(-7)); + implication_graph->AddImplication(Literal(+1), Literal(-8)); + implication_graph->AddImplication(Literal(+1), Literal(-9)); + + // Lets add some clause that should be blocked + EXPECT_TRUE(clause_manager->AddClause(Literals({-1, +7, -8, +9}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+1, +7, +8, +9}))); + + simplifier->DoOneRound(/*log_info=*/true); + + clause_manager->DeleteRemovedClauses(); + const auto& all_clauses = clause_manager->AllClausesInCreationOrder(); + EXPECT_EQ(all_clauses.size(), 0); +} + +TEST(BoundedVariableEliminationTest, BasicSimplification) { + Model model; + auto* sat_solver = model.GetOrCreate(); + auto* clause_manager = model.GetOrCreate(); + auto* simplifier = model.GetOrCreate(); + + // Lets add some clauses. + sat_solver->SetNumVariables(100); + EXPECT_TRUE(clause_manager->AddClause(Literals({+1, +2, +3, +7}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+3, +4, +5, +7}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({-1, +4, +5, -7}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+3, -2, +5, -7}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+2, +4, -3, -7}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+2, +4, -5, -7}))); + EXPECT_TRUE(clause_manager->AddClause(Literals({+2, +3, -4, -7}))); + + simplifier->DoOneRound(/*log_info=*/true); + + // The problem is so simple that everyting should be simplified. 
+ clause_manager->DeleteRemovedClauses(); + const auto& all_clauses = clause_manager->AllClausesInCreationOrder(); + EXPECT_EQ(all_clauses.size(), 0); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/scheduling_cuts_test.cc b/ortools/sat/scheduling_cuts_test.cc new file mode 100644 index 0000000000..23a8b92bac --- /dev/null +++ b/ortools/sat/scheduling_cuts_test.cc @@ -0,0 +1,576 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/scheduling_cuts.h" + +#include + +#include +#include +#include +#include + +#include "absl/base/log_severity.h" +#include "absl/random/random.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/strong_vector.h" +#include "ortools/sat/cp_model.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_solver.h" +#include "ortools/sat/cuts.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/intervals.h" +#include "ortools/sat/linear_constraint.h" +#include "ortools/sat/linear_constraint_manager.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::EndsWith; +using ::testing::StartsWith; + +TEST(CumulativeEnergyCutGenerator, TestCutTimeTableGenerator) { + Model model; + + const IntegerVariable start1 = model.Add(NewIntegerVariable(0, 3)); + const IntegerVariable end1 = model.Add(NewIntegerVariable(7, 10)); + const IntegerVariable size1 = model.Add(NewIntegerVariable(7, 7)); + const IntervalVariable i1 = model.Add(NewInterval(start1, end1, size1)); + + const BooleanVariable b = model.Add(NewBooleanVariable()); + const IntegerVariable b_view = model.Add(NewIntegerVariable(0, 1)); + auto* integer_encoder = model.GetOrCreate(); + integer_encoder->AssociateToIntegerEqualValue(Literal(b, true), b_view, + IntegerValue(1)); + + const IntegerVariable start2 = model.Add(NewIntegerVariable(3, 6)); + const IntegerVariable end2 = model.Add(NewIntegerVariable(10, 13)); + const IntegerVariable size2 = model.Add(NewIntegerVariable(7, 7)); + const IntervalVariable i2 = + model.Add(NewOptionalInterval(start2, end2, size2, Literal(b, true))); + + const IntegerVariable demand1 = model.Add(NewIntegerVariable(5, 10)); + const IntegerVariable demand2 = model.Add(NewIntegerVariable(3, 10)); + const IntegerVariable capacity = 
model.Add(NewIntegerVariable(10, 10)); + SchedulingConstraintHelper* helper = + model.GetOrCreate()->GetOrCreateHelper({i1, i2}); + SchedulingDemandHelper* demands_helper = + new SchedulingDemandHelper({demand1, demand2}, helper, &model); + model.TakeOwnership(demands_helper); + CutGenerator cumulative = CreateCumulativeTimeTableCutGenerator( + helper, demands_helper, capacity, &model); + LinearConstraintManager* const manager = + model.GetOrCreate(); + const IntegerVariable num_vars = + model.GetOrCreate()->NumIntegerVariables(); + + auto& lp_values = *model.GetOrCreate(); + lp_values.resize(num_vars.value() * 2, 0.0); + lp_values[start1] = 3.0; // x0 + lp_values[end1] = 10.0; // x1 + lp_values[size1] = 7.0; // x2 + lp_values[b_view] = 1.0; // x3 + lp_values[start2] = 6.0; // x4 + lp_values[end2] = 13.0; // x5 + lp_values[size2] = 7.0; // x6 + lp_values[demand1] = 8.0; // x7 + lp_values[demand2] = 7.0; // x8 + lp_values[capacity] = 10.0; // x9 + + cumulative.generate_cuts(manager); + ASSERT_EQ(1, manager->num_cuts()); + + // 3*X3 1*X7 -1*X9 <= 0 -> Normalized to 3*X3 1*X7 <= 10 + EXPECT_THAT(manager->AllConstraints().front().constraint.DebugString(), + EndsWith("3*X3 1*X7 <= 10")); +} + +TEST(CumulativeEnergyCutGenerator, SameDemand) { + Model model; + + const IntegerVariable start1 = model.Add(NewIntegerVariable(0, 3)); + const IntegerVariable end1 = model.Add(NewIntegerVariable(7, 10)); + const IntegerVariable size1 = model.Add(NewIntegerVariable(7, 7)); + const IntervalVariable i1 = model.Add(NewInterval(start1, end1, size1)); + + const IntegerVariable start2 = model.Add(NewIntegerVariable(3, 6)); + const IntegerVariable end2 = model.Add(NewIntegerVariable(10, 13)); + const IntegerVariable size2 = model.Add(NewIntegerVariable(7, 7)); + const IntervalVariable i2 = model.Add(NewInterval(start2, end2, size2)); + + const IntegerVariable start3 = model.Add(NewIntegerVariable(4, 8)); + const IntegerVariable end3 = model.Add(NewIntegerVariable(11, 15)); + const 
IntegerVariable size3 = model.Add(NewIntegerVariable(7, 7)); + const IntervalVariable i3 = model.Add(NewInterval(start3, end3, size3)); + + const IntegerVariable demand = model.Add(NewIntegerVariable(5, 10)); + const IntegerVariable demand2 = model.Add(NewIntegerVariable(5, 10)); + const IntegerVariable capacity = model.Add(NewIntegerVariable(10, 10)); + + LinearExpression e1; + e1.vars.push_back(demand); + e1.coeffs.push_back(IntegerValue(7)); + LinearExpression e2; + e2.vars.push_back(demand2); + e2.coeffs.push_back(IntegerValue(7)); + + SchedulingConstraintHelper* helper = + model.GetOrCreate()->GetOrCreateHelper({i1, i2, i3}); + SchedulingDemandHelper* demands_helper = + new SchedulingDemandHelper({demand, demand, demand2}, helper, &model); + model.TakeOwnership(demands_helper); + + CutGenerator cumulative = CreateCumulativeEnergyCutGenerator( + helper, demands_helper, capacity, std::optional(), + &model); + LinearConstraintManager* const manager = + model.GetOrCreate(); + const IntegerVariable num_vars = + model.GetOrCreate()->NumIntegerVariables(); + + auto& lp_values = *model.GetOrCreate(); + lp_values.resize(num_vars.value() * 2, 0.0); + lp_values[start1] = 3.0; // x0 + lp_values[end1] = 10.0; // x1 + lp_values[size1] = 7.0; // x2 + lp_values[start2] = 6.0; // x3 + lp_values[end2] = 13.0; // x4 + lp_values[size2] = 7.0; // x5 + lp_values[start3] = 6.0; // x6 + lp_values[end3] = 13.0; // x7 + lp_values[size3] = 7.0; // x8 + lp_values[demand] = 8.0; // x9 + lp_values[demand2] = 8.0; // x10 + lp_values[capacity] = 10.0; // x11 + + cumulative.generate_cuts(manager); + ASSERT_EQ(5, manager->num_cuts()); + + // CumulativeEnergy cut. 
+ EXPECT_THAT( + manager->AllConstraints()[LinearConstraintManager::ConstraintIndex(0)] + .constraint.DebugString(), + EndsWith("1*X9 <= 5")); + EXPECT_THAT( + manager->AllConstraints()[LinearConstraintManager::ConstraintIndex(1)] + .constraint.DebugString(), + EndsWith("1*X9 1*X10 <= 10")); + EXPECT_THAT( + manager->AllConstraints()[LinearConstraintManager::ConstraintIndex(2)] + .constraint.DebugString(), + EndsWith("3*X9 2*X10 <= 30")); + EXPECT_THAT( + manager->AllConstraints()[LinearConstraintManager::ConstraintIndex(3)] + .constraint.DebugString(), + EndsWith("5*X9 2*X10 <= 40")); + EXPECT_THAT( + manager->AllConstraints()[LinearConstraintManager::ConstraintIndex(4)] + .constraint.DebugString(), + EndsWith("2*X9 3*X10 <= 30")); +} + +TEST(CumulativeEnergyCutGenerator, SameDemandTimeTableGenerator) { + Model model; + + const IntegerVariable start1 = model.Add(NewIntegerVariable(0, 3)); + const IntegerVariable end1 = model.Add(NewIntegerVariable(7, 10)); + const IntegerVariable size1 = model.Add(NewIntegerVariable(7, 7)); + const IntervalVariable i1 = model.Add(NewInterval(start1, end1, size1)); + + const IntegerVariable start2 = model.Add(NewIntegerVariable(3, 6)); + const IntegerVariable end2 = model.Add(NewIntegerVariable(10, 13)); + const IntegerVariable size2 = model.Add(NewIntegerVariable(7, 7)); + const IntervalVariable i2 = model.Add(NewInterval(start2, end2, size2)); + + const IntegerVariable start3 = model.Add(NewIntegerVariable(4, 8)); + const IntegerVariable end3 = model.Add(NewIntegerVariable(11, 15)); + const IntegerVariable size3 = model.Add(NewIntegerVariable(7, 7)); + const IntervalVariable i3 = model.Add(NewInterval(start3, end3, size3)); + + const IntegerVariable demand = model.Add(NewIntegerVariable(5, 10)); + const IntegerVariable demand2 = model.Add(NewIntegerVariable(5, 10)); + const IntegerVariable capacity = model.Add(NewIntegerVariable(10, 10)); + + SchedulingConstraintHelper* helper = + model.GetOrCreate()->GetOrCreateHelper({i1, i2, 
i3}); + SchedulingDemandHelper* demands_helper = + new SchedulingDemandHelper({demand, demand, demand2}, helper, &model); + model.TakeOwnership(demands_helper); + CutGenerator cumulative = CreateCumulativeTimeTableCutGenerator( + helper, demands_helper, capacity, &model); + LinearConstraintManager* const manager = + model.GetOrCreate(); + const IntegerVariable num_vars = + model.GetOrCreate()->NumIntegerVariables(); + + auto& lp_values = *model.GetOrCreate(); + lp_values.resize(num_vars.value() * 2, 0.0); + lp_values[start1] = 3.0; // x0 + lp_values[end1] = 10.0; // x1 + lp_values[size1] = 7.0; // x2 + lp_values[start2] = 6.0; // x3 + lp_values[end2] = 13.0; // x4 + lp_values[size2] = 7.0; // x5 + lp_values[start3] = 6.0; // x6 + lp_values[end3] = 13.0; // x7 + lp_values[size3] = 7.0; // x8 + lp_values[demand] = 8.0; // x9 + lp_values[demand2] = 8.0; // x10 + lp_values[capacity] = 10.0; // x11 + + cumulative.generate_cuts(manager); + ASSERT_EQ(2, manager->num_cuts()); + + // 1*X9 1*X9 <= X11 -> Normalized to 1*X9 <= 5 + EXPECT_THAT(manager->AllConstraints().front().constraint.DebugString(), + EndsWith("1*X9 <= 5")); + // 1*X9 1*X10 <= X11 -> Normalized to 1*X9 1*X10 <= 10 + EXPECT_THAT(manager->AllConstraints().back().constraint.DebugString(), + EndsWith("1*X9 1*X10 <= 10")); +} + +TEST(CumulativeEnergyCutGenerator, DetectedPrecedence) { + Model model; + auto* intervals_repository = model.GetOrCreate(); + + const IntegerValue one(1); + const IntegerVariable start1 = model.Add(NewIntegerVariable(0, 3)); + const IntegerValue size1(3); + const IntervalVariable i1 = intervals_repository->CreateInterval( + start1, AffineExpression(start1, one, size1), AffineExpression(size1), + kNoLiteralIndex, /*add_linear_relation=*/false); + + const IntegerVariable start2 = model.Add(NewIntegerVariable(1, 5)); + const IntegerValue size2(4); + const IntervalVariable i2 = intervals_repository->CreateInterval( + start2, AffineExpression(start2, one, size2), AffineExpression(size2), + 
kNoLiteralIndex, /*add_linear_relation=*/false); + CutGenerator disjunctive = CreateNoOverlapPrecedenceCutGenerator( + intervals_repository->GetOrCreateHelper({ + i1, + i2, + }), + &model); + LinearConstraintManager* const manager = + model.GetOrCreate(); + const IntegerVariable num_vars = + model.GetOrCreate()->NumIntegerVariables(); + + auto& lp_values = *model.GetOrCreate(); + lp_values.resize(num_vars.value() * 2, 0.0); + lp_values[start1] = 0.0; + lp_values[NegationOf(start1)] = 0.0; + lp_values[start2] = 2.0; + lp_values[NegationOf(start2)] = -2.0; + + disjunctive.generate_cuts(manager); + ASSERT_EQ(1, manager->num_cuts()); + + EXPECT_THAT(manager->AllConstraints().front().constraint.DebugString(), + EndsWith("1*X0 -1*X1 <= -3")); +} + +TEST(CumulativeEnergyCutGenerator, DetectedPrecedenceRev) { + Model model; + auto* intervals_repository = model.GetOrCreate(); + + const IntegerValue one(1); + const IntegerVariable start1 = model.Add(NewIntegerVariable(0, 3)); + const IntegerValue size1(3); + const IntervalVariable i1 = intervals_repository->CreateInterval( + start1, AffineExpression(start1, one, size1), AffineExpression(size1), + kNoLiteralIndex, /*add_linear_relation=*/false); + + const IntegerVariable start2 = model.Add(NewIntegerVariable(1, 5)); + const IntegerValue size2(4); + const IntervalVariable i2 = intervals_repository->CreateInterval( + start2, AffineExpression(start2, one, size2), AffineExpression(size2), + kNoLiteralIndex, /*add_linear_relation=*/false); + + CutGenerator disjunctive = CreateNoOverlapPrecedenceCutGenerator( + intervals_repository->GetOrCreateHelper({ + i2, + i1, + }), + &model); + LinearConstraintManager* const manager = + model.GetOrCreate(); + const IntegerVariable num_vars = + model.GetOrCreate()->NumIntegerVariables(); + + auto& lp_values = *model.GetOrCreate(); + lp_values.resize(num_vars.value() * 2, 0.0); + lp_values[start1] = 0.0; + lp_values[NegationOf(start1)] = 0.0; + lp_values[start2] = 2.0; + 
lp_values[NegationOf(start2)] = -2.0; + + disjunctive.generate_cuts(manager); + ASSERT_EQ(1, manager->num_cuts()); + + EXPECT_THAT(manager->AllConstraints().front().constraint.DebugString(), + EndsWith("1*X0 -1*X1 <= -3")); +} + +TEST(CumulativeEnergyCutGenerator, DisjunctionOnStart) { + Model model; + auto* intervals_repository = model.GetOrCreate(); + + const IntegerValue one(1); + const IntegerVariable start1 = model.Add(NewIntegerVariable(0, 5)); + const IntegerValue size1(3); + const IntervalVariable i1 = intervals_repository->CreateInterval( + start1, AffineExpression(start1, one, size1), AffineExpression(size1), + kNoLiteralIndex, /*add_linear_relation=*/false); + + const IntegerVariable start2 = model.Add(NewIntegerVariable(1, 5)); + const IntegerValue size2(4); + const IntervalVariable i2 = intervals_repository->CreateInterval( + start2, AffineExpression(start2, one, size2), AffineExpression(size2), + kNoLiteralIndex, /*add_linear_relation=*/false); + + CutGenerator disjunctive = CreateNoOverlapPrecedenceCutGenerator( + intervals_repository->GetOrCreateHelper({ + i2, + i1, + }), + &model); + LinearConstraintManager* const manager = + model.GetOrCreate(); + const IntegerVariable num_vars = + model.GetOrCreate()->NumIntegerVariables(); + + auto& lp_values = *model.GetOrCreate(); + lp_values.resize(num_vars.value() * 2, 0.0); + lp_values[start1] = 0.0; + lp_values[NegationOf(start1)] = 0.0; + lp_values[start2] = 2.0; + lp_values[NegationOf(start2)] = -2.0; + + disjunctive.generate_cuts(manager); + ASSERT_EQ(1, manager->num_cuts()); + + EXPECT_THAT(manager->AllConstraints().front().constraint.DebugString(), + StartsWith("15 <= 2*X0 5*X1")); +} + +TEST(ComputeMinSumOfEndMinsTest, CombinationOf3) { + Model model; + auto* intervals_repository = model.GetOrCreate(); + + IntegerValue one(1); + IntegerValue two(2); + + const IntegerVariable start1 = model.Add(NewIntegerVariable(0, 10)); + const IntegerValue size1(3); + const IntervalVariable i1 = 
intervals_repository->CreateInterval( + start1, AffineExpression(start1, one, size1), size1, kNoLiteralIndex, + /*add_linear_relation=*/false); + + const IntegerVariable start2 = model.Add(NewIntegerVariable(0, 10)); + const IntegerValue size2(4); + const IntervalVariable i2 = intervals_repository->CreateInterval( + start2, AffineExpression(start2, one, size2), size2, kNoLiteralIndex, + /*add_linear_relation=*/false); + + const IntegerVariable start3 = model.Add(NewIntegerVariable(0, 10)); + const IntegerValue size3(5); + const IntervalVariable i3 = intervals_repository->CreateInterval( + start3, AffineExpression(start3, one, size3), size3, kNoLiteralIndex, + /*add_linear_relation=*/false); + + SchedulingConstraintHelper* helper = + model.GetOrCreate()->GetOrCreateHelper({i1, i2, i3}); + CtEvent e1(0, helper); + e1.y_size_min = two; + CtEvent e2(1, helper); + e2.y_size_min = one; + CtEvent e3(2, helper); + e3.y_size_min = one; + std::vector events = {{0, e1}, {1, e2}, {1, e3}}; + + IntegerValue min_sum_of_end_mins(0); + IntegerValue min_sum_of_weighted_end_mins(0); + ASSERT_TRUE(ComputeMinSumOfWeightedEndMins( + events, two, min_sum_of_end_mins, min_sum_of_weighted_end_mins, + kMinIntegerValue, kMinIntegerValue)); + EXPECT_EQ(min_sum_of_end_mins, 17); + EXPECT_EQ(min_sum_of_weighted_end_mins, 21); +} + +TEST(ComputeMinSumOfEndMinsTest, CombinationOf3ConstraintStart) { + Model model; + auto* intervals_repository = model.GetOrCreate(); + + IntegerValue one(1); + IntegerValue two(2); + + const IntegerVariable start1 = model.Add(NewIntegerVariable(0, 3)); + const IntegerValue size1(3); + const IntervalVariable i1 = intervals_repository->CreateInterval( + start1, AffineExpression(start1, one, size1), size1, kNoLiteralIndex, + /*add_linear_relation=*/false); + + const IntegerVariable start2 = model.Add(NewIntegerVariable(0, 10)); + const IntegerValue size2(4); + const IntervalVariable i2 = intervals_repository->CreateInterval( + start2, AffineExpression(start2, one, 
size2), size2, kNoLiteralIndex, + /*add_linear_relation=*/false); + + const IntegerVariable start3 = model.Add(NewIntegerVariable(0, 10)); + const IntegerValue size3(5); + const IntervalVariable i3 = intervals_repository->CreateInterval( + start3, AffineExpression(start3, one, size3), size3, kNoLiteralIndex, + /*add_linear_relation=*/false); + + SchedulingConstraintHelper* helper = + model.GetOrCreate()->GetOrCreateHelper({i1, i2, i3}); + CtEvent e1(0, helper); + e1.y_size_min = two; + CtEvent e2(1, helper); + e2.y_size_min = one; + CtEvent e3(2, helper); + e3.y_size_min = one; + std::vector events = {{0, e1}, {1, e2}, {2, e3}}; + + IntegerValue min_sum_of_end_mins(0); + IntegerValue min_sum_of_weighted_end_mins(0); + ASSERT_TRUE(ComputeMinSumOfWeightedEndMins( + events, two, min_sum_of_end_mins, min_sum_of_weighted_end_mins, + kMinIntegerValue, kMinIntegerValue)); + EXPECT_EQ(min_sum_of_end_mins, 18); + EXPECT_EQ(min_sum_of_weighted_end_mins, 21); +} + +TEST(ComputeMinSumOfEndMinsTest, Infeasible) { + Model model; + auto* intervals_repository = model.GetOrCreate(); + + IntegerValue one(1); + IntegerValue two(2); + + const IntegerVariable start1 = model.Add(NewIntegerVariable(1, 3)); + const IntegerValue size1(3); + const IntervalVariable i1 = intervals_repository->CreateInterval( + start1, AffineExpression(start1, one, size1), size1, kNoLiteralIndex, + /*add_linear_relation=*/false); + + const IntegerVariable start2 = model.Add(NewIntegerVariable(0, 3)); + const IntegerValue size2(4); + const IntervalVariable i2 = intervals_repository->CreateInterval( + start2, AffineExpression(start2, one, size2), size2, kNoLiteralIndex, + /*add_linear_relation=*/false); + + const IntegerVariable start3 = model.Add(NewIntegerVariable(0, 3)); + const IntegerValue size3(5); + const IntervalVariable i3 = intervals_repository->CreateInterval( + start3, AffineExpression(start3, one, size3), size3, kNoLiteralIndex, + /*add_linear_relation=*/false); + + SchedulingConstraintHelper* 
helper = + model.GetOrCreate()->GetOrCreateHelper({i1, i2, i3}); + CtEvent e1(0, helper); + e1.y_size_min = two; + CtEvent e2(1, helper); + e2.y_size_min = one; + CtEvent e3(2, helper); + e3.y_size_min = one; + std::vector events = {{0, e1}, {1, e2}, {2, e3}}; + + IntegerValue min_sum_of_end_mins(0); + IntegerValue min_sum_of_weighted_end_mins(0); + ASSERT_FALSE(ComputeMinSumOfWeightedEndMins( + events, two, min_sum_of_end_mins, min_sum_of_weighted_end_mins, + kMinIntegerValue, kMinIntegerValue)); +} + +int64_t ExactMakespan(const std::vector& sizes, std::vector& demands, + int capacity) { + const int64_t kHorizon = 1000; + CpModelBuilder builder; + LinearExpr obj; + CumulativeConstraint cumul = builder.AddCumulative(capacity); + for (int i = 0; i < sizes.size(); ++i) { + IntVar s = builder.NewIntVar({0, kHorizon}); + IntervalVar v = builder.NewFixedSizeIntervalVar(s, sizes[i]); + obj += s + sizes[i]; + cumul.AddDemand(v, demands[i]); + } + builder.Minimize(obj); + const CpSolverResponse response = + SolveWithParameters(builder.Build(), "num_search_workers:8"); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + return static_cast(response.objective_value()); +} + +int64_t ExactMakespanBruteForce(absl::Span sizes, + std::vector& demands, int capacity) { + const int64_t kHorizon = 1000; + Model model; + auto* intervals_repository = model.GetOrCreate(); + IntegerValue one(1); + + std::vector intervals; + for (int i = 0; i < sizes.size(); ++i) { + const IntegerVariable start = model.Add(NewIntegerVariable(0, kHorizon)); + const IntegerValue size(sizes[i]); + const IntervalVariable interval = intervals_repository->CreateInterval( + start, AffineExpression(start, one, size), size, kNoLiteralIndex, + /*add_linear_relation=*/false); + intervals.push_back(interval); + } + + SchedulingConstraintHelper* helper = + model.GetOrCreate()->GetOrCreateHelper(intervals); + std::vector events; + for (int i = 0; i < demands.size(); ++i) { + CtEvent e(i, helper); + e.y_size_min 
= demands[i]; + events.emplace_back(i, e); + } + + IntegerValue min_sum_of_end_mins(0); + IntegerValue min_sum_of_weighted_end_mins(0); + EXPECT_TRUE(ComputeMinSumOfWeightedEndMins( + events, IntegerValue(capacity), min_sum_of_end_mins, + min_sum_of_weighted_end_mins, kMinIntegerValue, kMinIntegerValue)); + return min_sum_of_end_mins.value(); +} + +TEST(ComputeMinSumOfEndMinsTest, RandomCases) { + absl::BitGen random; + const int kNumTests = DEBUG_MODE ? 100 : 1000; + const int kNumTasks = 7; + for (int loop = 0; loop < kNumTests; ++loop) { + const int capacity = absl::Uniform(random, 10, 30); + std::vector sizes; + std::vector demands; + for (int t = 0; t < kNumTasks; ++t) { + sizes.push_back(absl::Uniform(random, 2, 15)); + demands.push_back(absl::Uniform(random, 1, capacity)); + } + + EXPECT_EQ(ExactMakespan(sizes, demands, capacity), + ExactMakespanBruteForce(sizes, demands, capacity)); + } +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/subsolver_test.cc b/ortools/sat/subsolver_test.cc new file mode 100644 index 0000000000..06f549cfe7 --- /dev/null +++ b/ortools/sat/subsolver_test.cc @@ -0,0 +1,105 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/subsolver.h" + +#include +#include +#include +#include +#include + +#include "absl/synchronization/mutex.h" +#include "gtest/gtest.h" + +namespace operations_research { +namespace sat { +namespace { + +// Just a trivial example showing how to use the DeterministicLoop() and +// NonDeterministicLoop() functions. +template +void TestLoopFunction() { + struct GlobalState { + int num_task = 0; + const int limit = 100; + + absl::Mutex mutex; + std::vector updates; + + // This one will be always the same after each batch of task. + int64_t max_update_value = 0; + }; + + class TestSubSolver : public SubSolver { + public: + explicit TestSubSolver(GlobalState* state) + : SubSolver("test", FULL_PROBLEM), state_(state) {} + + bool TaskIsAvailable() override { + // Note that the lock is only needed for the non-deterministic test. + absl::MutexLock mutex_lock(&state_->mutex); + return state_->num_task < state_->limit; + } + + std::function GenerateTask(int64_t id) override { + { + // Note that the lock is only needed for the non-deterministic test. + absl::MutexLock mutex_lock(&state_->mutex); + state_->num_task++; + } + return [this, id] { + absl::MutexLock mutex_lock(&state_->mutex); + state_->updates.push_back(id); + }; + } + + void Synchronize() override { + // Note that the lock is only needed for the non-deterministic test. + absl::MutexLock mutex_lock(&state_->mutex); + for (const int64_t i : state_->updates) { + state_->max_update_value = std::max(state_->max_update_value, i); + } + state_->updates.clear(); + } + + private: + GlobalState* state_; + }; + + GlobalState state; + + // The number of subsolver can be independent of the number of threads. Here + // there is actually no need to have 3 of them except for testing the feature. 
+ std::vector> subsolvers; + for (int i = 0; i < 3; ++i) { + subsolvers.push_back(std::make_unique(&state)); + } + + const int num_threads = 4; + if (deterministic) { + const int batch_size = 20; + DeterministicLoop(subsolvers, num_threads, batch_size); + } else { + NonDeterministicLoop(subsolvers, num_threads); + } + EXPECT_EQ(state.max_update_value, state.limit - 1); +} + +TEST(DeterministicLoop, BasicTest) { TestLoopFunction(); } + +TEST(NonDeterministicLoop, BasicTest) { TestLoopFunction(); } + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/symmetry_test.cc b/ortools/sat/symmetry_test.cc new file mode 100644 index 0000000000..7bfe774df0 --- /dev/null +++ b/ortools/sat/symmetry_test.cc @@ -0,0 +1,151 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/symmetry.h" + +#include +#include +#include +#include +#include + +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/algorithms/sparse_permutation.h" +#include "ortools/base/gmock.h" +#include "ortools/sat/sat_base.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::ElementsAre; + +TEST(SymmetryPropagatorTest, Permute) { + const int num_variables = 6; + const int num_literals = 2 * num_variables; + std::unique_ptr perm(new SparsePermutation(num_literals)); + perm->AddToCurrentCycle(Literal(+3).Index().value()); + perm->AddToCurrentCycle(Literal(+2).Index().value()); + perm->AddToCurrentCycle(Literal(-4).Index().value()); + perm->CloseCurrentCycle(); + // Note that the permutation 'p' must be compatible with the negation. + // That is negation(p(l)) = p(negation(l)). This is actually not required + // for this test though. + perm->AddToCurrentCycle(Literal(-3).Index().value()); + perm->AddToCurrentCycle(Literal(-2).Index().value()); + perm->AddToCurrentCycle(Literal(+4).Index().value()); + perm->CloseCurrentCycle(); + + Trail trail; + SymmetryPropagator propagator; + propagator.AddSymmetry(std::move(perm)); + trail.RegisterPropagator(&propagator); + + std::vector literals = Literals({+1, +2, -2, +3}); + std::vector output; + propagator.Permute(0, literals, &output); + EXPECT_THAT(output, + ElementsAre(Literal(+1), Literal(-4), Literal(+4), Literal(+2))); +} + +TEST(SymmetryPropagatorTest, BasicTest) { + const int num_variables = 6; + const int num_literals = 2 * num_variables; + std::unique_ptr perm(new SparsePermutation(num_literals)); + perm->AddToCurrentCycle(Literal(+3).Index().value()); + perm->AddToCurrentCycle(Literal(+2).Index().value()); + perm->AddToCurrentCycle(Literal(-4).Index().value()); + perm->CloseCurrentCycle(); + // Note that the permutation 'p' must be compatible with the negation. + // That is negation(p(l)) = p(negation(l)). 
+ perm->AddToCurrentCycle(Literal(-3).Index().value()); + perm->AddToCurrentCycle(Literal(-2).Index().value()); + perm->AddToCurrentCycle(Literal(+4).Index().value()); + perm->CloseCurrentCycle(); + perm->AddToCurrentCycle(Literal(-5).Index().value()); + perm->AddToCurrentCycle(Literal(+5).Index().value()); + perm->CloseCurrentCycle(); + + Trail trail; + trail.Resize(num_variables); + SymmetryPropagator propagator; + propagator.AddSymmetry(std::move(perm)); + trail.RegisterPropagator(&propagator); + + // We need a mock propagator to inject a reason. + struct MockPropagator : SatPropagator { + MockPropagator() : SatPropagator("MockPropagator") {} + bool Propagate(Trail* trail) final { return true; } + absl::Span Reason(const Trail& /*trail*/, + int /*trail_index*/, + int64_t /*conflict_id*/) const final { + return reason; + } + std::vector reason; + }; + MockPropagator mock_propagator; + trail.RegisterPropagator(&mock_propagator); + + // With such a trail, nothing should propagate because the first non-symmetric + // literal +3 is a decision. + trail.Enqueue(Literal(+3), AssignmentType::kSearchDecision); + trail.Enqueue(Literal(-5), mock_propagator.PropagatorId()); + while (!propagator.PropagationIsDone(trail)) { + EXPECT_TRUE(propagator.Propagate(&trail)); + } + EXPECT_EQ(trail.Index(), 2); + + // Now we take the decision +2 (which is the image of +3). + trail.Enqueue(Literal(+2), AssignmentType::kUnitReason); + + // We need to initialize the reason for -5, because it will be needed during + // the conflict creation that the Propagate() below will trigger. + mock_propagator.reason = Literals({-3}); + + // Because -5 is now the first non-symmetric literal, a conflict is detected + // since +5 can then be propagated. + EXPECT_FALSE(propagator.PropagationIsDone(trail)); + EXPECT_FALSE(propagator.Propagate(&trail)); + + // Let assume that the reason for -5 is the assignment +3 (which make sense + // since it was propagated). 
The expected conflict is as stated below because + // if -5 and +2 are true, by summetry since we had +3 => -5 we know that +2 => + // 5. + // + // Note: by convention all the literals of a reason or a conflict are false. + EXPECT_THAT(trail.FailingClause(), ElementsAre(Literal(-2), Literal(+5))); + + // Let backtrack to the trail to +3. + trail.Untrail(trail.Index() - 2); + propagator.Untrail(trail, trail.Index()); + + // Let now assume that +3 => +2, by symmetry we can also propagate -4! + while (!propagator.PropagationIsDone(trail)) { + EXPECT_TRUE(propagator.Propagate(&trail)); + } + EXPECT_EQ(trail.Index(), 1); + trail.Enqueue(Literal(+2), mock_propagator.PropagatorId()); + EXPECT_FALSE(propagator.PropagationIsDone(trail)); + EXPECT_TRUE(propagator.Propagate(&trail)); + EXPECT_EQ(trail.Index(), 3); + EXPECT_EQ(trail[2], Literal(-4)); + + // Once again, if the reason for +2 was the assignment +3, we can compute + // the reason for the assignment -4 (it is just the symmetric of the other). + EXPECT_THAT(trail.Reason(Literal(-4).Variable()), ElementsAre(Literal(-2))); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/theta_tree_test.cc b/ortools/sat/theta_tree_test.cc new file mode 100644 index 0000000000..3e3ebb8964 --- /dev/null +++ b/ortools/sat/theta_tree_test.cc @@ -0,0 +1,291 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/theta_tree.h" + +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "gtest/gtest.h" +#include "ortools/sat/integer.h" +#include "ortools/util/random_engine.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +template +class ThetaTreeTest : public ::testing::Test {}; + +using IntegerTypes = ::testing::Types; +TYPED_TEST_SUITE(ThetaTreeTest, IntegerTypes); + +TYPED_TEST(ThetaTreeTest, EnvelopeOfEmptySet) { + ThetaLambdaTree tree; + tree.Reset(0); + EXPECT_EQ(IntegerTypeMinimumValue(), tree.GetEnvelope()); +} + +template +std::vector IntegerTypeVector(std::vector arg) { + return std::vector(arg.begin(), arg.end()); +} + +TYPED_TEST(ThetaTreeTest, Envelope) { + ThetaLambdaTree tree; + std::vector envelope = + IntegerTypeVector({-10, -7, -6, -4, -2}); + std::vector energy = IntegerTypeVector({2, 1, 3, 2, 2}); + tree.Reset(5); + + for (int i = 0; i < 5; i++) { + tree.AddOrUpdateEvent(i, envelope[i], energy[i], energy[i]); + } + EXPECT_EQ(1, tree.GetEnvelope()); // (-7) + (1+3+2+2) or (-6) + (3+2+2) + EXPECT_EQ(2, tree.GetMaxEventWithEnvelopeGreaterThan(TypeParam(0))); + EXPECT_EQ(4, tree.GetMaxEventWithEnvelopeGreaterThan(TypeParam(-1))); + EXPECT_EQ(0, tree.GetEnvelopeOf(0)); + EXPECT_EQ(1, tree.GetEnvelopeOf(1)); + EXPECT_EQ(1, tree.GetEnvelopeOf(2)); + EXPECT_EQ(0, tree.GetEnvelopeOf(3)); + EXPECT_EQ(0, tree.GetEnvelopeOf(4)); +} + +TYPED_TEST(ThetaTreeTest, EnvelopeOpt) { + ThetaLambdaTree tree; + std::vector envelope = + IntegerTypeVector({-10, -7, -6, -4, -2}); + std::vector energy = IntegerTypeVector({2, 1, 3, 3, 2}); + tree.Reset(5); + + int event, optional_event; + TypeParam energy_max; + + tree.AddOrUpdateEvent(0, envelope[0], energy[0], energy[0]); + tree.AddOrUpdateEvent(1, envelope[1], energy[1], energy[1]); + tree.AddOrUpdateEvent(3, envelope[3], TypeParam(0), energy[3]); + tree.AddOrUpdateEvent(4, envelope[4], energy[4], energy[4]); + 
EXPECT_EQ(1, tree.GetOptionalEnvelope()); + + tree.GetEventsWithOptionalEnvelopeGreaterThan(TypeParam(0), &event, + &optional_event, &energy_max); + EXPECT_EQ(3, event); + EXPECT_EQ(3, optional_event); + EXPECT_EQ(2, energy_max); + + tree.RemoveEvent(4); + tree.AddOrUpdateEvent(2, envelope[2], energy[2], energy[2]); + EXPECT_EQ(0, tree.GetOptionalEnvelope()); + tree.GetEventsWithOptionalEnvelopeGreaterThan(TypeParam(-1), &event, + &optional_event, &energy_max); + EXPECT_EQ(2, event); + EXPECT_EQ(3, optional_event); + EXPECT_EQ(2, energy_max); + EXPECT_EQ(-4, tree.GetEnvelopeOf(0)); + EXPECT_EQ(-3, tree.GetEnvelopeOf(1)); + EXPECT_EQ(-3, tree.GetEnvelopeOf(2)); +} + +TYPED_TEST(ThetaTreeTest, EnvelopeOptWithAddOptional) { + ThetaLambdaTree tree; + std::vector envelope = + IntegerTypeVector({-10, -7, -6, -4, -2}); + std::vector energy = IntegerTypeVector({2, 1, 3, 3, 2}); + tree.Reset(5); + + int event, optional_event; + TypeParam energy_max; + + tree.AddOrUpdateEvent(0, envelope[0], energy[0], energy[0]); + tree.AddOrUpdateEvent(1, envelope[1], energy[1], energy[1]); + tree.AddOrUpdateOptionalEvent(3, envelope[3], energy[3]); + tree.AddOrUpdateEvent(4, envelope[4], energy[4], energy[4]); + EXPECT_EQ(1, tree.GetOptionalEnvelope()); + + tree.GetEventsWithOptionalEnvelopeGreaterThan(TypeParam(0), &event, + &optional_event, &energy_max); + EXPECT_EQ(3, event); + EXPECT_EQ(3, optional_event); + EXPECT_EQ(2, energy_max); + + tree.RemoveEvent(4); + tree.AddOrUpdateEvent(2, envelope[2], energy[2], energy[2]); + EXPECT_EQ(0, tree.GetOptionalEnvelope()); + tree.GetEventsWithOptionalEnvelopeGreaterThan(TypeParam(-1), &event, + &optional_event, &energy_max); + EXPECT_EQ(2, event); + EXPECT_EQ(3, optional_event); + EXPECT_EQ(2, energy_max); + EXPECT_EQ(-4, tree.GetEnvelopeOf(0)); + EXPECT_EQ(-3, tree.GetEnvelopeOf(1)); + EXPECT_EQ(-3, tree.GetEnvelopeOf(2)); +} + +TYPED_TEST(ThetaTreeTest, AddingAndGettingOptionalEvents) { + ThetaLambdaTree tree; + std::vector envelope = + 
IntegerTypeVector({0, 3, 4, 6, 8}); + std::vector energy = IntegerTypeVector({2, 1, 3, 3, 2}); + tree.Reset(5); + + tree.AddOrUpdateEvent(0, envelope[0], energy[0], energy[0]); + tree.AddOrUpdateEvent(1, envelope[1], energy[1], energy[1]); + EXPECT_EQ(4, tree.GetEnvelope()); + + // Even with 0 energy, standard update takes task 3's envelope into account. + tree.AddOrUpdateEvent(3, envelope[3], TypeParam(0), energy[3]); + EXPECT_EQ(6, tree.GetEnvelope()); + EXPECT_EQ(9, tree.GetOptionalEnvelope()); + tree.RemoveEvent(3); + + // Changing task 3 to optional makes it disappear from GetEnvelope(). + tree.AddOrUpdateOptionalEvent(3, envelope[3], energy[3]); + EXPECT_EQ(4, tree.GetEnvelope()); // Same as before adding task 3. + EXPECT_EQ(9, tree.GetOptionalEnvelope()); + + // Changing task 3 to optional changes its optional values. + tree.AddOrUpdateEvent(3, envelope[3], TypeParam(1), TypeParam(9)); + tree.AddOrUpdateOptionalEvent(3, envelope[3], energy[3]); + EXPECT_EQ(4, tree.GetEnvelope()); + EXPECT_EQ(9, tree.GetOptionalEnvelope()); +} + +TYPED_TEST(ThetaTreeTest, RemoveAndDelayedAddOrUpdateEventTest) { + ThetaLambdaTree tree; + // The tree encoding is tricky, check that RecomputeTreeForDelayedOperations() + // works for all values from a power of two until the next. + for (int num_events = 4; num_events < 8; ++num_events) { + tree.Reset(num_events); + std::vector envelope; + std::vector energy; + // Event start envelope = event, energy min = 2, energy max = 3 + for (int event = 0; event < num_events; ++event) { + envelope.push_back(TypeParam{event}); + energy.push_back(TypeParam{2}); + } + EXPECT_EQ(tree.GetEnvelope(), IntegerTypeMinimumValue()); + EXPECT_EQ(tree.GetOptionalEnvelope(), IntegerTypeMinimumValue()); + // Envelope of events [0, i) is (0) + 2 * i. 
+ for (int event = 0; event < num_events; ++event) { + tree.DelayedAddOrUpdateEvent(event, envelope[event], energy[event], + energy[event] + 1); + tree.RecomputeTreeForDelayedOperations(); + EXPECT_EQ(tree.GetEnvelope(), 2 * (event + 1)); + EXPECT_EQ(tree.GetOptionalEnvelope(), 2 * (event + 1) + 1); + } + // Envelope of events [i, n) is (n-1) + 2 + (n - i) + for (int event = 0; event < num_events; ++event) { + EXPECT_EQ(tree.GetEnvelope(), 2 * num_events - event); + EXPECT_EQ(tree.GetOptionalEnvelope(), 2 * num_events - event + 1); + tree.DelayedRemoveEvent(event); + tree.RecomputeTreeForDelayedOperations(); + } + EXPECT_EQ(tree.GetEnvelope(), IntegerTypeMinimumValue()); + EXPECT_EQ(tree.GetOptionalEnvelope(), IntegerTypeMinimumValue()); + } +} + +TYPED_TEST(ThetaTreeTest, DelayedAddOrUpdateOptionalEventTest) { + ThetaLambdaTree tree; + // The tree encoding is tricky, check that RecomputeTreeForDelayedOperations() + // works for all values from a power of two until the next. + for (int num_events = 4; num_events < 8; ++num_events) { + tree.Reset(num_events); + std::vector envelope; + std::vector energy; + // Event start envelope = event, event energy max = 2. + for (int event = 0; event < num_events; ++event) { + envelope.push_back(TypeParam{event}); + energy.push_back(TypeParam{2}); + } + EXPECT_EQ(tree.GetEnvelope(), IntegerTypeMinimumValue()); + EXPECT_EQ(tree.GetOptionalEnvelope(), IntegerTypeMinimumValue()); + // Optional envelope of events [0, i) is i + 2. 
+ for (int event = 0; event < num_events; ++event) { + tree.DelayedAddOrUpdateOptionalEvent(event, envelope[event], + energy[event]); + tree.RecomputeTreeForDelayedOperations(); + EXPECT_EQ(tree.GetEnvelope(), IntegerTypeMinimumValue()); + EXPECT_EQ(tree.GetOptionalEnvelope(), event + 2); + } + } +} + +static void BM_update(benchmark::State& state) { + random_engine_t random_; + const int size = state.range(0); + const int num_updates = 4 * size; + ThetaLambdaTree tree; + std::uniform_int_distribution event_dist(0, size - 1); + std::uniform_int_distribution enveloppe_dist(-10000, 10000); + std::uniform_int_distribution energy_dist(0, 10000); + for (auto _ : state) { + tree.Reset(size); + for (int i = 0; i < num_updates; ++i) { + const int event = event_dist(random_); + const IntegerValue enveloppe(enveloppe_dist(random_)); + const IntegerValue energy1(energy_dist(random_)); + const IntegerValue energy2(energy_dist(random_)); + tree.AddOrUpdateEvent(event, enveloppe, std::min(energy1, energy2), + std::max(energy1, energy2)); + } + } + // Number of updates. 
+ state.SetBytesProcessed(static_cast(state.iterations()) * + num_updates); +} + +// Note that we didn't pick only power of two +BENCHMARK(BM_update)->Arg(10)->Arg(20)->Arg(64)->Arg(100)->Arg(256)->Arg(800); + +static void BM_delayed_update(benchmark::State& state) { + random_engine_t random_; + const int size = state.range(0); + const int num_updates = 4 * size; + ThetaLambdaTree tree; + std::uniform_int_distribution event_dist(0, size - 1); + std::uniform_int_distribution enveloppe_dist(-10000, 10000); + std::uniform_int_distribution energy_dist(0, 10000); + for (auto _ : state) { + tree.Reset(size); + for (int i = 0; i < num_updates; ++i) { + const int event = event_dist(random_); + const IntegerValue enveloppe(enveloppe_dist(random_)); + const IntegerValue energy1(energy_dist(random_)); + const IntegerValue energy2(energy_dist(random_)); + tree.DelayedAddOrUpdateEvent(event, enveloppe, std::min(energy1, energy2), + std::max(energy1, energy2)); + } + tree.RecomputeTreeForDelayedOperations(); + } + // Number of updates. + state.SetBytesProcessed(static_cast(state.iterations()) * + num_updates); +} + +// Note that we didn't pick only power of two +BENCHMARK(BM_delayed_update) + ->Arg(10) + ->Arg(20) + ->Arg(64) + ->Arg(100) + ->Arg(256) + ->Arg(800); + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/timetable_test.cc b/ortools/sat/timetable_test.cc new file mode 100644 index 0000000000..c8999baedd --- /dev/null +++ b/ortools/sat/timetable_test.cc @@ -0,0 +1,555 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/timetable.h" + +#include + +#include +#include +#include + +#include "absl/container/btree_map.h" +#include "absl/log/check.h" +#include "absl/strings/str_join.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/logging.h" +#include "ortools/sat/all_different.h" +#include "ortools/sat/cumulative.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/integer_search.h" +#include "ortools/sat/intervals.h" +#include "ortools/sat/model.h" +#include "ortools/sat/precedences.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_solver.h" + +namespace operations_research { +namespace sat { +namespace { + +struct CumulativeTasks { + int min_duration; + int min_demand; + int min_start; + int max_end; +}; + +struct Task { + int min_start; + int max_end; +}; + +bool TestTimeTablingPropagation(absl::Span tasks, + absl::Span expected, int capacity) { + Model model; + IntegerTrail* integer_trail = model.GetOrCreate(); + PrecedencesPropagator* precedences = + model.GetOrCreate(); + IntervalsRepository* intervals = model.GetOrCreate(); + + const int num_tasks = tasks.size(); + std::vector interval_vars(num_tasks); + std::vector start_exprs(num_tasks); + std::vector duration_exprs(num_tasks); + std::vector end_exprs(num_tasks); + std::vector demands(num_tasks); + const AffineExpression capacity_expr = + AffineExpression(IntegerValue(capacity)); + + const int kStart(0); + const int kHorizon(10000); + + for (int t = 0; t < num_tasks; ++t) { + const CumulativeTasks& task = tasks[t]; + // Build the 
task variables. + interval_vars[t] = + model.Add(NewInterval(kStart, kHorizon, task.min_duration)); + start_exprs[t] = intervals->Start(interval_vars[t]); + end_exprs[t] = intervals->End(interval_vars[t]); + demands[t] = AffineExpression(IntegerValue(task.min_demand)); + + // Set task initial minimum starting time. + std::vector no_literal_reason; + std::vector no_integer_reason; + EXPECT_TRUE( + integer_trail->Enqueue(start_exprs[t].GreaterOrEqual(task.min_start), + no_literal_reason, no_integer_reason)); + // Set task initial maximum ending time. + EXPECT_TRUE(integer_trail->Enqueue(end_exprs[t].LowerOrEqual(task.max_end), + no_literal_reason, no_integer_reason)); + } + + // Propagate properly the other bounds of the intervals. + EXPECT_TRUE(precedences->Propagate()); + + SchedulingConstraintHelper* helper = model.TakeOwnership( + new SchedulingConstraintHelper(interval_vars, &model)); + SchedulingDemandHelper* demands_helper = + model.TakeOwnership(new SchedulingDemandHelper(demands, helper, &model)); + + // Propagator responsible for filtering start variables. + TimeTablingPerTask timetabling(capacity_expr, helper, demands_helper, &model); + timetabling.RegisterWith(model.GetOrCreate()); + + // Check initial satisfiability + if (!model.GetOrCreate()->Propagate()) return false; + + // Check consistency of data. + CHECK_EQ(num_tasks, expected.size()); + + for (int t = 0; t < num_tasks; ++t) { + // Check starting time. + EXPECT_EQ(expected[t].min_start, integer_trail->LowerBound(start_exprs[t])) + << "task #" << t; + // Check ending time. + EXPECT_EQ(expected[t].max_end, integer_trail->UpperBound(end_exprs[t])) + << "task #" << t; + } + return true; +} + +// This is an infeasible instance on which the edge finder finds nothing. +// Cumulative Time Table finds the contradiction. +TEST(TimeTablingPropagation, UNSAT) { + EXPECT_FALSE(TestTimeTablingPropagation({{3, 2, 0, 4}, {3, 2, 1, 5}}, {}, 3)); +} + +// This is an instance on Time Table pushes a task. 
+TEST(TimeTablingPropagation, TimeTablePush1) { + EXPECT_TRUE(TestTimeTablingPropagation({{1, 2, 1, 2}, {3, 2, 0, 10}}, + {{1, 2}, {2, 10}}, 3)); +} + +// This is an instance on Time Table pushes a task. +TEST(TimeTablingPropagation, TimeTablePush2) { + EXPECT_TRUE( + TestTimeTablingPropagation({{1, 2, 1, 2}, {1, 2, 3, 4}, {3, 2, 0, 10}}, + {{1, 2}, {3, 4}, {4, 10}}, 3)); +} + +// This is an instance on which Time Table pushes a task. +// Here the two first tasks have the following profile: +// usage ^ +// 2 | ** +// 1 | **--** +// 0 |**------******************> time +// 0 1 2 3 4 5 6 +// The interval [2, 3] has a profile too high to accommodate the third task. +TEST(TimeTablingPropagation, TimeTablePush3) { + EXPECT_TRUE( + TestTimeTablingPropagation({{3, 1, 0, 4}, {3, 1, 1, 5}, {3, 2, 0, 10}}, + {{0, 4}, {1, 5}, {3, 10}}, 3)); +} + +// This is an instance on which Time Table pushes a task. +// Similar to TimeTablePush3, but the two small tasks have the same profile. +TEST(TimeTablingPropagation, TimeTablePush4) { + EXPECT_TRUE( + TestTimeTablingPropagation({{4, 1, 0, 5}, {3, 1, 1, 4}, {3, 2, 0, 10}}, + {{0, 5}, {1, 4}, {4, 10}}, 3)); +} + +// Regression test: there used to be a bug when no profile delta corresponded +// to the start time of a task. +TEST(TimeTablingPropagation, RegressionTest) { + EXPECT_TRUE(TestTimeTablingPropagation({{3, 1, 0, 3}, {2, 1, 2, 5}}, + {{0, 3}, {3, 5}}, 1)); +} + +// Regression test: there used to be a bug that caused Timetabling to stop +// before reaching its fixed-point. +TEST(TimeTablingPropagation, FixedPoint) { + EXPECT_TRUE(TestTimeTablingPropagation( + {{1, 1, 0, 1}, {4, 1, 0, 8}, {2, 1, 1, 5}, {1, 1, 1, 5}}, + {{0, 1}, {3, 8}, {1, 4}, {1, 4}}, 1)); +} + +// Regression test: there used to be a bug when two back to back +// tasks were exceeding the capacity in the partial sum. 
+TEST(TimeTablingPropagation, PartialSumBug) { + EXPECT_TRUE(TestTimeTablingPropagation({{510, 142, 0, 510}, + {268, 130, 242, 510}, + {74, 147, 510, 584}, + {197, 204, 584, 781}, + {72, 138, 781, 853}, + {170, 231, 853, 1023}, + {181, 131, 1023, 1204}}, + {{0, 510}, + {242, 510}, + {510, 584}, + {584, 781}, + {781, 853}, + {853, 1023}, + {1023, 1204}}, + 315)); +} + +// TODO(user): build automatic FindAll tests for the cumulative constraint. +// Test that we find all the solutions. +TEST(TimeTablingSolve, FindAll) { + // Instance. + const std::vector durations = {1, 2, 3, 3, 3, 3}; + const std::vector demands = {1, 1, 1, 1, 4, 4}; + const int capacity = 4; + const int horizon = 11; + + Model model; + std::vector intervals(durations.size()); + std::vector demand_exprs(durations.size()); + const AffineExpression capacity_expr = + AffineExpression(IntegerValue(capacity)); + + for (int i = 0; i < durations.size(); ++i) { + intervals[i] = model.Add(NewInterval(0, horizon, durations[i])); + demand_exprs[i] = AffineExpression(IntegerValue(demands[i])); + } + + model.Add(Cumulative(intervals, demand_exprs, capacity_expr)); + + int num_solutions_found = 0; + auto* integer_trail = model.GetOrCreate(); + auto* repository = model.GetOrCreate(); + while (true) { + const SatSolver::Status status = + SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + + // Add the solution. + std::vector solution(durations.size()); + for (int i = 0; i < intervals.size(); ++i) { + solution[i] = + integer_trail->LowerBound(repository->Start(intervals[i])).value(); + } + num_solutions_found++; + LOG(INFO) << "Found solution: {" << absl::StrJoin(solution, ", ") << "}."; + + // Loop to the next solution. + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + + // Test that we have the right number of solutions. + EXPECT_EQ(num_solutions_found, 2040); +} + +TEST(TimeTablingSolve, FindAllWithVaryingCapacity) { + // Instance. 
+ const std::vector durations = {1, 2, 3}; + const std::vector demands = {1, 2, 3}; + const int horizon = 6; + + // Collect the number of solution for each capacity value. + int sum = 0; + for (const int capacity : {3, 4, 5}) { + Model model; + std::vector intervals(durations.size()); + std::vector demand_exprs(durations.size()); + const AffineExpression capacity_expr = + AffineExpression(IntegerValue(capacity)); + + for (int i = 0; i < durations.size(); ++i) { + intervals[i] = model.Add(NewInterval(0, horizon, durations[i])); + demand_exprs[i] = AffineExpression(IntegerValue(demands[i])); + } + + model.Add(Cumulative(intervals, demand_exprs, capacity_expr)); + + int num_solutions_found = 0; + auto* integer_trail = model.GetOrCreate(); + auto* repository = model.GetOrCreate(); + while (true) { + const SatSolver::Status status = + SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + + // Add the solution. + std::vector solution(durations.size()); + for (int i = 0; i < intervals.size(); ++i) { + solution[i] = + integer_trail->LowerBound(repository->Start(intervals[i])).value(); + } + num_solutions_found++; + LOG(INFO) << "Found solution: {" << absl::StrJoin(solution, ", ") << "}."; + + // Loop to the next solution. + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + + LOG(INFO) << "capacity: " << capacity + << " num_solutions: " << num_solutions_found; + sum += num_solutions_found; + } + + // Now solve with a varying capacity. 
+ Model model; + std::vector intervals(durations.size()); + std::vector demand_exprs(durations.size()); + const AffineExpression capacity_expr = + AffineExpression(model.Add(NewIntegerVariable(0, 5))); + + for (int i = 0; i < durations.size(); ++i) { + intervals[i] = model.Add(NewInterval(0, horizon, durations[i])); + demand_exprs[i] = AffineExpression(IntegerValue(demands[i])); + } + + model.Add(Cumulative(intervals, demand_exprs, capacity_expr)); + + int num_solutions_found = 0; + auto* integer_trail = model.GetOrCreate(); + auto* repository = model.GetOrCreate(); + while (true) { + const SatSolver::Status status = + SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + + // Add the solution. + std::vector solution(durations.size()); + for (int i = 0; i < intervals.size(); ++i) { + solution[i] = + integer_trail->LowerBound(repository->Start(intervals[i])).value(); + } + num_solutions_found++; + LOG(INFO) << "Found solution: {" << absl::StrJoin(solution, ", ") << "}."; + + // Loop to the next solution. + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + + // Test that we have the right number of solutions. + EXPECT_EQ(num_solutions_found, sum); +} + +TEST(TimeTablingSolve, FindAllWithOptionals) { + // Instance. + // Up to two tasks can be scheduled at the same time. 
+ const std::vector durations = {3, 3, 3}; + const std::vector demands = {2, 2, 2}; + const int capacity = 5; + const int horizon = 3; + const int num_solutions = 7; + + Model model; + std::vector intervals(durations.size()); + std::vector demand_exprs(durations.size()); + std::vector is_present_literals(durations.size()); + const AffineExpression capacity_expr = + AffineExpression(IntegerValue(capacity)); + + for (int i = 0; i < durations.size(); ++i) { + is_present_literals[i] = Literal(model.Add(NewBooleanVariable()), true); + intervals[i] = model.Add( + NewOptionalInterval(0, horizon, durations[i], is_present_literals[i])); + demand_exprs[i] = AffineExpression(IntegerValue(demands[i])); + } + + model.Add(Cumulative(intervals, demand_exprs, capacity_expr)); + + int num_solutions_found = 0; + auto* integer_trail = model.GetOrCreate(); + auto* repository = model.GetOrCreate(); + while (true) { + const SatSolver::Status status = + SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + + // Add the solution. + std::vector solution(durations.size()); + for (int i = 0; i < intervals.size(); ++i) { + if (model.Get(Value(is_present_literals[i]))) { + solution[i] = + integer_trail->LowerBound(repository->Start(intervals[i])).value(); + } else { + solution[i] = -1; + } + } + num_solutions_found++; + LOG(INFO) << "Found solution: {" << absl::StrJoin(solution, ", ") << "}."; + + // Loop to the next solution. + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + + // Test that we have the right number of solutions. + EXPECT_EQ(num_solutions_found, num_solutions); +} + +// This construct a reservoir corresponding to a well behaved parenthesis +// sequence. 
+TEST(ReservoirTest, FindAllParenthesis) { + const int n = 3; + const int size = 2 * n; + + Model model; + std::vector vars(size); + std::vector times(size); + std::vector deltas(size); + for (int i = 0; i < size; ++i) { + vars[i] = model.Add(NewIntegerVariable(0, size - 1)); + times[i] = vars[i]; + deltas[i] = IntegerValue((i % 2 == 1) ? -1 : 1); + } + const Literal true_lit = + model.GetOrCreate()->GetTrueLiteral(); + std::vector all_true(size, true_lit); + + model.Add(AllDifferentOnBounds(vars)); + AddReservoirConstraint(times, deltas, all_true, 0, size, &model); + + absl::btree_map sequence_to_count; + int num_solutions_found = 0; + while (true) { + const SatSolver::Status status = + SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + + // Add the solution. + std::string parenthesis_sequence(size, ' '); + for (int i = 0; i < size; ++i) { + const int v = model.Get(Value(vars[i])); + parenthesis_sequence[v] = (i % 2 == 0) ? '(' : ')'; + } + sequence_to_count[parenthesis_sequence]++; + num_solutions_found++; + + // Loop to the next solution. + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + + // To help debug the code. + for (const auto entry : sequence_to_count) { + LOG(INFO) << entry.first << " : " << entry.second; + } + LOG(INFO) << "decisions: " << model.GetOrCreate()->num_branches(); + LOG(INFO) << "conflicts: " << model.GetOrCreate()->num_failures(); + + // Test that we have the right number of solutions. + // + // The catalan number n, which is 5 for n equal five, count the number of well + // formed parathesis sequence. But we have to multiply this by the permutation + // for the open and closing parenthesis that are matched to their positions: + // n!. + EXPECT_EQ(num_solutions_found, 5 * 6 * 6); +} + +// Now some might be absent. 
+TEST(ReservoirTest, FindAllParenthesisWithOptionality) { + const int n = 2; + const int size = 2 * n; + + Model model; + std::vector vars(size); + std::vector times(size); + std::vector deltas(size); + std::vector present(size); + for (int i = 0; i < size; ++i) { + vars[i] = model.Add(NewIntegerVariable(0, size - 1)); + times[i] = vars[i]; + deltas[i] = IntegerValue((i % 2 == 1) ? -1 : 1); + present[i] = Literal(model.Add(NewBooleanVariable()), true); + } + + model.Add(AllDifferentOnBounds(vars)); + AddReservoirConstraint(times, deltas, present, 0, size, &model); + + absl::btree_map sequence_to_count; + int num_solutions_found = 0; + while (true) { + const SatSolver::Status status = + SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + + // Add the solution. + std::string parenthesis_sequence(size, '_'); + for (int i = 0; i < size; ++i) { + if (model.Get(Value(present[i])) == 0) continue; + const int v = model.Get(Value(vars[i])); + parenthesis_sequence[v] = (i % 2 == 0) ? '(' : ')'; + } + sequence_to_count[parenthesis_sequence]++; + num_solutions_found++; + + // Loop to the next solution. + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + + // To help debug the code. + for (const auto entry : sequence_to_count) { + LOG(INFO) << entry.first << " : " << entry.second; + } + LOG(INFO) << "decisions: " << model.GetOrCreate()->num_branches(); + LOG(INFO) << "conflicts: " << model.GetOrCreate()->num_failures(); + + // Test that we have the right number of solutions. + EXPECT_EQ(num_solutions_found, 184); +} + +// Enumerate all fixed sequence of [-1, +1] with a partial sum >= 0 and <= 1. 
+TEST(ReservoirTest, VariableLevelChange) { + Model model; + const int size = 8; + std::vector times(size); + std::vector deltas(size); + for (int i = 0; i < size; ++i) { + times[i] = IntegerValue(i); + deltas[i] = model.Add(NewIntegerVariable(-1, 1)); + } + const Literal true_lit = + model.GetOrCreate()->GetTrueLiteral(); + std::vector all_true(size, true_lit); + + const int min_level = 0; + const int max_level = 1; + AddReservoirConstraint(times, deltas, all_true, min_level, max_level, &model); + + absl::btree_map sequence_to_count; + int num_solutions_found = 0; + auto* integer_trail = model.GetOrCreate(); + while (true) { + const SatSolver::Status status = + SolveIntegerProblemWithLazyEncoding(&model); + if (status != SatSolver::Status::FEASIBLE) break; + + // Add the solution. + // Test that it is a valid one. + int sum = 0; + std::vector values; + for (int i = 0; i < size; ++i) { + values.push_back(integer_trail->LowerBound(deltas[i]).value()); + sum += values.back(); + EXPECT_GE(sum, min_level); + EXPECT_LE(sum, max_level); + } + sequence_to_count[absl::StrJoin(values, ",")]++; + num_solutions_found++; + + // Loop to the next solution. + model.Add(ExcludeCurrentSolutionAndBacktrack()); + } + + // To help debug the code. + for (const auto entry : sequence_to_count) { + LOG(INFO) << entry.first << " : " << entry.second; + } + LOG(INFO) << "decisions: " << model.GetOrCreate()->num_branches(); + LOG(INFO) << "conflicts: " << model.GetOrCreate()->num_failures(); + + // Test that we have the right number of solutions. + // For each subset of non-zero position, the value are fixed, it must + // be an alternating sequence starting at 1. 
+ EXPECT_EQ(num_solutions_found, 1 << size); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/zero_half_cuts_test.cc b/ortools/sat/zero_half_cuts_test.cc new file mode 100644 index 0000000000..6aea31cdcd --- /dev/null +++ b/ortools/sat/zero_half_cuts_test.cc @@ -0,0 +1,114 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/zero_half_cuts.h" + +#include +#include + +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/lp_data/lp_types.h" +#include "ortools/sat/integer.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::ElementsAre; +using ::testing::IsEmpty; +using ::testing::UnorderedElementsAre; + +TEST(SymmetricDifferenceTest, BasicExample) { + ZeroHalfCutHelper helper; + std::vector a = {2, 1, 4}; + std::vector b = {4, 3, 2, 7}; + helper.Reset(10); + helper.SymmetricDifference(a, &b); + EXPECT_THAT(b, ElementsAre(3, 7, 1)); +} + +TEST(SymmetricDifferenceTest, BasicExample2) { + ZeroHalfCutHelper helper; + std::vector a = {2, 1, 4}; + std::vector b = {}; + helper.Reset(10); + helper.SymmetricDifference(a, &b); + EXPECT_THAT(b, ElementsAre(2, 1, 4)); +} + +TEST(EliminateVarUsingRowTest, BasicExample) { + // We need to construct a binary matrix for this test. 
+ ZeroHalfCutHelper helper; + helper.ProcessVariables({0.0, 0.0, 0.0, 0.0, 0.12, 0.0, 0.0, 0.0, 0.0}, + std::vector(9, IntegerValue(0)), + std::vector(9, IntegerValue(1))); + helper.AddBinaryRow({{{glop::RowIndex(1), IntegerValue(1)}}, + {0, 2, 3, 4, 7}, + /*rhs*/ 1, + /*slack*/ 0.1}); + helper.AddBinaryRow({{{glop::RowIndex(2), IntegerValue(1)}}, + {0, 2, 3, 4, 7}, + /*rhs*/ 0, + /*slack*/ 0.0}); + helper.AddBinaryRow({{{glop::RowIndex(1), IntegerValue(1)}, + {glop::RowIndex(3), IntegerValue(1)}}, + {0, 5, 4, 8}, + /*rhs*/ 1, + /*slack*/ 0.0}); + + typedef std::vector> MultiplierType; + typedef std::vector VectorType; + + // Let use row with index 2 to eliminate the variable 4. + helper.EliminateVarUsingRow(4, 2); + + // The multipliers, cols and parity behave like a xor. + EXPECT_EQ(helper.MatrixRow(0).multipliers, + MultiplierType({{glop::RowIndex(3), IntegerValue(1)}})); + EXPECT_EQ(helper.MatrixRow(0).cols, VectorType({2, 3, 7, 5, 8})); + EXPECT_EQ(helper.MatrixRow(0).rhs_parity, 0); + EXPECT_EQ(helper.MatrixRow(0).slack, 0.1); + + EXPECT_EQ(helper.MatrixRow(1).multipliers, + MultiplierType({{glop::RowIndex(1), IntegerValue(1)}, + {glop::RowIndex(2), IntegerValue(1)}, + {glop::RowIndex(3), IntegerValue(1)}})); + EXPECT_EQ(helper.MatrixRow(1).cols, VectorType({2, 3, 7, 5, 8})); + EXPECT_EQ(helper.MatrixRow(1).rhs_parity, 1); + EXPECT_EQ(helper.MatrixRow(1).slack, 0.0); + + // The column is eliminated like a singleton column and the lp value become + // the slack. + EXPECT_EQ(helper.MatrixRow(2).multipliers, + MultiplierType({{glop::RowIndex(1), IntegerValue(1)}, + {glop::RowIndex(3), IntegerValue(1)}})); + EXPECT_EQ(helper.MatrixRow(2).cols, VectorType({5, 8})); + EXPECT_EQ(helper.MatrixRow(2).rhs_parity, 1); + EXPECT_EQ(helper.MatrixRow(2).slack, 0.12); + + // The transposed information is up to date. 
+ EXPECT_THAT(helper.MatrixCol(0), IsEmpty()); + EXPECT_THAT(helper.MatrixCol(1), IsEmpty()); + EXPECT_THAT(helper.MatrixCol(2), UnorderedElementsAre(0, 1)); + EXPECT_THAT(helper.MatrixCol(3), UnorderedElementsAre(0, 1)); + EXPECT_THAT(helper.MatrixCol(4), IsEmpty()); + EXPECT_THAT(helper.MatrixCol(5), UnorderedElementsAre(0, 1, 2)); + EXPECT_THAT(helper.MatrixCol(6), IsEmpty()); + EXPECT_THAT(helper.MatrixCol(7), UnorderedElementsAre(0, 1)); + EXPECT_THAT(helper.MatrixCol(8), UnorderedElementsAre(0, 1, 2)); +} + +} // namespace +} // namespace sat +} // namespace operations_research From e55f003faa3546dd270c3c57cb58b271bcb4b756 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 23 Sep 2024 09:53:42 +0200 Subject: [PATCH 005/105] sat: export form google3 --- ortools/sat/2d_orthogonal_packing_testing.cc | 76 ++- ortools/sat/2d_orthogonal_packing_testing.h | 8 + ortools/sat/2d_packing_brute_force.cc | 9 +- ortools/sat/2d_rectangle_presolve.cc | 416 ++++++++++++- ortools/sat/2d_rectangle_presolve.h | 128 ++++ ortools/sat/2d_rectangle_presolve_test.cc | 584 +++++++++++++------ ortools/sat/BUILD.bazel | 6 +- ortools/sat/clause.cc | 10 +- ortools/sat/cp_model.cc | 10 +- ortools/sat/cp_model_expand.cc | 67 ++- ortools/sat/cp_model_lns.cc | 360 ++++++++---- ortools/sat/cp_model_lns.h | 113 ++-- ortools/sat/cp_model_loader.cc | 33 +- ortools/sat/cp_model_presolve.cc | 76 ++- ortools/sat/cp_model_presolve.h | 3 + ortools/sat/cp_model_solver.cc | 72 ++- ortools/sat/cp_model_solver_helpers.cc | 53 +- ortools/sat/cp_model_solver_helpers.h | 1 - ortools/sat/cuts.cc | 3 +- ortools/sat/diffn_util.cc | 10 +- ortools/sat/diffn_util.h | 12 +- ortools/sat/integer.cc | 14 +- ortools/sat/integer_expr.cc | 5 +- ortools/sat/integer_search.cc | 29 +- ortools/sat/integer_search.h | 4 +- ortools/sat/linear_constraint.cc | 15 +- ortools/sat/linear_programming_constraint.cc | 68 ++- ortools/sat/linear_programming_constraint.h | 3 +- ortools/sat/presolve_context.cc | 94 +-- 
ortools/sat/presolve_context.h | 22 +- ortools/sat/probing.cc | 3 +- ortools/sat/pseudo_costs.cc | 68 +-- ortools/sat/pseudo_costs.h | 5 +- ortools/sat/sat_parameters.proto | 11 +- ortools/sat/sat_solver.cc | 173 ++++-- ortools/sat/sat_solver.h | 19 +- ortools/sat/stat_tables.cc | 19 +- ortools/sat/stat_tables.h | 9 +- ortools/sat/subsolver.cc | 28 +- ortools/sat/subsolver.h | 37 ++ ortools/sat/util.cc | 2 +- ortools/sat/util.h | 67 +-- ortools/util/BUILD.bazel | 1 + ortools/util/saturated_arithmetic.h | 13 + ortools/util/strong_integers.h | 1 + 45 files changed, 2016 insertions(+), 744 deletions(-) diff --git a/ortools/sat/2d_orthogonal_packing_testing.cc b/ortools/sat/2d_orthogonal_packing_testing.cc index 95eddf91cb..597e718db3 100644 --- a/ortools/sat/2d_orthogonal_packing_testing.cc +++ b/ortools/sat/2d_orthogonal_packing_testing.cc @@ -14,6 +14,9 @@ #include "ortools/sat/2d_orthogonal_packing_testing.h" #include +#include +#include +#include #include #include @@ -34,7 +37,7 @@ std::vector GenerateNonConflictingRectangles( rectangles.reserve(num_rectangles); rectangles.push_back( {.x_min = 0, .x_max = kSizeMax, .y_min = 0, .y_max = kSizeMax}); - for (int i = 0; i < num_rectangles; ++i) { + while (rectangles.size() < num_rectangles) { std::swap(rectangles.back(), rectangles[absl::Uniform(random, 0ull, rectangles.size() - 1)]); const Rectangle& rec = rectangles.back(); @@ -49,6 +52,9 @@ std::vector GenerateNonConflictingRectangles( .x_max = rec.x_max, .y_min = rec.y_min, .y_max = rec.y_max}; + if (new_range.Area() == 0 || new_range2.Area() == 0) { + continue; + } rectangles.pop_back(); rectangles.push_back(new_range); rectangles.push_back(new_range2); @@ -63,6 +69,9 @@ std::vector GenerateNonConflictingRectangles( .x_max = rec.x_max, .y_min = cut, .y_max = rec.y_max}; + if (new_range.Area() == 0 || new_range2.Area() == 0) { + continue; + } rectangles.pop_back(); rectangles.push_back(new_range); rectangles.push_back(new_range2); @@ -71,6 +80,71 @@ std::vector 
GenerateNonConflictingRectangles( return rectangles; } +std::vector GenerateNonConflictingRectanglesWithPacking( + std::pair bb, int average_num_boxes, + absl::BitGenRef random) { + const double p = 0.01; + std::vector rectangles; + int num_retries = 0; + double average_size = + std::sqrt(bb.first.value() * bb.second.value() / average_num_boxes); + const int64_t n_x = static_cast(average_size / p); + const int64_t n_y = static_cast(average_size / p); + while (num_retries < 4) { + num_retries++; + + std::pair sizes; + do { + sizes.first = std::binomial_distribution<>(n_x, p)(random); + } while (sizes.first == 0 || sizes.first > bb.first); + do { + sizes.second = std::binomial_distribution<>(n_y, p)(random); + } while (sizes.second == 0 || sizes.second > bb.second); + + std::vector possible_x_positions = {0}; + std::vector possible_y_positions = {0}; + for (const Rectangle& rec : rectangles) { + possible_x_positions.push_back(rec.x_max); + possible_y_positions.push_back(rec.y_max); + } + std::sort(possible_x_positions.begin(), possible_x_positions.end()); + std::sort(possible_y_positions.begin(), possible_y_positions.end()); + bool found_position = false; + for (const IntegerValue x : possible_x_positions) { + for (const IntegerValue y : possible_y_positions) { + if (x + sizes.first > bb.first || y + sizes.second > bb.second) { + continue; + } + const Rectangle rec = {.x_min = x, + .x_max = x + sizes.first, + .y_min = y, + .y_max = y + sizes.second}; + bool conflict = false; + for (const Rectangle r : rectangles) { + if (!r.IsDisjoint(rec)) { + conflict = true; + break; + } + } + if (conflict) { + continue; + } else { + rectangles.push_back(rec); + found_position = true; + break; + } + } + if (found_position) { + break; + } + } + if (found_position) { + num_retries = 0; + } + } + return rectangles; +} + std::vector MakeItemsFromRectangles( absl::Span rectangles, double slack_factor, absl::BitGenRef random) { diff --git a/ortools/sat/2d_orthogonal_packing_testing.h 
b/ortools/sat/2d_orthogonal_packing_testing.h index 68e514f260..72de30a910 100644 --- a/ortools/sat/2d_orthogonal_packing_testing.h +++ b/ortools/sat/2d_orthogonal_packing_testing.h @@ -14,11 +14,13 @@ #ifndef OR_TOOLS_SAT_2D_ORTHOGONAL_PACKING_TESTING_H_ #define OR_TOOLS_SAT_2D_ORTHOGONAL_PACKING_TESTING_H_ +#include #include #include "absl/random/bit_gen_ref.h" #include "absl/types/span.h" #include "ortools/sat/diffn_util.h" +#include "ortools/sat/integer.h" namespace operations_research { namespace sat { @@ -26,6 +28,12 @@ namespace sat { std::vector GenerateNonConflictingRectangles(int num_rectangles, absl::BitGenRef random); +// Alternative way of generating random rectangles. This one generate random +// rectangles and try to pack them using the left-bottom-first order. +std::vector GenerateNonConflictingRectanglesWithPacking( + std::pair bb, int average_num_boxes, + absl::BitGenRef random); + std::vector MakeItemsFromRectangles( absl::Span rectangles, double slack_factor, absl::BitGenRef random); diff --git a/ortools/sat/2d_packing_brute_force.cc b/ortools/sat/2d_packing_brute_force.cc index d6381a9954..957c8a79ca 100644 --- a/ortools/sat/2d_packing_brute_force.cc +++ b/ortools/sat/2d_packing_brute_force.cc @@ -681,8 +681,13 @@ BruteForceResult BruteForceOrthogonalPacking( for (const PermutableItem& item : items) { result[item.index] = item.position; } - // VLOG_EVERY_N_SEC(3, 3) << "Found a feasible packing by brute force. Dot:\n " - // << RenderDot(bounding_box_size, result); + VLOG_EVERY_N_SEC(3, 3) << "Found a feasible packing by brute force. 
Dot:\n " + << RenderDot( + Rectangle{.x_min = 0, + .x_max = bounding_box_size.first, + .y_min = 0, + .y_max = bounding_box_size.second}, + result); return {.status = BruteForceResult::Status::kFoundSolution, .positions_for_solution = result}; } diff --git a/ortools/sat/2d_rectangle_presolve.cc b/ortools/sat/2d_rectangle_presolve.cc index 9fefb17ff6..94ec802c99 100644 --- a/ortools/sat/2d_rectangle_presolve.cc +++ b/ortools/sat/2d_rectangle_presolve.cc @@ -14,17 +14,24 @@ #include "ortools/sat/2d_rectangle_presolve.h" #include +#include #include #include #include +#include #include #include +#include "absl/algorithm/container.h" +#include "absl/container/btree_map.h" #include "absl/container/flat_hash_map.h" +#include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "ortools/base/logging.h" +#include "ortools/base/stl_util.h" +#include "ortools/graph/strongly_connected_components.h" #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" @@ -234,8 +241,6 @@ struct Edge { IntegerValue y_start; IntegerValue size; - enum class EdgePosition { TOP, BOTTOM, LEFT, RIGHT }; - static Edge GetEdge(const Rectangle& rectangle, EdgePosition pos) { switch (pos) { case EdgePosition::TOP: @@ -266,6 +271,15 @@ struct Edge { return x_start == other.x_start && y_start == other.y_start && size == other.size; } + + static bool CompareXThenY(const Edge& a, const Edge& b) { + return std::tie(a.x_start, a.y_start, a.size) < + std::tie(b.x_start, b.y_start, b.size); + } + static bool CompareYThenX(const Edge& a, const Edge& b) { + return std::tie(a.y_start, a.x_start, a.size) < + std::tie(b.y_start, b.x_start, b.size); + } }; } // namespace @@ -290,8 +304,6 @@ bool ReduceNumberofBoxes(std::vector* mandatory_rectangles, absl::flat_hash_map left_edges_to_rectangle; absl::flat_hash_map right_edges_to_rectangle; - using EdgePosition = Edge::EdgePosition; - bool changed_optional = false; bool 
changed_mandatory = false; @@ -403,5 +415,401 @@ bool ReduceNumberofBoxes(std::vector* mandatory_rectangles, return changed_mandatory; } +Neighbours BuildNeighboursGraph(absl::Span rectangles) { + // To build a graph of neighbours, we build a sorted vector for each one of + // the edges (top, bottom, etc) of the rectangles. Then we merge the bottom + // and top vectors and iterate on it. Due to the sorting order, segments where + // the bottom of a rectangle touches the top of another one must consecutive. + std::vector> edges_to_rectangle[4]; + std::vector> neighbours; + neighbours.reserve(2 * rectangles.size()); + for (int edge_int = 0; edge_int < 4; ++edge_int) { + const EdgePosition edge_position = static_cast(edge_int); + edges_to_rectangle[edge_position].reserve(rectangles.size()); + } + + for (int i = 0; i < rectangles.size(); ++i) { + const Rectangle& rectangle = rectangles[i]; + for (int edge_int = 0; edge_int < 4; ++edge_int) { + const EdgePosition edge_position = static_cast(edge_int); + const Edge edge = Edge::GetEdge(rectangle, edge_position); + edges_to_rectangle[edge_position].push_back({edge, i}); + } + } + for (int edge_int = 0; edge_int < 4; ++edge_int) { + const EdgePosition edge_position = static_cast(edge_int); + const bool sort_x_then_y = edge_position == EdgePosition::LEFT || + edge_position == EdgePosition::RIGHT; + const auto cmp = + sort_x_then_y + ? 
[](const std::pair& a, + const std::pair& + b) { return Edge::CompareXThenY(a.first, b.first); } + : [](const std::pair& a, const std::pair& b) { + return Edge::CompareYThenX(a.first, b.first); + }; + absl::c_sort(edges_to_rectangle[edge_position], cmp); + } + + constexpr struct EdgeData { + EdgePosition edge; + EdgePosition opposite_edge; + bool (*cmp)(const Edge&, const Edge&); + } edge_data[4] = {{.edge = EdgePosition::BOTTOM, + .opposite_edge = EdgePosition::TOP, + .cmp = &Edge::CompareYThenX}, + {.edge = EdgePosition::TOP, + .opposite_edge = EdgePosition::BOTTOM, + .cmp = &Edge::CompareYThenX}, + {.edge = EdgePosition::LEFT, + .opposite_edge = EdgePosition::RIGHT, + .cmp = &Edge::CompareXThenY}, + {.edge = EdgePosition::RIGHT, + .opposite_edge = EdgePosition::LEFT, + .cmp = &Edge::CompareXThenY}}; + + for (int edge_int = 0; edge_int < 4; ++edge_int) { + const EdgePosition edge_position = edge_data[edge_int].edge; + const EdgePosition opposite_edge_position = + edge_data[edge_int].opposite_edge; + auto it = edges_to_rectangle[edge_position].begin(); + for (const auto& [edge, index] : + edges_to_rectangle[opposite_edge_position]) { + while (it != edges_to_rectangle[edge_position].end() && + edge_data[edge_int].cmp(it->first, edge)) { + ++it; + } + if (it == edges_to_rectangle[edge_position].end()) { + break; + } + if (edge_position == EdgePosition::BOTTOM || + edge_position == EdgePosition::TOP) { + while (it != edges_to_rectangle[edge_position].end() && + it->first.y_start == edge.y_start && + it->first.x_start < edge.x_start + edge.size) { + neighbours.push_back({index, opposite_edge_position, it->second}); + neighbours.push_back({it->second, edge_position, index}); + ++it; + } + } else { + while (it != edges_to_rectangle[edge_position].end() && + it->first.x_start == edge.x_start && + it->first.y_start < edge.y_start + edge.size) { + neighbours.push_back({index, opposite_edge_position, it->second}); + neighbours.push_back({it->second, edge_position, index}); 
+ ++it; + } + } + } + } + + gtl::STLSortAndRemoveDuplicates(&neighbours); + return Neighbours(rectangles, neighbours); +} + +std::vector> SplitInConnectedComponents( + const Neighbours& neighbours) { + class GraphView { + public: + explicit GraphView(const Neighbours& neighbours) + : neighbours_(neighbours) {} + absl::Span operator[](int node) const { + temp_.clear(); + for (int edge = 0; edge < 4; ++edge) { + const auto edge_neighbors = neighbours_.GetSortedNeighbors( + node, static_cast(edge)); + for (int neighbor : edge_neighbors) { + temp_.push_back(neighbor); + } + } + return temp_; + } + + private: + const Neighbours& neighbours_; + mutable std::vector temp_; + }; + + std::vector> components; + FindStronglyConnectedComponents(neighbours.NumRectangles(), + GraphView(neighbours), &components); + return components; +} + +namespace { +IntegerValue GetClockwiseStart(EdgePosition edge, const Rectangle& rectangle) { + switch (edge) { + case EdgePosition::LEFT: + return rectangle.y_min; + case EdgePosition::RIGHT: + return rectangle.y_max; + case EdgePosition::BOTTOM: + return rectangle.x_max; + case EdgePosition::TOP: + return rectangle.x_min; + } +} + +IntegerValue GetClockwiseEnd(EdgePosition edge, const Rectangle& rectangle) { + switch (edge) { + case EdgePosition::LEFT: + return rectangle.y_max; + case EdgePosition::RIGHT: + return rectangle.y_min; + case EdgePosition::BOTTOM: + return rectangle.x_min; + case EdgePosition::TOP: + return rectangle.x_max; + } +} + +// Given a list of rectangles and their neighbours graph, find the list of +// vertical and horizontal segments that touches a single rectangle edge. Or, +// view in another way, the pieces of an edge that is touching the empty space. 
+// For example, this corresponds to the "0" segments in the example below: +// +// 000000 +// 0****0 000000 +// 0****0 0****0 +// 0****0 0****0 +// 00******00000****00000 +// 0********************0 +// 0********************0 +// 0000000000000000000000 +void GetAllSegmentsTouchingVoid( + absl::Span rectangles, const Neighbours& neighbours, + std::vector>& vertical_edges_on_boundary, + std::vector>& horizontal_edges_on_boundary) { + for (int i = 0; i < rectangles.size(); ++i) { + const Rectangle& rectangle = rectangles[i]; + for (int edge_int = 0; edge_int < 4; ++edge_int) { + const EdgePosition edge = static_cast(edge_int); + const auto box_neighbors = neighbours.GetSortedNeighbors(i, edge); + if (box_neighbors.empty()) { + if (edge == EdgePosition::LEFT || edge == EdgePosition::RIGHT) { + vertical_edges_on_boundary.push_back( + {Edge::GetEdge(rectangle, edge), i}); + } else { + horizontal_edges_on_boundary.push_back( + {Edge::GetEdge(rectangle, edge), i}); + } + continue; + } + IntegerValue previous_pos = GetClockwiseStart(edge, rectangle); + for (int n = 0; n <= box_neighbors.size(); ++n) { + IntegerValue neighbor_start; + const Rectangle* neighbor; + if (n == box_neighbors.size()) { + // On the last iteration we consider instead of the next neighbor the + // end of the current box. 
+ neighbor_start = GetClockwiseEnd(edge, rectangle); + } else { + const int neighbor_idx = box_neighbors[n]; + neighbor = &rectangles[neighbor_idx]; + neighbor_start = GetClockwiseStart(edge, *neighbor); + } + switch (edge) { + case EdgePosition::LEFT: + if (neighbor_start > previous_pos) { + vertical_edges_on_boundary.push_back( + {Edge{.x_start = rectangle.x_min, + .y_start = previous_pos, + .size = neighbor_start - previous_pos}, + i}); + } + break; + case EdgePosition::RIGHT: + if (neighbor_start < previous_pos) { + vertical_edges_on_boundary.push_back( + {Edge{.x_start = rectangle.x_max, + .y_start = neighbor_start, + .size = previous_pos - neighbor_start}, + i}); + } + break; + case EdgePosition::BOTTOM: + if (neighbor_start < previous_pos) { + horizontal_edges_on_boundary.push_back( + {Edge{.x_start = neighbor_start, + .y_start = rectangle.y_min, + .size = previous_pos - neighbor_start}, + i}); + } + break; + case EdgePosition::TOP: + if (neighbor_start > previous_pos) { + horizontal_edges_on_boundary.push_back( + {Edge{.x_start = previous_pos, + .y_start = rectangle.y_max, + .size = neighbor_start - previous_pos}, + i}); + } + break; + } + if (n != box_neighbors.size()) { + previous_pos = GetClockwiseEnd(edge, *neighbor); + } + } + } + } +} + +// Trace a boundary (interior or exterior) that contains the edge described by +// starting_edge_position and starting_step_point. This method removes the edges +// that were added to the boundary from `segments_to_follow`. +ShapePath TraceBoundary( + const EdgePosition& starting_edge_position, + std::pair starting_step_point, + std::array, + std::pair>, + 4>& segments_to_follow) { + // The boundary is composed of edges on the `segments_to_follow` map. So all + // we need is to find and glue them together on the right order. 
+ ShapePath path; + + auto extracted = + segments_to_follow[starting_edge_position].extract(starting_step_point); + CHECK(!extracted.empty()); + const int first_index = extracted.mapped().second; + + std::pair cur = starting_step_point; + int cur_index = first_index; + // Now we navigate from one edge to the next. To avoid going back, we remove + // used edges from the hash map. + while (true) { + path.step_points.push_back(cur); + + bool can_go[4] = {false, false, false, false}; + EdgePosition direction_to_take = EdgePosition::LEFT; + for (int edge_int = 0; edge_int < 4; ++edge_int) { + const EdgePosition edge = static_cast(edge_int); + if (segments_to_follow[edge].contains(cur)) { + can_go[edge] = true; + direction_to_take = edge; + } + } + + if (can_go == absl::Span{false, false, false, false}) { + // Cannot move anywhere, we closed the loop. + break; + } + + // Handle one pathological case. + if (can_go[EdgePosition::LEFT] && can_go[EdgePosition::RIGHT]) { + // Corner case (literally): + // ******** + // ******** + // ******** + // ******** + // ^ +++++++++ + // | +++++++++ + // | +++++++++ + // +++++++++ + // + // In this case we keep following the same box. 
+ auto it_x = segments_to_follow[EdgePosition::LEFT].find(cur); + if (cur_index == it_x->second.second) { + direction_to_take = EdgePosition::LEFT; + } else { + direction_to_take = EdgePosition::RIGHT; + } + } else if (can_go[EdgePosition::TOP] && can_go[EdgePosition::BOTTOM]) { + auto it_y = segments_to_follow[EdgePosition::TOP].find(cur); + if (cur_index == it_y->second.second) { + direction_to_take = EdgePosition::TOP; + } else { + direction_to_take = EdgePosition::BOTTOM; + } + } + + auto extracted = segments_to_follow[direction_to_take].extract(cur); + cur_index = extracted.mapped().second; + switch (direction_to_take) { + case EdgePosition::LEFT: + cur.first -= extracted.mapped().first; + segments_to_follow[EdgePosition::RIGHT].erase( + cur); // Forbid going back + break; + case EdgePosition::RIGHT: + cur.first += extracted.mapped().first; + segments_to_follow[EdgePosition::LEFT].erase(cur); // Forbid going back + break; + case EdgePosition::TOP: + cur.second += extracted.mapped().first; + segments_to_follow[EdgePosition::BOTTOM].erase( + cur); // Forbid going back + break; + case EdgePosition::BOTTOM: + cur.second -= extracted.mapped().first; + segments_to_follow[EdgePosition::TOP].erase(cur); // Forbid going back + break; + } + path.touching_box_index.push_back(cur_index); + } + path.touching_box_index.push_back(cur_index); + + return path; +} +} // namespace + +std::vector BoxesToShapes(absl::Span rectangles, + const Neighbours& neighbours) { + std::vector> vertical_edges_on_boundary; + std::vector> horizontal_edges_on_boundary; + GetAllSegmentsTouchingVoid(rectangles, neighbours, vertical_edges_on_boundary, + horizontal_edges_on_boundary); + + std::array, + std::pair>, + 4> + segments_to_follow; + + for (const auto& [edge, box_index] : vertical_edges_on_boundary) { + segments_to_follow[EdgePosition::TOP][{edge.x_start, edge.y_start}] = { + edge.size, box_index}; + segments_to_follow[EdgePosition::BOTTOM][{ + edge.x_start, edge.y_start + edge.size}] = 
{edge.size, box_index}; + } + for (const auto& [edge, box_index] : horizontal_edges_on_boundary) { + segments_to_follow[EdgePosition::RIGHT][{edge.x_start, edge.y_start}] = { + edge.size, box_index}; + segments_to_follow[EdgePosition::LEFT][{ + edge.x_start + edge.size, edge.y_start}] = {edge.size, box_index}; + } + + const auto components = SplitInConnectedComponents(neighbours); + std::vector result(components.size()); + std::vector box_to_component(rectangles.size()); + for (int i = 0; i < components.size(); ++i) { + for (const int box_index : components[i]) { + box_to_component[box_index] = i; + } + } + while (!segments_to_follow[EdgePosition::LEFT].empty()) { + // Get edge most to the bottom left + const int box_index = + segments_to_follow[EdgePosition::RIGHT].begin()->second.second; + const std::pair starting_step_point = + segments_to_follow[EdgePosition::RIGHT].begin()->first; + const int component_index = box_to_component[box_index]; + + // The left-most vertical edge of the connected component must be of its + // exterior boundary. So we must always see the exterior boundary before + // seeing any holes. + const bool is_hole = !result[component_index].boundary.step_points.empty(); + ShapePath& path = is_hole ? result[component_index].holes.emplace_back() + : result[component_index].boundary; + path = TraceBoundary(EdgePosition::RIGHT, starting_step_point, + segments_to_follow); + if (is_hole) { + // Follow the usual convention that holes are in the inverse orientation + // of the external boundary. 
+ absl::c_reverse(path.step_points); + absl::c_reverse(path.touching_box_index); + } + } + return result; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/2d_rectangle_presolve.h b/ortools/sat/2d_rectangle_presolve.h index d5cefb9c26..362f458ecf 100644 --- a/ortools/sat/2d_rectangle_presolve.h +++ b/ortools/sat/2d_rectangle_presolve.h @@ -14,10 +14,16 @@ #ifndef OR_TOOLS_SAT_2D_RECTANGLE_PRESOLVE_H_ #define OR_TOOLS_SAT_2D_RECTANGLE_PRESOLVE_H_ +#include +#include #include +#include "absl/algorithm/container.h" +#include "absl/container/flat_hash_map.h" +#include "absl/container/inlined_vector.h" #include "absl/types/span.h" #include "ortools/sat/diffn_util.h" +#include "ortools/sat/integer.h" namespace operations_research { namespace sat { @@ -44,6 +50,128 @@ bool PresolveFixed2dRectangles( bool ReduceNumberofBoxes(std::vector* mandatory_rectangles, std::vector* optional_rectangles); +enum EdgePosition { TOP = 0, RIGHT = 1, BOTTOM = 2, LEFT = 3 }; + +template +void AbslStringify(Sink& sink, EdgePosition e) { + switch (e) { + case EdgePosition::TOP: + sink.Append("TOP"); + break; + case EdgePosition::RIGHT: + sink.Append("RIGHT"); + break; + case EdgePosition::BOTTOM: + sink.Append("BOTTOM"); + break; + case EdgePosition::LEFT: + sink.Append("LEFT"); + break; + } +} + +// Given a set of non-overlapping rectangles, precompute a data-structure that +// allow for each rectangle to find the adjacent rectangle along an edge. +// +// Note that it only consider adjacent rectangles whose segments has a +// intersection of non-zero size. In particular, rectangles as following are not +// considered adjacent: +// +// ******** +// ******** +// ******** +// ******** +// +++++++++ +// +++++++++ +// +++++++++ +// +++++++++ +// +// Precondition: All rectangles must be disjoint. 
+class Neighbours { + public: + class CompareClockwise { + public: + explicit CompareClockwise(EdgePosition edge) : edge_(edge) {} + + bool operator()(const Rectangle& a, const Rectangle& b) const { + switch (edge_) { + case EdgePosition::BOTTOM: + return std::tie(a.x_min, a.x_max) > std::tie(b.x_min, b.x_max); + case EdgePosition::TOP: + return std::tie(a.x_min, a.x_max) < std::tie(b.x_min, b.x_max); + case EdgePosition::LEFT: + return std::tie(a.y_min, a.y_max) < std::tie(b.y_min, b.y_max); + case EdgePosition::RIGHT: + return std::tie(a.y_min, a.y_max) > std::tie(b.y_min, b.y_max); + } + } + EdgePosition edge_; + }; + + explicit Neighbours( + absl::Span rectangles, + absl::Span> neighbors) + : size_(rectangles.size()) { + for (const auto& [box_index, edge, neighbor] : neighbors) { + neighbors_[edge][box_index].push_back(neighbor); + } + for (int edge = 0; edge < 4; ++edge) { + for (auto& [box_index, neighbors] : neighbors_[edge]) { + absl::c_sort(neighbors, [&rectangles, edge](int a, int b) { + return CompareClockwise(static_cast(edge))( + rectangles[a], rectangles[b]); + }); + } + } + } + + int NumRectangles() const { return size_; } + + // Neighbors are sorted in the clockwise order. + absl::Span GetSortedNeighbors(int rectangle_index, + EdgePosition edge) const { + if (auto it = neighbors_[edge].find(rectangle_index); + it != neighbors_[edge].end()) { + return it->second; + } else { + return {}; + } + } + + private: + absl::flat_hash_map> neighbors_[4]; + int size_; +}; + +Neighbours BuildNeighboursGraph(absl::Span rectangles); + +std::vector> SplitInConnectedComponents( + const Neighbours& neighbours); + +// Generally, given a set of non-overlapping rectangles and a path that doesn't +// cross itself, the path can be cut into segments that touch only one single +// rectangle in the interior of the region delimited by the path. This struct +// holds a path cut into such segments. 
In particular, for the contour of an +// union of rectangles, the path is a subset of the union of all the rectangle's +// edges. +struct ShapePath { + // The two vectors should have exactly the same size. + std::vector> step_points; + // touching_box_index[i] contains the index of the unique interior rectangle + // touching the segment step_points[i]->step_points[(i+1)%size]. + std::vector touching_box_index; +}; + +struct SingleShape { + ShapePath boundary; + std::vector holes; +}; + +// Given a set of rectangles, split it into connected components and transform +// each individual set into a shape described by its boundary and holes paths. +std::vector BoxesToShapes(absl::Span rectangles, + const Neighbours& neighbours); + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/2d_rectangle_presolve_test.cc b/ortools/sat/2d_rectangle_presolve_test.cc index bbe5a5bee6..0c47b13152 100644 --- a/ortools/sat/2d_rectangle_presolve_test.cc +++ b/ortools/sat/2d_rectangle_presolve_test.cc @@ -23,6 +23,7 @@ #include #include +#include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" @@ -178,9 +179,11 @@ TEST(RectanglePresolve, RandomTest) { // Presolve the fixed items. PresolveFixed2dRectangles(input_in_range, &new_fixed_rectangles); - LOG(INFO) << "Presolved:\n" - << RenderDot(std::nullopt, fixed_rectangles) << "To:\n" - << RenderDot(std::nullopt, new_fixed_rectangles); + if (run == 0) { + LOG(INFO) << "Presolved:\n" + << RenderDot(std::nullopt, fixed_rectangles) << "To:\n" + << RenderDot(std::nullopt, new_fixed_rectangles); + } CHECK_LE(new_fixed_rectangles.size(), kFixedRectangleSize); @@ -323,160 +326,409 @@ TEST(BuildNeighboursGraphTest, RandomTest) { } } -ShapePath TraceBoundaryNaive( - std::pair starting_corner, - absl::Span rectangles) { - // First build a grid that tells by which box each 1x1 rectangle is occupied - // or -1 if empty. 
- constexpr int kBoundingBoxSize = 100; - std::vector> grid( - kBoundingBoxSize + 1, std::vector(kBoundingBoxSize + 1, -1)); +struct ContourPoint { + IntegerValue x; + IntegerValue y; + int next_box_index; + EdgePosition next_direction; - for (int n = 0; n < rectangles.size(); n++) { - const Rectangle& r = rectangles[n]; - CHECK_GE(r.x_min, 0); - CHECK_LE(r.x_max, kBoundingBoxSize); - CHECK_GE(r.y_min, 0); - CHECK_LE(r.y_max, kBoundingBoxSize); - for (IntegerValue i = r.x_min; i < r.x_max; i++) { - for (IntegerValue j = r.y_min; j < r.y_max; j++) { - grid[i.value()][j.value()] = n; + bool operator!=(const ContourPoint& other) const { + return x != other.x || y != other.y || + next_box_index != other.next_box_index || + next_direction != other.next_direction; + } +}; + +// This function runs in O(log N). +ContourPoint NextByClockwiseOrder(const ContourPoint& point, + absl::Span rectangles, + const Neighbours& neighbours) { + // This algorithm is very verbose, but it is about handling four cases. In the + // schema below, "-->" is the current direction, "X" the next point and + // the dashed arrow the next direction. 
+ // + // Case 1: + // ++++++++ + // ^ ++++++++ + // : ++++++++ + // : ++++++++ + // ++++++++ + // ---> X ++++++++ + // ****************** + // ****************** + // ****************** + // ****************** + // + // Case 2: + // ^ ++++++++ + // : ++++++++ + // : ++++++++ + // ++++++++ + // ---> X ++++++++ + // *************++++++++ + // *************++++++++ + // ************* + // ************* + // + // Case 3: + // ---> X ...> + // *************++++++++ + // *************++++++++ + // *************++++++++ + // *************++++++++ + // + // Case 4: + // ---> X + // ************* : + // ************* : + // ************* : + // ************* \/ + ContourPoint result; + const Rectangle& cur_rectangle = rectangles[point.next_box_index]; + + EdgePosition cur_edge; + bool clockwise; + // Much of the code below need to know two things: in which direction we are + // going and what edge of which rectangle we are touching. For example, in the + // "Case 4" drawing above we are going RIGHT and touching the TOP edge of the + // current rectangle. This switch statement finds this `cur_edge`. 
+ switch (point.next_direction) { + case EdgePosition::TOP: + if (cur_rectangle.x_max == point.x) { + cur_edge = EdgePosition::RIGHT; + clockwise = false; + } else { + cur_edge = EdgePosition::LEFT; + clockwise = true; } - } + break; + case EdgePosition::BOTTOM: + if (cur_rectangle.x_min == point.x) { + cur_edge = EdgePosition::LEFT; + clockwise = false; + } else { + cur_edge = EdgePosition::RIGHT; + clockwise = true; + } + break; + case EdgePosition::LEFT: + if (cur_rectangle.y_max == point.y) { + cur_edge = EdgePosition::TOP; + clockwise = false; + } else { + cur_edge = EdgePosition::BOTTOM; + clockwise = true; + } + break; + case EdgePosition::RIGHT: + if (cur_rectangle.y_min == point.y) { + cur_edge = EdgePosition::BOTTOM; + clockwise = false; + } else { + cur_edge = EdgePosition::TOP; + clockwise = true; + } + break; } - // Now collect all the boundary edges: an occupied cell that touches an - // unoccupied one. - absl::flat_hash_map, int> x_edges; - absl::flat_hash_map, int> y_edges; - for (int i = -1; i < kBoundingBoxSize; i++) { - for (int j = -1; j < kBoundingBoxSize; j++) { - if (i != -1) { - if ((j == -1 || grid[i][j] == -1) && grid[i][j + 1] != -1) { - x_edges[{i, j + 1}] = grid[i][j + 1]; - } - if (j != -1 && grid[i][j + 1] == -1 && grid[i][j] != -1) { - x_edges[{i, j + 1}] = grid[i][j]; - } - } - if (j != -1) { - if ((i == -1 || grid[i][j] == -1) && grid[i + 1][j] != -1) { - y_edges[{i + 1, j}] = grid[i + 1][j]; - } - if (i != -1 && grid[i + 1][j] == -1 && grid[i][j] != -1) { - y_edges[{i + 1, j}] = grid[i][j]; - } - } - } - } + // Test case 1. We need to find the next box after the current point in the + // edge we are following in the current direction. 
+ const auto cur_edge_neighbors = + neighbours.GetSortedNeighbors(point.next_box_index, cur_edge); - ShapePath path; - std::pair cur = starting_corner; - int cur_index; - if (x_edges.contains(starting_corner)) { - cur_index = x_edges.at(starting_corner); - } else if (y_edges.contains(starting_corner)) { - cur_index = y_edges.at(starting_corner); - } else { - LOG(FATAL) << "Should not happen: {" << starting_corner.first << "," - << starting_corner.second << "} " - << RenderDot(std::nullopt, rectangles); - } - const int first_index = cur_index; - - auto is_aligned = [](const std::pair& p1, - const std::pair& p2, - const std::pair& p3) { - return ((p1.first == p2.first) == (p2.first == p3.first)) && - ((p1.second == p2.second) == (p2.second == p3.second)); - }; - - // Grow the path by a segment of size one. - const auto add_segment = - [&path, &is_aligned](const std::pair& segment, - int index) { - if (path.step_points.size() > 1 && - is_aligned(path.step_points[path.step_points.size() - 1], - path.step_points[path.step_points.size() - 2], - segment) && - path.touching_box_index.back() == index) { - path.step_points.back() = segment; + const Rectangle fake_box_for_lower_bound = { + .x_min = point.x, .x_max = point.x, .y_min = point.y, .y_max = point.y}; + const auto clockwise_cmp = Neighbours::CompareClockwise(cur_edge); + auto it = absl::c_lower_bound( + cur_edge_neighbors, -1, + [&fake_box_for_lower_bound, rectangles, clockwise_cmp, clockwise](int a, + int b) { + const Rectangle& rectangle_a = + (a == -1 ? fake_box_for_lower_bound : rectangles[a]); + const Rectangle& rectangle_b = + (b == -1 ? fake_box_for_lower_bound : rectangles[b]); + if (clockwise) { + return clockwise_cmp(rectangle_a, rectangle_b); } else { - if (!path.step_points.empty()) { - path.touching_box_index.push_back(index); - } - path.step_points.push_back(segment); + return clockwise_cmp(rectangle_b, rectangle_a); } - }; + }); - // Now we navigate from one edge to the next. 
To avoid going back, we remove - // used edges from the hash map. - do { - add_segment(cur, cur_index); - - // Find the next segment. - if (x_edges.contains({cur.first, cur.second}) && - x_edges.contains({cur.first - 1, cur.second}) && - !path.touching_box_index.empty()) { - // Corner case (literally): - // ******** - // ******** - // ******** - // ******** - // +++++++++ - // +++++++++ - // +++++++++ - // +++++++++ - // - // In this case we keep following the same box. - auto it_x = x_edges.find({cur.first, cur.second}); - if (cur_index == it_x->second) { - auto extract = x_edges.extract({cur.first, cur.second}); - cur = {cur.first + 1, cur.second}; - cur_index = extract.mapped(); - } else { - auto extract = x_edges.extract({cur.first - 1, cur.second}); - cur = extract.key(); - cur_index = extract.mapped(); - } - } else if (y_edges.contains({cur.first, cur.second}) && - y_edges.contains({cur.first, cur.second - 1}) && - !path.touching_box_index.empty()) { - auto it_y = y_edges.find({cur.first, cur.second}); - if (cur_index == it_y->second) { - auto extract = y_edges.extract({cur.first, cur.second}); - cur = {cur.first, cur.second + 1}; - cur_index = extract.mapped(); - } else { - auto extract = y_edges.extract({cur.first, cur.second - 1}); - cur = extract.key(); - cur_index = extract.mapped(); - } - } else if (auto extract = y_edges.extract({cur.first, cur.second}); - !extract.empty()) { - cur = {cur.first, cur.second + 1}; - cur_index = extract.mapped(); - } else if (auto extract = x_edges.extract({cur.first - 1, cur.second}); - !extract.empty()) { - cur = extract.key(); - cur_index = extract.mapped(); - } else if (auto extract = x_edges.extract({cur.first, cur.second}); - !extract.empty()) { - cur = {cur.first + 1, cur.second}; - cur_index = extract.mapped(); - } else if (auto extract = y_edges.extract({cur.first, cur.second - 1}); - !extract.empty()) { - cur = extract.key(); - cur_index = extract.mapped(); - } else { - LOG(FATAL) << "Should not happen: {" << 
cur.first << "," << cur.second - << "} " << RenderContour(std::nullopt, rectangles, path); + if (it != cur_edge_neighbors.end()) { + // We found box in the current edge. We are in case 1. + result.next_box_index = *it; + const Rectangle& next_rectangle = rectangles[*it]; + switch (point.next_direction) { + case EdgePosition::TOP: + result.x = point.x; + result.y = next_rectangle.y_min; + if (cur_edge == EdgePosition::LEFT) { + result.next_direction = EdgePosition::LEFT; + } else { + result.next_direction = EdgePosition::RIGHT; + } + break; + case EdgePosition::BOTTOM: + result.x = point.x; + result.y = next_rectangle.y_max; + if (cur_edge == EdgePosition::LEFT) { + result.next_direction = EdgePosition::LEFT; + } else { + result.next_direction = EdgePosition::RIGHT; + } + break; + case EdgePosition::LEFT: + result.y = point.y; + result.x = next_rectangle.x_max; + if (cur_edge == EdgePosition::TOP) { + result.next_direction = EdgePosition::TOP; + } else { + result.next_direction = EdgePosition::BOTTOM; + } + break; + case EdgePosition::RIGHT: + result.y = point.y; + result.x = next_rectangle.x_min; + if (cur_edge == EdgePosition::TOP) { + result.next_direction = EdgePosition::TOP; + } else { + result.next_direction = EdgePosition::BOTTOM; + } + break; } - } while (cur != starting_corner); + return result; + } - add_segment(cur, cur_index); - path.touching_box_index.push_back(first_index); - return path; + // We now know we are not in Case 1, so know the next (x, y) position: it is + // the corner of the current rectangle in the direction we are going. 
+ switch (point.next_direction) { + case EdgePosition::TOP: + result.x = point.x; + result.y = cur_rectangle.y_max; + break; + case EdgePosition::BOTTOM: + result.x = point.x; + result.y = cur_rectangle.y_min; + break; + case EdgePosition::LEFT: + result.x = cur_rectangle.x_min; + result.y = point.y; + break; + case EdgePosition::RIGHT: + result.x = cur_rectangle.x_max; + result.y = point.y; + break; + } + + // Case 2 and 3. + const auto next_edge_neighbors = + neighbours.GetSortedNeighbors(point.next_box_index, point.next_direction); + if (!next_edge_neighbors.empty()) { + // We are looking for the neighbor on the edge of the current box. + const int candidate_index = + clockwise ? next_edge_neighbors.front() : next_edge_neighbors.back(); + const Rectangle& next_rectangle = rectangles[candidate_index]; + switch (point.next_direction) { + case EdgePosition::TOP: + case EdgePosition::BOTTOM: + if (next_rectangle.x_min < point.x && point.x < next_rectangle.x_max) { + // Case 2 + result.next_box_index = candidate_index; + if (cur_edge == EdgePosition::LEFT) { + result.next_direction = EdgePosition::LEFT; + } else { + result.next_direction = EdgePosition::RIGHT; + } + return result; + } else if (next_rectangle.x_min == point.x && + cur_edge == EdgePosition::LEFT) { + // Case 3 + result.next_box_index = candidate_index; + result.next_direction = point.next_direction; + return result; + } else if (next_rectangle.x_max == point.x && + cur_edge == EdgePosition::RIGHT) { + // Case 3 + result.next_box_index = candidate_index; + result.next_direction = point.next_direction; + return result; + } + break; + case EdgePosition::LEFT: + case EdgePosition::RIGHT: + if (next_rectangle.y_min < point.y && point.y < next_rectangle.y_max) { + result.next_box_index = candidate_index; + if (cur_edge == EdgePosition::TOP) { + result.next_direction = EdgePosition::TOP; + } else { + result.next_direction = EdgePosition::BOTTOM; + } + return result; + } else if (next_rectangle.y_max == 
point.y && + cur_edge == EdgePosition::TOP) { + result.next_box_index = candidate_index; + result.next_direction = point.next_direction; + return result; + } else if (next_rectangle.y_min == point.y && + cur_edge == EdgePosition::BOTTOM) { + result.next_box_index = candidate_index; + result.next_direction = point.next_direction; + return result; + } + break; + } + } + + // Now we must be in the case 4. + result.next_box_index = point.next_box_index; + switch (point.next_direction) { + case EdgePosition::TOP: + case EdgePosition::BOTTOM: + if (cur_edge == EdgePosition::LEFT) { + result.next_direction = EdgePosition::RIGHT; + } else { + result.next_direction = EdgePosition::LEFT; + } + break; + case EdgePosition::LEFT: + case EdgePosition::RIGHT: + if (cur_edge == EdgePosition::TOP) { + result.next_direction = EdgePosition::BOTTOM; + } else { + result.next_direction = EdgePosition::TOP; + } + break; + } + return result; +} + +// Returns a path delimiting a boundary of the union of a set of rectangles. It +// should work for both the exterior boundary and the boundaries of the holes +// inside the union. The path will start on `starting_point` and follow the +// boundary on clockwise order. +// +// `starting_point` should be a point in the boundary and `starting_box_index` +// the index of a rectangle with one edge containing `starting_point`. +// +// The resulting `path` satisfy: +// - path.step_points.front() == path.step_points.back() == starting_point +// - path.touching_box_index.front() == path.touching_box_index.back() == +// == starting_box_index +// +ShapePath TraceBoundary( + const std::pair& starting_step_point, + int starting_box_index, absl::Span rectangles, + const Neighbours& neighbours) { + // First find which direction we need to go to follow the border in the + // clockwise order. 
+ const Rectangle& initial_rec = rectangles[starting_box_index]; + bool touching_edge[4]; + touching_edge[EdgePosition::LEFT] = + initial_rec.x_min == starting_step_point.first; + touching_edge[EdgePosition::RIGHT] = + initial_rec.x_max == starting_step_point.first; + touching_edge[EdgePosition::TOP] = + initial_rec.y_max == starting_step_point.second; + touching_edge[EdgePosition::BOTTOM] = + initial_rec.y_min == starting_step_point.second; + + EdgePosition next_direction; + if (touching_edge[EdgePosition::LEFT]) { + if (touching_edge[EdgePosition::TOP]) { + next_direction = EdgePosition::RIGHT; + } else { + next_direction = EdgePosition::TOP; + } + } else if (touching_edge[EdgePosition::RIGHT]) { + if (touching_edge[EdgePosition::BOTTOM]) { + next_direction = EdgePosition::LEFT; + } else { + next_direction = EdgePosition::BOTTOM; + } + } else if (touching_edge[EdgePosition::TOP]) { + next_direction = EdgePosition::LEFT; + } else if (touching_edge[EdgePosition::BOTTOM]) { + next_direction = EdgePosition::RIGHT; + } else { + LOG(FATAL) + << "TraceBoundary() got a `starting_step_point` that is not in an edge " + "of the rectangle of `starting_box_index`. This is not allowed."; + } + const ContourPoint starting_point = {.x = starting_step_point.first, + .y = starting_step_point.second, + .next_box_index = starting_box_index, + .next_direction = next_direction}; + ShapePath result; + for (ContourPoint point = starting_point; true; + point = NextByClockwiseOrder(point, rectangles, neighbours)) { + if (result.step_points.size() > 3 && + result.step_points.back() == result.step_points.front() && + point.x == result.step_points[1].first && + point.y == result.step_points[1].second) { + break; + } + if (!result.step_points.empty() && + point.x == result.step_points.back().first && + point.y == result.step_points.back().second) { + // There is a special corner-case of the algorithm using the neighbours. 
+ // Consider the following set-up: + // + // ******** | + // ******** | + // ******** +----> + // ########++++++++ + // ########++++++++ + // ########++++++++ + // + // In this case, the only way the algorithm could reach the "+" box is via + // the "#" box, but which is doesn't contribute to the path. The algorithm + // returns a technically correct zero-size interval, which might be useful + // for callers that want to count the "#" box as visited, but this is not + // our case. + result.touching_box_index.back() = point.next_box_index; + } else { + result.touching_box_index.push_back(point.next_box_index); + result.step_points.push_back({point.x, point.y}); + } + } + return result; +} + +std::string RenderShapes(std::optional bb, + absl::Span rectangles, + const std::vector& shapes) { + const std::vector colors = {"black", "white", "orange", + "cyan", "yellow", "purple"}; + std::stringstream ss; + ss << " edge[headclip=false, tailclip=false, penwidth=40];\n"; + int count = 0; + for (int i = 0; i < shapes.size(); ++i) { + std::string_view shape_color = colors[i % colors.size()]; + for (int j = 0; j < shapes[i].boundary.step_points.size(); ++j) { + std::pair p = + shapes[i].boundary.step_points[j]; + ss << " p" << count << "[pos=\"" << 2 * p.first << "," << 2 * p.second + << "!\" shape=point]\n"; + if (j != shapes[i].boundary.step_points.size() - 1) { + ss << " p" << count << "->p" << count + 1 << " [color=\"" + << shape_color << "\"];\n"; + } + ++count; + } + for (const ShapePath& hole : shapes[i].holes) { + for (int j = 0; j < hole.step_points.size(); ++j) { + std::pair p = hole.step_points[j]; + ss << " p" << count << "[pos=\"" << 2 * p.first << "," << 2 * p.second + << "!\" shape=point]\n"; + if (j != hole.step_points.size() - 1) { + ss << " p" << count << "->p" << count + 1 << " [color=\"" + << shape_color << "\", penwidth=20];\n"; + } + ++count; + } + } + } + return RenderDot(bb, rectangles, ss.str()); } TEST(ContourTest, Random) { @@ -514,29 +766,25 @@ 
TEST(ContourTest, Random) { } } - const ShapePath shape = - TraceBoundary(min_coord, min_index, fixed_rectangles, neighbours); - absl::flat_hash_set seen; - std::vector component; - std::vector index_map(input.size()); - for (const int box_index : components[0]) { - component.push_back(fixed_rectangles[box_index]); - index_map[box_index] = component.size() - 1; + auto s = BoxesToShapes(fixed_rectangles, neighbours); + for (int i = 0; i < s.size(); ++i) { + const ShapePath& shape = s[i].boundary; + const ShapePath expected_shape = + TraceBoundary(shape.step_points[0], shape.touching_box_index[0], + fixed_rectangles, neighbours); + if (shape.step_points != expected_shape.step_points) { + LOG(ERROR) << "Fast algo:\n" + << RenderContour(bb, fixed_rectangles, shape); + LOG(ERROR) << "Naive algo:\n" + << RenderContour(bb, fixed_rectangles, expected_shape); + LOG(FATAL) << "Found different solutions between naive and fast algo!"; + } + EXPECT_EQ(shape.step_points, expected_shape.step_points); + EXPECT_EQ(shape.touching_box_index, expected_shape.touching_box_index); } - const ShapePath expected_shape = - TraceBoundaryNaive(shape.step_points[0], component); - if (shape.step_points != expected_shape.step_points) { - LOG(ERROR) << "Fast algo:\n" - << RenderContour(bb, fixed_rectangles, shape); - LOG(ERROR) << "Naive algo:\n" - << RenderContour(bb, component, expected_shape); - LOG(FATAL) << "Found different solutions between naive and fast algo!"; - } - EXPECT_EQ(shape.step_points, expected_shape.step_points); - for (int i = 0; i < shape.step_points.size(); ++i) { - EXPECT_EQ(index_map[shape.touching_box_index[i]], - expected_shape.touching_box_index[i]); + if (run == 0) { + LOG(INFO) << RenderShapes(bb, fixed_rectangles, s); } } } diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index ee14f5a97d..87ac5cd71e 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -346,7 +346,6 @@ cc_library( ":clause", ":cp_model_cc_proto", ":cp_model_checker", - 
":cp_model_lns", ":cp_model_loader", ":cp_model_mapping", ":cp_model_postsolve", @@ -788,10 +787,8 @@ cc_library( ":util", "//ortools/base", "//ortools/base:stl_util", - "//ortools/base:types", "//ortools/port:proto_utils", "//ortools/util:logging", - "//ortools/util:saturated_arithmetic", "//ortools/util:sorted_interval_list", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/container:flat_hash_map", @@ -800,6 +797,7 @@ cc_library( "@com_google_absl//absl/log:check", "@com_google_absl//absl/meta:type_traits", "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", "@com_google_protobuf//:protobuf", ], ) @@ -2536,6 +2534,7 @@ cc_library( "//ortools/base:stl_util", "//ortools/graph:strongly_connected_components", "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/container:btree", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/container:inlined_vector", @@ -2555,6 +2554,7 @@ cc_test( ":diffn_util", ":integer", "//ortools/base:gmock_main", + "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/log", diff --git a/ortools/sat/clause.cc b/ortools/sat/clause.cc index 1b952d62bb..fca9d35be0 100644 --- a/ortools/sat/clause.cc +++ b/ortools/sat/clause.cc @@ -500,6 +500,7 @@ void ClauseManager::DeleteRemovedClauses() { void BinaryImplicationGraph::Resize(int num_variables) { SCOPED_TIME_STAT(&stats_); + bfs_stack_.resize(num_variables << 1); implications_.resize(num_variables << 1); implies_something_.resize(num_variables << 1); might_have_dups_.resize(num_variables << 1); @@ -948,7 +949,7 @@ void BinaryImplicationGraph::MinimizeConflictFirst( for (const LiteralIndex i : is_marked_.PositionsSetAtLeastOnce()) { // TODO(user): if this is false, then we actually have a conflict of size 2. 
// This can only happen if the binary clause was not propagated properly - // if for instance we do chronological bactracking without re-enqueing the + // if for instance we do chronological bactracking without re-enqueuing the // consequence of a binary clause. if (trail.Assignment().LiteralIsTrue(Literal(i))) { marked->Set(Literal(i).Variable()); @@ -2053,9 +2054,7 @@ BinaryImplicationGraph::HeuristicAmoPartition(std::vector* literals) { } void BinaryImplicationGraph::MarkDescendants(Literal root) { - bfs_stack_.resize(implications_.size()); auto* const stack = bfs_stack_.data(); - const int amo_size = static_cast(at_most_ones_.size()); auto is_marked = is_marked_.const_view(); auto is_redundant = is_redundant_.const_view(); if (is_redundant[root]) return; @@ -2063,6 +2062,7 @@ void BinaryImplicationGraph::MarkDescendants(Literal root) { int stack_size = 1; stack[0] = root; is_marked_.Set(root); + const int amo_size = static_cast(at_most_ones_.size()); for (int j = 0; j < stack_size; ++j) { const Literal current = stack[j]; if (!implies_something_[current]) continue; @@ -2094,8 +2094,8 @@ std::vector BinaryImplicationGraph::ExpandAtMostOne( std::vector clique(at_most_one.begin(), at_most_one.end()); // Optim. 
- for (int i = 0; i < clique.size(); ++i) { - if (implications_[clique[i]].empty() || is_redundant_[clique[i]]) { + for (const Literal l : clique) { + if (implications_[l].empty() || is_redundant_[l]) { return clique; } } diff --git a/ortools/sat/cp_model.cc b/ortools/sat/cp_model.cc index 94e097d4f8..392db732ea 100644 --- a/ortools/sat/cp_model.cc +++ b/ortools/sat/cp_model.cc @@ -763,9 +763,8 @@ void CpModelBuilder::FixVariable(BoolVar var, bool value) { Constraint CpModelBuilder::AddBoolOr(absl::Span literals) { ConstraintProto* const proto = cp_model_.add_constraints(); - for (const BoolVar& lit : literals) { - proto->mutable_bool_or()->add_literals(lit.index_); - } + BoolArgumentProto* const bool_or = proto->mutable_bool_or(); + for (const BoolVar& lit : literals) bool_or->add_literals(lit.index_); return Constraint(proto); } @@ -783,9 +782,8 @@ Constraint CpModelBuilder::AddAtMostOne(absl::Span literals) { Constraint CpModelBuilder::AddExactlyOne(absl::Span literals) { ConstraintProto* const proto = cp_model_.add_constraints(); - for (const BoolVar& lit : literals) { - proto->mutable_exactly_one()->add_literals(lit.index_); - } + BoolArgumentProto* const exactly_one = proto->mutable_exactly_one(); + for (const BoolVar& lit : literals) exactly_one->add_literals(lit.index_); return Constraint(proto); } diff --git a/ortools/sat/cp_model_expand.cc b/ortools/sat/cp_model_expand.cc index 53d0b3f60e..e9cceb9d4d 100644 --- a/ortools/sat/cp_model_expand.cc +++ b/ortools/sat/cp_model_expand.cc @@ -29,6 +29,7 @@ #include "absl/log/check.h" #include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" +#include "absl/types/span.h" #include "google/protobuf/message.h" #include "ortools/base/logging.h" #include "ortools/base/stl_util.h" @@ -126,7 +127,7 @@ void ExpandReservoirUsingCircuit(int64_t sum_of_positive_demand, { circuit->add_tails(num_events); circuit->add_heads(num_events); - circuit->add_literals(context->NewBoolVar()); + 
circuit->add_literals(context->NewBoolVar("reservoir expansion")); } for (int i = 0; i < num_events; ++i) { @@ -141,7 +142,7 @@ void ExpandReservoirUsingCircuit(int64_t sum_of_positive_demand, // We use the available index 'num_events'. { // Circuit starts at i, level_vars[i] == demand_expr[i]. - const int start_var = context->NewBoolVar(); + const int start_var = context->NewBoolVar("reservoir expansion"); circuit->add_tails(num_events); circuit->add_heads(i); circuit->add_literals(start_var); @@ -163,7 +164,7 @@ void ExpandReservoirUsingCircuit(int64_t sum_of_positive_demand, // Circuit ends at i, no extra constraint there. circuit->add_tails(i); circuit->add_heads(num_events); - circuit->add_literals(context->NewBoolVar()); + circuit->add_literals(context->NewBoolVar("reservoir expansion")); } for (int j = 0; j < num_events; ++j) { @@ -179,7 +180,7 @@ void ExpandReservoirUsingCircuit(int64_t sum_of_positive_demand, // reservoir except if the set of time point is exactly the same! // otherwise if we miss one, then A "after" B in one circuit do not // implies that there is no C in between in another! 
- const int arc_i_j = context->NewBoolVar(); + const int arc_i_j = context->NewBoolVar("reservoir expansion"); circuit->add_tails(i); circuit->add_heads(j); circuit->add_literals(arc_i_j); @@ -755,13 +756,13 @@ void ExpandLinMax(ConstraintProto* ct, PresolveContext* context) { std::vector enforcement_literals; enforcement_literals.reserve(num_exprs); if (num_exprs == 2) { - const int new_bool = context->NewBoolVar(); + const int new_bool = context->NewBoolVar("lin max expansion"); enforcement_literals.push_back(new_bool); enforcement_literals.push_back(NegatedRef(new_bool)); } else { ConstraintProto* exactly_one = context->working_model->add_constraints(); for (int i = 0; i < num_exprs; ++i) { - const int new_bool = context->NewBoolVar(); + const int new_bool = context->NewBoolVar("lin max expansion"); exactly_one->mutable_exactly_one()->add_literals(new_bool); enforcement_literals.push_back(new_bool); } @@ -1194,7 +1195,7 @@ void ExpandAutomaton(ConstraintProto* ct, PresolveContext* context) { out_encoding.clear(); if (states.size() == 2) { - const int var = context->NewBoolVar(); + const int var = context->NewBoolVar("automaton expansion"); out_encoding[states[0]] = var; out_encoding[states[1]] = NegatedRef(var); } else if (states.size() > 2) { @@ -1243,7 +1244,7 @@ void ExpandAutomaton(ConstraintProto* ct, PresolveContext* context) { } } - out_encoding[state] = context->NewBoolVar(); + out_encoding[state] = context->NewBoolVar("automaton expansion"); } } } @@ -1302,7 +1303,7 @@ void ExpandAutomaton(ConstraintProto* ct, PresolveContext* context) { // expand this small table with 3 columns (i.e. compress, negate, etc...). 
std::vector tuple_literals; if (num_tuples == 2) { - const int bool_var = context->NewBoolVar(); + const int bool_var = context->NewBoolVar("automaton expansion"); tuple_literals.push_back(bool_var); tuple_literals.push_back(NegatedRef(bool_var)); } else { @@ -1320,7 +1321,7 @@ void ExpandAutomaton(ConstraintProto* ct, PresolveContext* context) { } else if (out_count[out_states[i]] == 1 && !out_encoding.empty()) { tuple_literal = out_encoding[out_states[i]]; } else { - tuple_literal = context->NewBoolVar(); + tuple_literal = context->NewBoolVar("automaton expansion"); } tuple_literals.push_back(tuple_literal); @@ -1818,7 +1819,7 @@ void CompressAndExpandPositiveTable(ConstraintProto* ct, if (ct->enforcement_literal().size() == 1) { table_is_active_literal = ct->enforcement_literal(0); } else if (ct->enforcement_literal().size() > 1) { - table_is_active_literal = context->NewBoolVar(); + table_is_active_literal = context->NewBoolVar("table expansion"); // Adds table_is_active <=> and(enforcement_literals). BoolArgumentProto* bool_or = @@ -1850,7 +1851,7 @@ void CompressAndExpandPositiveTable(ConstraintProto* ct, break; } if (create_new_var) { - tuple_literals[i] = context->NewBoolVar(); + tuple_literals[i] = context->NewBoolVar("table expansion"); } exactly_one->add_literals(tuple_literals[i]); } @@ -2134,6 +2135,36 @@ void ExpandComplexLinearConstraint(int c, ConstraintProto* ct, if (ct->linear().domain().size() <= 2) return; if (ct->linear().vars().size() == 1) return; + // If we have a hint for all variables of this linear constraint, finds in + // which bucket it fall. 
+ int hint_bucket = -1; + bool set_hint_of_bucket_variables = false; + if (context->HintIsLoaded()) { + set_hint_of_bucket_variables = true; + int64_t hint_activity = 0; + const int num_terms = ct->linear().vars().size(); + const absl::Span hint = context->SolutionHint(); + for (int i = 0; i < num_terms; ++i) { + const int var = ct->linear().vars(i); + DCHECK_LT(var, hint.size()); + if (!context->VarHasSolutionHint(var)) { + set_hint_of_bucket_variables = false; + break; + } + hint_activity += ct->linear().coeffs(i) * hint[var]; + } + if (set_hint_of_bucket_variables) { + for (int i = 0; i < ct->linear().domain_size(); i += 2) { + const int64_t lb = ct->linear().domain(i); + const int64_t ub = ct->linear().domain(i + 1); + if (hint_activity >= lb && hint_activity <= ub) { + hint_bucket = i; + break; + } + } + } + } + const SatParameters& params = context->params(); if (params.encode_complex_linear_constraint_with_integer()) { // Integer encoding. @@ -2155,7 +2186,10 @@ void ExpandComplexLinearConstraint(int c, ConstraintProto* ct, if (ct->enforcement_literal().empty() && ct->linear().domain_size() == 4) { // We cover the special case of no enforcement and two choices by creating // a single Boolean. 
- single_bool = context->NewBoolVar(); + single_bool = context->NewBoolVar("complex linear expansion"); + if (set_hint_of_bucket_variables) { + context->SetNewVariableHint(single_bool, hint_bucket == 0); + } } else { clause = context->working_model->add_constraints()->mutable_bool_or(); for (const int ref : ct->enforcement_literal()) { @@ -2173,7 +2207,10 @@ void ExpandComplexLinearConstraint(int c, ConstraintProto* ct, int subdomain_literal; if (clause != nullptr) { - subdomain_literal = context->NewBoolVar(); + subdomain_literal = context->NewBoolVar("complex linear expansion"); + if (set_hint_of_bucket_variables) { + context->SetNewVariableHint(subdomain_literal, hint_bucket == i); + } clause->add_literals(subdomain_literal); domain_literals.push_back(subdomain_literal); } else { @@ -2196,7 +2233,7 @@ void ExpandComplexLinearConstraint(int c, ConstraintProto* ct, if (enforcement_literals.size() == 1) { linear_is_enforced = enforcement_literals[0]; } else { - linear_is_enforced = context->NewBoolVar(); + linear_is_enforced = context->NewBoolVar("complex linear expansion"); BoolArgumentProto* maintain_linear_is_enforced = context->working_model->add_constraints()->mutable_bool_or(); for (const int e_lit : enforcement_literals) { diff --git a/ortools/sat/cp_model_lns.cc b/ortools/sat/cp_model_lns.cc index 7ec0ff90c7..5e8104a47f 100644 --- a/ortools/sat/cp_model_lns.cc +++ b/ortools/sat/cp_model_lns.cc @@ -44,6 +44,7 @@ #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/cp_model_presolve.h" +#include "ortools/sat/cp_model_solver_helpers.h" #include "ortools/sat/cp_model_utils.h" #include "ortools/sat/integer.h" #include "ortools/sat/linear_constraint_manager.h" @@ -1278,18 +1279,18 @@ void GetRandomSubset(double relative_size, std::vector* base, } // namespace Neighborhood RelaxRandomVariablesGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& 
initial_solution, SolveData& data, absl::BitGenRef random) { std::vector fixed_variables = helper_.ActiveVariables(); - GetRandomSubset(1.0 - difficulty, &fixed_variables, random); + GetRandomSubset(1.0 - data.difficulty, &fixed_variables, random); return helper_.FixGivenVariables( initial_solution, {fixed_variables.begin(), fixed_variables.end()}); } Neighborhood RelaxRandomConstraintsGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { - if (helper_.DifficultyMeansFullNeighborhood(difficulty)) { + if (helper_.DifficultyMeansFullNeighborhood(data.difficulty)) { return helper_.FullNeighborhood(); } @@ -1308,7 +1309,7 @@ Neighborhood RelaxRandomConstraintsGenerator::Generate( const int num_active_vars = helper_.ActiveVariablesWhileHoldingLock().size(); - const int target_size = std::ceil(difficulty * num_active_vars); + const int target_size = std::ceil(data.difficulty * num_active_vars); if (target_size == num_active_vars) return helper_.FullNeighborhood(); // TODO(user): Clean-up when target_size == 0. @@ -1333,7 +1334,7 @@ Neighborhood RelaxRandomConstraintsGenerator::Generate( // Note that even if difficulty means full neighborhood, we go through the // generation process to never get out of a connected components. 
Neighborhood VariableGraphNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const int num_model_vars = helper_.ModelProto().variables_size(); std::vector visited_variables_set(num_model_vars, false); @@ -1354,7 +1355,7 @@ Neighborhood VariableGraphNeighborhoodGenerator::Generate( helper_.ActiveVariablesWhileHoldingLock().size(); const int num_objective_variables = helper_.ActiveObjectiveVariablesWhileHoldingLock().size(); - const int target_size = std::ceil(difficulty * num_active_vars); + const int target_size = std::ceil(data.difficulty * num_active_vars); if (target_size == num_active_vars) return helper_.FullNeighborhood(); const int first_var = @@ -1403,7 +1404,7 @@ Neighborhood VariableGraphNeighborhoodGenerator::Generate( // Note that even if difficulty means full neighborhood, we go through the // generation process to never get out of a connected components. Neighborhood ArcGraphNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const int num_model_vars = helper_.ModelProto().variables_size(); if (num_model_vars == 0) return helper_.NoNeighborhood(); @@ -1424,7 +1425,7 @@ Neighborhood ArcGraphNeighborhoodGenerator::Generate( vars_to_constraints = helper_.VarToConstraint(); } - const int target_size = std::ceil(difficulty * num_active_vars); + const int target_size = std::ceil(data.difficulty * num_active_vars); if (target_size == 0) return helper_.NoNeighborhood(); // We pick a variable from the objective. @@ -1488,7 +1489,7 @@ Neighborhood ArcGraphNeighborhoodGenerator::Generate( // Note that even if difficulty means full neighborhood, we go through the // generation process to never get out of a connected components. 
Neighborhood ConstraintGraphNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const int num_model_constraints = helper_.ModelProto().constraints_size(); if (num_model_constraints == 0) { @@ -1507,7 +1508,7 @@ Neighborhood ConstraintGraphNeighborhoodGenerator::Generate( absl::ReaderMutexLock graph_lock(&helper_.graph_mutex_); const int num_active_vars = helper_.ActiveVariablesWhileHoldingLock().size(); - const int target_size = std::ceil(difficulty * num_active_vars); + const int target_size = std::ceil(data.difficulty * num_active_vars); if (target_size == num_active_vars) return helper_.FullNeighborhood(); // Start by a random constraint. @@ -1556,7 +1557,7 @@ Neighborhood ConstraintGraphNeighborhoodGenerator::Generate( } Neighborhood DecompositionGraphNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { int max_width = 0; int size_at_min_width_after_100; @@ -1572,7 +1573,7 @@ Neighborhood DecompositionGraphNeighborhoodGenerator::Generate( const int num_active_vars = helper_.ActiveVariablesWhileHoldingLock().size(); - const int target_size = std::ceil(difficulty * num_active_vars); + const int target_size = std::ceil(data.difficulty * num_active_vars); if (target_size == num_active_vars) return helper_.FullNeighborhood(); const int num_vars = helper_.VarToConstraint().size(); @@ -1721,94 +1722,209 @@ Neighborhood DecompositionGraphNeighborhoodGenerator::Generate( namespace { -// Given a (sub)set of binary variables and their initial solution values, -// returns a local branching constraint over these variables, that is: -// sum_{i : s[i] == 0} x_i + sum_{i : s[i] == 1} (1 - x_i) <= k -// where s is the initial solution and k is the neighborhood size. 
Requires all -// variables and initial solution values to be binary. -ConstraintProto LocalBranchingConstraint( - const std::vector& variable_indices, - const std::vector& initial_solution, const int neighborhood_size) { - DCHECK_EQ(variable_indices.size(), initial_solution.size()); - DCHECK_GE(neighborhood_size, 0); - ConstraintProto local_branching_constraint; - local_branching_constraint.set_name("local_branching"); - LinearConstraintProto* linear = local_branching_constraint.mutable_linear(); - int lhs_constant_value = 0; - for (int i = 0; i < variable_indices.size(); ++i) { - if (initial_solution[i] == 0) { - linear->add_coeffs(1); - linear->add_vars(variable_indices[i]); - } else { - DCHECK_EQ(initial_solution[i], 1); - linear->add_coeffs(-1); - linear->add_vars(variable_indices[i]); - lhs_constant_value++; - } +// Create a constraint sum (X - LB) + sum (UB - X) <= rhs. +ConstraintProto DistanceToBoundsSmallerThanConstraint( + const std::vector>& dist_to_lower_bound, + const std::vector>& dist_to_upper_bound, + const int64_t rhs) { + DCHECK_GE(rhs, 0); + ConstraintProto new_constraint; + LinearConstraintProto* linear = new_constraint.mutable_linear(); + int64_t lhs_constant_value = 0; + for (const auto [var, lb] : dist_to_lower_bound) { + // We add X - LB + linear->add_coeffs(1); + linear->add_vars(var); + lhs_constant_value -= lb; } - linear->add_domain(-lhs_constant_value); - linear->add_domain(-lhs_constant_value + neighborhood_size); - return local_branching_constraint; + for (const auto [var, ub] : dist_to_upper_bound) { + // We add UB - X + lhs_constant_value += ub; + linear->add_coeffs(-1); + linear->add_vars(var); + } + linear->add_domain(std::numeric_limits::min()); + linear->add_domain(rhs - lhs_constant_value); + return new_constraint; } } // namespace Neighborhood LocalBranchingLpBasedNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, 
absl::BitGenRef random) { - std::vector active_variables = helper_.ActiveVariables(); + const std::vector active_variables = helper_.ActiveVariables(); + if (active_variables.empty()) return helper_.NoNeighborhood(); - // Collect active binary variables and corresponding initial solution values. - // TODO(user): Extend to integer variables. - std::vector binary_var_indices; - std::vector non_binary_var_indices; - std::vector binary_var_initial_solution; - for (const int active_var_index : active_variables) { - const IntegerVariableProto& var = - helper_.ModelProto().variables(active_var_index); - if (var.domain_size() == 2 && var.domain(0) == 0 && var.domain(1) == 1) { - binary_var_indices.push_back(active_var_index); - binary_var_initial_solution.push_back( - initial_solution.solution(active_var_index)); - } else { - non_binary_var_indices.push_back(active_var_index); + { + // Quick corner case in case the difficulty is too high. This is mainly + // useful when testing with only that kind of LNS to abort early on + // super-easy problems. + const int size = active_variables.size(); + if (static_cast(std::ceil(data.difficulty * size)) == size) { + return helper_.FullNeighborhood(); } } - if (binary_var_indices.empty()) { + + // These are candidate for relaxation. The score will be filled later. Active + // variable not kept in candidate will be added to other_variables. + std::vector> candidates_with_score; + std::vector other_variables; + + // Our extra relaxation constraint will be: sums of distance to the respective + // bound smaller than a constant that depends on the difficulty. + std::vector> dist_to_lower_bound; + std::vector> dist_to_upper_bound; + + // For the "easy" part of the extra constraint, we either look only at the + // binary variables. Or we extend that to all variables at their bound. + const bool only_look_at_binary = absl::Bernoulli(random, 0.5); + + // We copy the model early to have access to reduced domains. 
+ // TODO(user): that might not be the most efficient if we abort just below. + CpModelProto local_cp_model = helper_.UpdatedModelProtoCopy(); + + // Loop over active variables. + bool some_non_binary_at_bound = false; + for (const int var : active_variables) { + DCHECK_LT(var, initial_solution.solution().size()); + DCHECK_LT(var, local_cp_model.variables().size()); + const IntegerVariableProto& var_proto = local_cp_model.variables(var); + const int64_t base_value = initial_solution.solution(var); + const bool is_binary = var_proto.domain_size() == 2 && + var_proto.domain(0) == 0 && var_proto.domain(1) == 1; + if (only_look_at_binary && !is_binary) { + other_variables.push_back(var); + continue; + } + + DCHECK(!var_proto.domain().empty()); + const int64_t domain_min = var_proto.domain(0); + const int64_t domain_max = var_proto.domain(var_proto.domain().size() - 1); + if (base_value <= domain_min) { + if (!is_binary) some_non_binary_at_bound = true; + candidates_with_score.push_back({var, 0.0}); + dist_to_lower_bound.push_back({var, domain_min}); + } else if (base_value >= domain_max) { + if (!is_binary) some_non_binary_at_bound = true; + candidates_with_score.push_back({var, 0.0}); + dist_to_upper_bound.push_back({var, domain_max}); + } else { + other_variables.push_back(var); + } + } + + bool use_hamming_for_others = false; + if (!other_variables.empty() && absl::Bernoulli(random, 0.5)) { + use_hamming_for_others = true; + } + if (!use_hamming_for_others && candidates_with_score.empty()) { return helper_.NoNeighborhood(); } - const int target_size = - static_cast(std::ceil(difficulty * binary_var_indices.size())); + // With this option, we will create a bunch of Boolean variable + // and add the constraints : "bool==0 => var == value_in_base_solution". 
+ if (use_hamming_for_others) { + for (const int var : other_variables) { + const int indicator = local_cp_model.variables().size(); + auto* var_proto = local_cp_model.add_variables(); + var_proto->add_domain(0); + var_proto->add_domain(1); + auto* new_ct = local_cp_model.add_constraints(); + new_ct->add_enforcement_literal(NegatedRef(indicator)); + + const int64_t base_value = initial_solution.solution(var); + new_ct->mutable_linear()->add_domain(base_value); + new_ct->mutable_linear()->add_domain(base_value); + new_ct->mutable_linear()->add_vars(var); + new_ct->mutable_linear()->add_coeffs(1); + + // Add it to the distance constraint. + dist_to_lower_bound.push_back({indicator, 0}); + candidates_with_score.push_back({var, 0.0}); + } + + // Clear other_variables so that they are not added at the end. + other_variables.clear(); + } + + // Constrain the distance to the bounds. + const int size = dist_to_upper_bound.size() + dist_to_lower_bound.size(); + const int target_size = static_cast(std::ceil(data.difficulty * size)); + DCHECK_LE(target_size, candidates_with_score.size()); + *local_cp_model.add_constraints() = DistanceToBoundsSmallerThanConstraint( + dist_to_lower_bound, dist_to_upper_bound, target_size); - // Create and solve local branching LP. - CpModelProto local_branching_model = helper_.UpdatedModelProtoCopy(); - *local_branching_model.add_constraints() = LocalBranchingConstraint( - binary_var_indices, binary_var_initial_solution, target_size); Model model("lb_relax_lns_lp"); auto* const params = model.GetOrCreate(); + // Parameters to enable solving the LP only. params->set_num_workers(1); params->set_linearization_level(2); params->set_stop_after_root_propagation(true); params->set_add_lp_constraints_lazily(false); + // Parameters to attempt to speed up solve. params->set_cp_model_presolve(false); params->set_cp_model_probing_level(0); + // Parameters to limit time spent in the solve. 
The max number of iterations // is relaxed from the default since we rely more on deterministic time. params->set_root_lp_iterations(100000); + + // TODO(user): This is a lot longer than a normal LNS, so it might cause + // issue with the current round-robbin selection based on number of calls. params->set_max_deterministic_time(10); + model.GetOrCreate()->ResetLimitFromParameters(*params); if (global_time_limit_ != nullptr) { global_time_limit_->UpdateLocalLimit(model.GetOrCreate()); } - solve_callback_(local_branching_model, &model); - // Skip LNS if no (full) feasible solution was found for the LP. - const auto lp_constraints = - model.GetOrCreate(); - for (const LinearProgrammingConstraint* lp_constraint : *lp_constraints) { - if (!lp_constraint->HasSolution()) { + // Tricky: we want the inner_objective_lower_bound in the response to be in + // term of the current problem, not the user facing one. + if (local_cp_model.has_objective()) { + local_cp_model.mutable_objective()->set_integer_before_offset(0); + local_cp_model.mutable_objective()->set_integer_after_offset(0); + local_cp_model.mutable_objective()->set_integer_scaling_factor(0); + } + + // Solve. + // + // TODO(user): Shall we pass the objective upper bound so we have more + // chance to fix variable via reduced cost fixing. + // + // TODO(user): Does the current solution can provide a warm-start for the + // LP? + auto* response_manager = model.GetOrCreate(); + response_manager->InitializeObjective(local_cp_model); + LoadCpModel(local_cp_model, &model); + SolveLoadedCpModel(local_cp_model, &model); + + // Update dtime. + data.deterministic_time += + model.GetOrCreate()->GetElapsedDeterministicTime(); + + // Analyze the status of this first "solve". + // + // TODO(user): If we run into this case, it also means that every other LNS + // that tries to more variable than here will never be able to improve. 
+ if (local_cp_model.has_objective()) { + const CpSolverResponse response = response_manager->GetResponse(); + if (response.status() == CpSolverStatus::INFEASIBLE) { + data.status = CpSolverStatus::INFEASIBLE; + AddSolveData(data); + return helper_.NoNeighborhood(); + } + + const int64_t inner_lb = response.inner_objective_lower_bound(); + const int64_t current_inner_obj = ComputeInnerObjective( + local_cp_model.objective(), initial_solution.solution()); + if (inner_lb >= current_inner_obj) { + // In this case, we cannot improve on the base solution. + // We could try to find a different solution for diversity, but we do have + // other neighborhood for that. Lets abort early. + data.status = CpSolverStatus::OPTIMAL; // We cannot improve. + AddSolveData(data); return helper_.NoNeighborhood(); } } @@ -1817,28 +1933,52 @@ Neighborhood LocalBranchingLpBasedNeighborhoodGenerator::Generate( // random noise for tie breaking. const auto var_mapping = model.GetOrCreate(); const auto lp_solution = model.GetOrCreate(); - std::vector differences; - for (int i = 0; i < binary_var_indices.size(); ++i) { - double difference = - std::abs(lp_solution->at(var_mapping->Integer(binary_var_indices[i])) - - binary_var_initial_solution[i]); - differences.push_back(difference + - absl::Uniform(random, 0.0, 1e-6)); + if (lp_solution->empty()) { + // We likely didn't solve the LP at all, so lets not use this neighborhood. + return helper_.NoNeighborhood(); + } + for (auto& [var, score] : candidates_with_score) { + const IntegerVariable integer = var_mapping->Integer(var); + DCHECK_LT(integer, lp_solution->size()); + DCHECK_LT(var, initial_solution.solution().size()); + const double difference = + std::abs(lp_solution->at(var_mapping->Integer(var)) - + initial_solution.solution(var)); + score = difference + absl::Uniform(random, 0.0, 1e-6); } // Take the target_size variables with largest differences. 
- std::vector vars_to_relax(binary_var_indices.size()); - absl::c_iota(vars_to_relax, 0); - absl::c_sort(vars_to_relax, [&differences](const int i, const int j) { - return differences[i] > differences[j]; + absl::c_sort(candidates_with_score, [](const std::pair& a, + const std::pair& b) { + return a.second > b.second; }); - vars_to_relax.resize(target_size); - // For now, we include all non-binary variables in the relaxation, since their - // values are likely tied to the binary values. - vars_to_relax.insert(vars_to_relax.end(), non_binary_var_indices.begin(), - non_binary_var_indices.end()); - return helper_.RelaxGivenVariables(initial_solution, vars_to_relax); + std::vector vars_to_relax; + vars_to_relax.reserve(target_size); + DCHECK_LE(target_size, candidates_with_score.size()); + for (int i = 0; i < target_size; ++i) { + vars_to_relax.push_back(candidates_with_score[i].first); + } + + // We will also relax all "other variables". We assume their values are likely + // tied to the other ones. + vars_to_relax.insert(vars_to_relax.end(), other_variables.begin(), + other_variables.end()); + Neighborhood result = + helper_.RelaxGivenVariables(initial_solution, vars_to_relax); + + // Lets the name reflect the type. + // + // TODO(user): Unfortunately like this we have a common difficulty for all + // variant, we should probably fix that. + result.source_info = "lb_relax_lns"; + absl::StrAppend(&result.source_info, + some_non_binary_at_bound ? 
"_int" : "_bool"); + if (use_hamming_for_others) { + absl::StrAppend(&result.source_info, "_h"); + } + + return result; } namespace { @@ -2007,22 +2147,22 @@ Neighborhood GenerateSchedulingNeighborhoodFromRelaxedIntervals( } Neighborhood RandomIntervalSchedulingNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector intervals_to_relax = helper_.GetActiveIntervals(initial_solution); - GetRandomSubset(difficulty, &intervals_to_relax, random); + GetRandomSubset(data.difficulty, &intervals_to_relax, random); return GenerateSchedulingNeighborhoodFromRelaxedIntervals( intervals_to_relax, {}, initial_solution, random, helper_); } Neighborhood RandomPrecedenceSchedulingNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> precedences = helper_.GetSchedulingPrecedences({}, initial_solution, random); - GetRandomSubset(1.0 - difficulty, &precedences, random); + GetRandomSubset(1.0 - data.difficulty, &precedences, random); return GenerateSchedulingNeighborhoodFromIntervalPrecedences( precedences, initial_solution, helper_); } @@ -2041,7 +2181,7 @@ void AppendVarsFromAllIntervalIndices(absl::Span indices, } // namespace Neighborhood SchedulingTimeWindowNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const std::vector active_intervals = helper_.GetActiveIntervals(initial_solution); @@ -2049,7 +2189,7 @@ Neighborhood SchedulingTimeWindowNeighborhoodGenerator::Generate( if (active_intervals.empty()) return helper_.FullNeighborhood(); const TimePartition partition = PartitionIndicesAroundRandomTimeWindow( - active_intervals, helper_.ModelProto(), initial_solution, 
difficulty, + active_intervals, helper_.ModelProto(), initial_solution, data.difficulty, random); std::vector intervals_to_relax; intervals_to_relax.reserve(partition.selected_indices.size()); @@ -2074,7 +2214,7 @@ Neighborhood SchedulingTimeWindowNeighborhoodGenerator::Generate( } Neighborhood SchedulingResourceWindowsNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector intervals_to_relax; std::vector variables_to_fix; @@ -2082,8 +2222,8 @@ Neighborhood SchedulingResourceWindowsNeighborhoodGenerator::Generate( for (const std::vector& intervals : intervals_in_constraints_) { active_intervals = helper_.KeepActiveIntervals(intervals, initial_solution); const TimePartition partition = PartitionIndicesAroundRandomTimeWindow( - active_intervals, helper_.ModelProto(), initial_solution, difficulty, - random); + active_intervals, helper_.ModelProto(), initial_solution, + data.difficulty, random); intervals_to_relax.insert(intervals_to_relax.end(), partition.selected_indices.begin(), partition.selected_indices.end()); @@ -2109,11 +2249,11 @@ Neighborhood SchedulingResourceWindowsNeighborhoodGenerator::Generate( } Neighborhood RandomRectanglesPackingNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> rectangles_to_freeze = helper_.GetActiveRectangles(initial_solution); - GetRandomSubset(1.0 - difficulty, &rectangles_to_freeze, random); + GetRandomSubset(1.0 - data.difficulty, &rectangles_to_freeze, random); absl::flat_hash_set variables_to_freeze; for (const auto& [x, y] : rectangles_to_freeze) { @@ -2125,11 +2265,11 @@ Neighborhood RandomRectanglesPackingNeighborhoodGenerator::Generate( } Neighborhood RandomPrecedencesPackingNeighborhoodGenerator::Generate( - const CpSolverResponse& 
initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> rectangles_to_relax = helper_.GetActiveRectangles(initial_solution); - GetRandomSubset(difficulty, &rectangles_to_relax, random); + GetRandomSubset(data.difficulty, &rectangles_to_relax, random); std::vector intervals_to_relax; for (const auto& [x, y] : rectangles_to_relax) { intervals_to_relax.push_back(x); @@ -2142,7 +2282,7 @@ Neighborhood RandomPrecedencesPackingNeighborhoodGenerator::Generate( } Neighborhood SlicePackingNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const std::vector> active_rectangles = helper_.GetActiveRectangles(initial_solution); @@ -2154,8 +2294,8 @@ Neighborhood SlicePackingNeighborhoodGenerator::Generate( } const TimePartition partition = PartitionIndicesAroundRandomTimeWindow( - projected_intervals, helper_.ModelProto(), initial_solution, difficulty, - random); + projected_intervals, helper_.ModelProto(), initial_solution, + data.difficulty, random); std::vector indices_to_fix(active_rectangles.size(), true); for (const int index : partition.selected_indices) { indices_to_fix[index] = false; @@ -2177,7 +2317,7 @@ Neighborhood SlicePackingNeighborhoodGenerator::Generate( } Neighborhood RoutingRandomNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const std::vector> all_paths = helper_.GetRoutingPaths(initial_solution); @@ -2190,13 +2330,13 @@ Neighborhood RoutingRandomNeighborhoodGenerator::Generate( std::vector fixed_variables(all_path_variables.begin(), all_path_variables.end()); std::sort(fixed_variables.begin(), fixed_variables.end()); - GetRandomSubset(1.0 - difficulty, &fixed_variables, random); + GetRandomSubset(1.0 - 
data.difficulty, &fixed_variables, random); return helper_.FixGivenVariables( initial_solution, {fixed_variables.begin(), fixed_variables.end()}); } Neighborhood RoutingPathNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> all_paths = helper_.GetRoutingPaths(initial_solution); @@ -2209,7 +2349,7 @@ Neighborhood RoutingPathNeighborhoodGenerator::Generate( // Select variables to relax. const int num_variables_to_relax = - static_cast(all_path_variables.size() * difficulty); + static_cast(all_path_variables.size() * data.difficulty); absl::flat_hash_set relaxed_variables; while (relaxed_variables.size() < num_variables_to_relax) { DCHECK(!all_paths.empty()); @@ -2242,7 +2382,7 @@ Neighborhood RoutingPathNeighborhoodGenerator::Generate( } Neighborhood RoutingFullPathNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> all_paths = helper_.GetRoutingPaths(initial_solution); @@ -2259,7 +2399,7 @@ Neighborhood RoutingFullPathNeighborhoodGenerator::Generate( // Select variables to relax. const int num_variables_to_relax = - static_cast(all_path_variables.size() * difficulty); + static_cast(all_path_variables.size() * data.difficulty); absl::flat_hash_set relaxed_variables; // Relax the start and end of each path to ease relocation. 
@@ -2313,14 +2453,14 @@ bool RelaxationInducedNeighborhoodGenerator::ReadyToGenerate() const { } Neighborhood RelaxationInducedNeighborhoodGenerator::Generate( - const CpSolverResponse& /*initial_solution*/, double difficulty, + const CpSolverResponse& /*initial_solution*/, SolveData& data, absl::BitGenRef random) { Neighborhood neighborhood = helper_.FullNeighborhood(); neighborhood.is_generated = false; const ReducedDomainNeighborhood reduced_domains = GetRinsRensNeighborhood(response_manager_, lp_solutions_, - incomplete_solutions_, difficulty, random); + incomplete_solutions_, data.difficulty, random); if (reduced_domains.fixed_vars.empty() && reduced_domains.reduced_domain_vars.empty()) { diff --git a/ortools/sat/cp_model_lns.h b/ortools/sat/cp_model_lns.h index 31bd7ea0e3..dd92ff04d3 100644 --- a/ortools/sat/cp_model_lns.h +++ b/ortools/sat/cp_model_lns.h @@ -356,36 +356,6 @@ class NeighborhoodGenerator { : name_(name), helper_(*helper), difficulty_(0.5) {} virtual ~NeighborhoodGenerator() = default; - // Generates a "local" subproblem for the given seed. - // - // The difficulty will be in [0, 1] and is related to the asked neighborhood - // size (and thus local problem difficulty). A difficulty of 0.0 means empty - // neighborhood and a difficulty of 1.0 means the full problem. The algorithm - // should try to generate a neighborhood according to this difficulty which - // will be dynamically adjusted depending on whether or not we can solve the - // subproblem in a given time limit. - // - // The given initial_solution should contain a feasible solution to the - // initial CpModelProto given to this class. Any solution to the returned - // CPModelProto should also be valid solution to the same initial model. - // - // This function should be thread-safe. - virtual Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) = 0; - - // Returns true if the neighborhood generator can generate a neighborhood. 
- virtual bool ReadyToGenerate() const; - - // Uses UCB1 algorithm to compute the score (Multi armed bandit problem). - // Details are at - // https://lilianweng.github.io/lil-log/2018/01/23/the-multi-armed-bandit-problem-and-its-solutions.html. - // 'total_num_calls' should be the sum of calls across all generators part of - // the multi armed bandit problem. - // If the generator is called less than 10 times then the method returns - // infinity as score in order to get more data about the generator - // performance. - double GetUCBScore(int64_t total_num_calls) const; - // Adds solve data about one "solved" neighborhood. struct SolveData { // The status of the sub-solve. @@ -423,6 +393,37 @@ class NeighborhoodGenerator { o.base_objective, o.new_objective); } }; + + // Generates a "local" subproblem for the given seed. + // + // The data,difficulty will be in [0, 1] and is related to the asked + // neighborhood size (and thus local problem difficulty). A difficulty of 0.0 + // means empty neighborhood and a difficulty of 1.0 means the full problem. + // The algorithm should try to generate a neighborhood according to this + // difficulty which will be dynamically adjusted depending on whether or not + // we can solve the subproblem in a given time limit. + // + // The given initial_solution should contain a feasible solution to the + // initial CpModelProto given to this class. Any solution to the returned + // CPModelProto should also be valid solution to the same initial model. + // + // This function should be thread-safe. + virtual Neighborhood Generate(const CpSolverResponse& initial_solution, + SolveData& data, absl::BitGenRef random) = 0; + + // Returns true if the neighborhood generator can generate a neighborhood. + virtual bool ReadyToGenerate() const; + + // Uses UCB1 algorithm to compute the score (Multi armed bandit problem). 
+ // Details are at + // https://lilianweng.github.io/lil-log/2018/01/23/the-multi-armed-bandit-problem-and-its-solutions.html. + // 'total_num_calls' should be the sum of calls across all generators part of + // the multi armed bandit problem. + // If the generator is called less than 10 times then the method returns + // infinity as score in order to get more data about the generator + // performance. + double GetUCBScore(int64_t total_num_calls) const; + void AddSolveData(SolveData data) { absl::MutexLock mutex_lock(&generator_mutex_); solve_data_.push_back(data); @@ -478,6 +479,7 @@ class NeighborhoodGenerator { const std::string name_; const NeighborhoodGeneratorHelper& helper_; mutable absl::Mutex generator_mutex_; + double deterministic_limit_ = 0.1; private: std::vector solve_data_; @@ -485,7 +487,6 @@ class NeighborhoodGenerator { // Current parameters to be used when generating/solving a neighborhood with // this generator. Only updated on Synchronize(). AdaptiveParameterValue difficulty_; - double deterministic_limit_ = 0.1; // Current statistics of the last solved neighborhood. // Only updated on Synchronize(). 
@@ -507,7 +508,7 @@ class RelaxRandomVariablesGenerator : public NeighborhoodGenerator { NeighborhoodGeneratorHelper const* helper, absl::string_view name) : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Pick a random subset of constraints and relax all the variables of these @@ -522,7 +523,7 @@ class RelaxRandomConstraintsGenerator : public NeighborhoodGenerator { NeighborhoodGeneratorHelper const* helper, absl::string_view name) : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Pick a random subset of variables that are constructed by a BFS in the @@ -538,7 +539,7 @@ class VariableGraphNeighborhoodGenerator : public NeighborhoodGenerator { NeighborhoodGeneratorHelper const* helper, absl::string_view name) : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // This randomly extend a working set of variable by one variable directly @@ -549,7 +550,7 @@ class ArcGraphNeighborhoodGenerator : public NeighborhoodGenerator { NeighborhoodGeneratorHelper const* helper, absl::string_view name) : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Pick a random subset of constraint and relax all of their variables. 
We are a @@ -562,7 +563,7 @@ class ConstraintGraphNeighborhoodGenerator : public NeighborhoodGenerator { NeighborhoodGeneratorHelper const* helper, absl::string_view name) : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // The idea here is to try to generate a random neighborhood incrementally in @@ -582,7 +583,7 @@ class DecompositionGraphNeighborhoodGenerator : public NeighborhoodGenerator { NeighborhoodGeneratorHelper const* helper, absl::string_view name) : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Solves a local branching LP and greedily picks a set of variables with the @@ -594,20 +595,20 @@ class DecompositionGraphNeighborhoodGenerator : public NeighborhoodGenerator { class LocalBranchingLpBasedNeighborhoodGenerator : public NeighborhoodGenerator { public: - // TODO(user): Restructure code so that we avoid circular dependency with - // solving functions. For now, we use solve_callback. - explicit LocalBranchingLpBasedNeighborhoodGenerator( + LocalBranchingLpBasedNeighborhoodGenerator( NeighborhoodGeneratorHelper const* helper, absl::string_view name, - std::function solve_callback, ModelSharedTimeLimit* const global_time_limit) : NeighborhoodGenerator(name, helper), - solve_callback_(std::move(solve_callback)), - global_time_limit_(global_time_limit) {} + global_time_limit_(global_time_limit) { + // Given that we spend time generating a good neighborhood it sounds + // reasonable to spend a bit more time solving it too. 
+ deterministic_limit_ = 0.5; + } + Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; private: - const std::function solve_callback_; ModelSharedTimeLimit* const global_time_limit_; }; @@ -640,7 +641,7 @@ class RandomIntervalSchedulingNeighborhoodGenerator : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Only make sense for scheduling problem. This select a random set of @@ -656,7 +657,7 @@ class RandomPrecedenceSchedulingNeighborhoodGenerator : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Similar to SchedulingNeighborhoodGenerator except the set of intervals that @@ -668,7 +669,7 @@ class SchedulingTimeWindowNeighborhoodGenerator : public NeighborhoodGenerator { : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Similar to SchedulingTimeWindowNeighborhoodGenerator except that it relaxes @@ -685,7 +686,7 @@ class SchedulingResourceWindowsNeighborhoodGenerator intervals_in_constraints_(intervals_in_constraints) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; private: const std::vector> intervals_in_constraints_; @@ -702,7 +703,7 @@ class RandomRectanglesPackingNeighborhoodGenerator : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& 
data, absl::BitGenRef random) final; }; // Only make sense for problems with no_overlap_2d constraints. This select a @@ -717,7 +718,7 @@ class RandomPrecedencesPackingNeighborhoodGenerator : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Only make sense for problems with no_overlap_2d constraints. This select a @@ -730,7 +731,7 @@ class SlicePackingNeighborhoodGenerator : public NeighborhoodGenerator { : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // This routing based LNS generator will relax random arcs in all the paths of @@ -742,7 +743,7 @@ class RoutingRandomNeighborhoodGenerator : public NeighborhoodGenerator { : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // This routing based LNS generator will relax small sequences of arcs randomly @@ -754,7 +755,7 @@ class RoutingPathNeighborhoodGenerator : public NeighborhoodGenerator { : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // This routing based LNS generator aims are relaxing one full path, and make @@ -771,7 +772,7 @@ class RoutingFullPathNeighborhoodGenerator : public NeighborhoodGenerator { : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Generates a neighborhood by fixing the variables to solutions 
reported in @@ -806,7 +807,7 @@ class RelaxationInducedNeighborhoodGenerator : public NeighborhoodGenerator { // Both initial solution and difficulty values are ignored. Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; // Returns true if the required solutions are available. bool ReadyToGenerate() const override; diff --git a/ortools/sat/cp_model_loader.cc b/ortools/sat/cp_model_loader.cc index f4d3a3728b..5cd81bfc2c 100644 --- a/ortools/sat/cp_model_loader.cc +++ b/ortools/sat/cp_model_loader.cc @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -1212,6 +1211,7 @@ void SplitAndLoadIntermediateConstraints(bool lb_required, bool ub_required, void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { auto* mapping = m->GetOrCreate(); + if (ct.linear().vars().empty()) { const Domain rhs = ReadDomainFromProto(ct.linear()); if (rhs.Contains(0)) return; @@ -1429,22 +1429,27 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { // We have a linear with a complex Domain, we need to create extra Booleans. - // In this case, we can create just one Boolean instead of two since one - // is the negation of the other. - const bool special_case = - ct.enforcement_literal().empty() && ct.linear().domain_size() == 4; - // For enforcement => var \in domain, we can potentially reuse the encoding // literal directly rather than creating new ones. 
- const bool is_linear1 = !special_case && vars.size() == 1 && coeffs[0] == 1; + const bool is_linear1 = vars.size() == 1 && coeffs[0] == 1; + bool special_case = false; std::vector clause; std::vector for_enumeration; auto* encoding = m->GetOrCreate(); - for (int i = 0; i < ct.linear().domain_size(); i += 2) { + const int domain_size = ct.linear().domain_size(); + for (int i = 0; i < domain_size; i += 2) { const int64_t lb = ct.linear().domain(i); const int64_t ub = ct.linear().domain(i + 1); + // Skip non-reachable intervals. + if (min_sum > ub) continue; + if (max_sum < lb) continue; + + // Skip trivial constraint. Note that when this happens, all the intervals + // before where non-reachable. + if (min_sum >= lb && max_sum <= ub) return; + if (is_linear1) { if (lb == ub) { clause.push_back( @@ -1461,9 +1466,17 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { } } + // If there is just two terms and no enforcement, we don't need to create an + // extra boolean as the second case can be controlled by the negation of the + // first. + if (ct.enforcement_literal().empty() && clause.size() == 1 && + i + 1 == domain_size) { + special_case = true; + } + const Literal subdomain_literal( - special_case && i > 0 ? clause.back().Negated() - : Literal(m->Add(NewBooleanVariable()), true)); + special_case ? 
clause.back().Negated() + : Literal(m->Add(NewBooleanVariable()), true)); clause.push_back(subdomain_literal); for_enumeration.push_back(subdomain_literal); diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index fe6309fd48..f7e2c33576 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -2192,7 +2192,7 @@ bool CpModelPresolver::RemoveSingletonInLinear(ConstraintProto* ct) { if (ct->enforcement_literal().size() == 1) { indicator = ct->enforcement_literal(0); } else { - indicator = context_->NewBoolVar(); + indicator = context_->NewBoolVar("indicator"); auto* new_ct = context_->working_model->add_constraints(); *new_ct->mutable_enforcement_literal() = ct->enforcement_literal(); new_ct->mutable_bool_or()->add_literals(indicator); @@ -8758,7 +8758,7 @@ bool CpModelPresolver::ProcessEncodingFromLinear( // All false means associated_lit is false too. // But not for the rhs case if we are not in exactly one. if (in_exactly_one || value != rhs) { - // TODO(user): Insted of bool_or + implications, we could add an + // TODO(user): Instead of bool_or + implications, we could add an // exactly one! Experiment with this. In particular it might capture // more structure for later heuristic to add the exactly one instead. // This also applies to automata/table/element expansion. @@ -12625,6 +12625,34 @@ void CpModelPresolver::InitializeMappingModelVariables() { context_->working_model->variables()); } +void CpModelPresolver::ExpandCpModelAndCanonicalizeConstraints() { + const int num_constraints_before_expansion = + context_->working_model->constraints_size(); + ExpandCpModel(context_); + if (context_->ModelIsUnsat()) return; + + // TODO(user): Make sure we can't have duplicate in these constraint. + // These are due to ExpandCpModel() where we create such constraint with + // duplicate. The problem is that some code assumes these are presolved + // before being called. 
+ const int num_constraints = context_->working_model->constraints().size(); + for (int c = num_constraints_before_expansion; c < num_constraints; ++c) { + ConstraintProto* ct = context_->working_model->mutable_constraints(c); + const auto type = ct->constraint_case(); + if (type == ConstraintProto::kAtMostOne || + type == ConstraintProto::kExactlyOne) { + if (PresolveOneConstraint(c)) { + context_->UpdateConstraintVariableUsage(c); + } + if (context_->ModelIsUnsat()) return; + } else if (type == ConstraintProto::kLinear) { + if (CanonicalizeLinear(ct)) { + context_->UpdateConstraintVariableUsage(c); + } + } + } +} + // The presolve works as follow: // // First stage: @@ -12692,7 +12720,7 @@ CpSolverStatus CpModelPresolver::Presolve() { // If presolve is false, just run expansion. if (!context_->params().cp_model_presolve()) { - ExpandCpModel(context_); + ExpandCpModelAndCanonicalizeConstraints(); if (context_->ModelIsUnsat()) return InfeasibleStatus(); // We still write back the canonical objective has we don't deal well @@ -12746,26 +12774,8 @@ CpSolverStatus CpModelPresolver::Presolve() { // Call expansion. if (!context_->ModelIsExpanded()) { ExtractEncodingFromLinear(); - ExpandCpModel(context_); + ExpandCpModelAndCanonicalizeConstraints(); if (context_->ModelIsUnsat()) return InfeasibleStatus(); - - // TODO(user): Make sure we can't have duplicate in these constraint. - // These are due to ExpandCpModel() were we create such constraint with - // duplicate. The problem is that some code assumes these are presolved - // before being called. 
- const int num_constraints = context_->working_model->constraints().size(); - for (int c = 0; c < num_constraints; ++c) { - ConstraintProto* ct = context_->working_model->mutable_constraints(c); - const auto type = ct->constraint_case(); - if (type == ConstraintProto::kAtMostOne || - type == ConstraintProto::kExactlyOne) { - if (PresolveOneConstraint(c)) { - context_->UpdateConstraintVariableUsage(c); - } - if (context_->ModelIsUnsat()) return InfeasibleStatus(); - } - } - // We need to re-evaluate the degree because some presolve rule only // run after expansion. const int num_vars = context_->working_model->variables().size(); @@ -12805,7 +12815,7 @@ CpSolverStatus CpModelPresolver::Presolve() { } } - // Extract redundant at most one constraint form the linear ones. + // Extract redundant at most one constraint from the linear ones. // // TODO(user): more generally if we do some probing, the same relation will // be detected (and more). Also add an option to turn this off? @@ -12900,6 +12910,26 @@ CpSolverStatus CpModelPresolver::Presolve() { context_->WriteObjectiveToProto(); } + // Now that everything that could possibly be fixed was fixed, make sure we + // don't leave any linear constraint with fixed variables. + for (int c = 0; c < context_->working_model->constraints_size(); ++c) { + ConstraintProto& ct = *context_->working_model->mutable_constraints(c); + bool need_canonicalize = false; + if (ct.constraint_case() == ConstraintProto::kLinear) { + for (const int v : ct.linear().vars()) { + if (context_->IsFixed(v)) { + need_canonicalize = true; + break; + } + } + } + if (need_canonicalize) { + if (CanonicalizeLinear(&ct)) { + context_->UpdateConstraintVariableUsage(c); + } + } + } + // Take care of linear constraint with a complex rhs. 
FinalExpansionForLinearConstraint(context_); diff --git a/ortools/sat/cp_model_presolve.h b/ortools/sat/cp_model_presolve.h index 000b864153..7c9dff19cc 100644 --- a/ortools/sat/cp_model_presolve.h +++ b/ortools/sat/cp_model_presolve.h @@ -106,6 +106,9 @@ class CpModelPresolver { // Runs the probing. void Probe(); + // Runs the expansion and fix constraints that became non-canonical. + void ExpandCpModelAndCanonicalizeConstraints(); + // Presolve functions. // // They should return false only if the constraint <-> variable graph didn't diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 6a8c52ad46..f47ec0aa4d 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -118,6 +118,10 @@ ABSL_FLAG(bool, cp_model_ignore_hints, false, "If true, ignore any supplied hints."); ABSL_FLAG(bool, cp_model_fingerprint_model, true, "Fingerprint the model."); +ABSL_FLAG(bool, cp_model_check_intermediate_solutions, false, + "When true, all intermediate solutions found by the solver will be " + "checked. 
This can be expensive, therefore it is off by default."); + namespace operations_research { namespace sat { @@ -1074,7 +1078,13 @@ class LnsSolver : public SubSolver { ~LnsSolver() override { shared_->stat_tables.AddTimingStat(*this); - shared_->stat_tables.AddLnsStat(name(), *generator_); + shared_->stat_tables.AddLnsStat( + name(), + /*num_fully_solved_calls=*/generator_->num_fully_solved_calls(), + /*num_calls=*/generator_->num_calls(), + /*num_improving_calls=*/generator_->num_improving_calls(), + /*difficulty=*/generator_->difficulty(), + /*deterministic_limit=*/generator_->deterministic_limit()); } bool TaskIsAvailable() override { @@ -1130,7 +1140,7 @@ class LnsSolver : public SubSolver { } Neighborhood neighborhood = - generator_->Generate(base_response, data.difficulty, random); + generator_->Generate(base_response, data, random); if (!neighborhood.is_generated) return; @@ -1309,7 +1319,8 @@ class LnsSolver : public SubSolver { solution_values.end()); } - data.deterministic_time = local_time_limit->GetElapsedDeterministicTime(); + data.deterministic_time += + local_time_limit->GetElapsedDeterministicTime(); bool new_solution = false; bool display_lns_info = VLOG_IS_ON(2); @@ -1604,17 +1615,12 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) { helper, name_filter.LastName()), lns_params, helper, shared)); } - if (params.use_lb_relax_lns() && name_filter.Keep("lb_relax_lns")) { + if (params.use_lb_relax_lns() && + params.num_workers() >= params.lb_relax_num_workers_threshold() && + name_filter.Keep("lb_relax_lns")) { reentrant_interleaved_subsolvers.push_back(std::make_unique( std::make_unique( - helper, name_filter.LastName(), - [](const CpModelProto cp_model, Model* model) { - model->GetOrCreate() - ->InitializeObjective(cp_model); - LoadCpModel(cp_model, model); - SolveLoadedCpModel(cp_model, model); - }, - shared->time_limit), + helper, name_filter.LastName(), shared->time_limit), lns_params, helper, shared)); } @@ -1727,14 
+1733,14 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) { // Compared to LNS, these are not re-entrant, so we need to schedule the // correct number for parallelism. if (shared->model_proto.has_objective()) { - // If not forced by the parameters, we want one LS every two threads that + // If not forced by the parameters, we want one LS every 3 threads that // work on interleaved stuff. Note that by default they are many LNS, so // that shouldn't be too many. const int num_thread_for_interleaved_workers = params.num_workers() - full_worker_subsolvers.size(); int num_violation_ls = params.has_num_violation_ls() ? params.num_violation_ls() - : (num_thread_for_interleaved_workers + 1) / 2; + : (num_thread_for_interleaved_workers + 2) / 3; // If there is no rentrant solver, maybe increase the number to reach max // parallelism. @@ -1749,7 +1755,7 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) { const absl::string_view lin_ls_name = "ls_lin"; const int num_ls_lin = - name_filter.Keep(lin_ls_name) ? num_violation_ls / 3 : 0; + name_filter.Keep(lin_ls_name) ? (num_violation_ls + 1) / 3 : 0; const int num_ls_default = name_filter.Keep(ls_name) ? num_violation_ls - num_ls_lin : 0; @@ -2405,24 +2411,41 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { // We either check all solutions, or only the last one. // Checking all solution might be expensive if we creates many. auto check_solution = [&model_proto, ¶ms, mapping_proto, - &postsolve_mapping](CpSolverResponse* response) { - if (response->solution().empty()) return; + &postsolve_mapping](const CpSolverResponse& response) { + if (response.solution().empty()) return; + + bool solution_is_feasible = true; if (params.cp_model_presolve()) { // We pass presolve data for more informative message in case the solution // is not feasible. 
- CHECK(SolutionIsFeasible(model_proto, response->solution(), mapping_proto, - &postsolve_mapping)); + solution_is_feasible = SolutionIsFeasible( + model_proto, response.solution(), mapping_proto, &postsolve_mapping); } else { - CHECK(SolutionIsFeasible(model_proto, response->solution())); + solution_is_feasible = + SolutionIsFeasible(model_proto, response.solution()); + } + + // We dump the response when infeasible, this might help debugging. + if (!solution_is_feasible) { + const std::string file = absl::StrCat( + absl::GetFlag(FLAGS_cp_model_dump_prefix), "wrong_response.pb.txt"); + LOG(INFO) << "Dumping infeasible response proto to '" << file << "'."; + CHECK(WriteModelProtoToFile(response, file)); + + // Crash. + LOG(FATAL) << "Infeasible solution!" + << " source': " << response.solution_info() << "'" + << " dumped CpSolverResponse to '" << file << "'."; } }; if (DEBUG_MODE || absl::GetFlag(FLAGS_cp_model_check_intermediate_solutions)) { - shared_response_manager->AddResponsePostprocessor( - std::move(check_solution)); + shared_response_manager->AddSolutionCallback(std::move(check_solution)); } else { shared_response_manager->AddFinalResponsePostprocessor( - std::move(check_solution)); + [checker = std::move(check_solution)](CpSolverResponse* response) { + checker(*response); + }); } // Solution postsolving. @@ -2553,7 +2576,8 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { // We ignore the multithreading parameter in this case. 
#else // __PORTABLE_PLATFORM__ if (params.num_workers() > 1 || params.interleave_search() || - !params.subsolvers().empty() || params.use_ls_only()) { + !params.subsolvers().empty() || !params.filter_subsolvers().empty() || + params.use_ls_only()) { SolveCpModelParallel(&shared, model); #endif // __PORTABLE_PLATFORM__ } else { diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc index c8f60ffddd..3e51e066ec 100644 --- a/ortools/sat/cp_model_solver_helpers.cc +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -108,10 +108,6 @@ ABSL_FLAG( "we will interpret this as an internal solution which can be used for " "debugging. For instance we use it to identify wrong cuts/reasons."); -ABSL_FLAG(bool, cp_model_check_intermediate_solutions, false, - "When true, all intermediate solutions found by the solver will be " - "checked. This can be expensive, therefore it is off by default."); - namespace operations_research { namespace sat { @@ -307,12 +303,6 @@ std::vector GetSolutionValues(const CpModelProto& model_proto, } } } - - if (DEBUG_MODE || - absl::GetFlag(FLAGS_cp_model_check_intermediate_solutions)) { - // TODO(user): Checks against initial model. 
- CHECK(SolutionIsFeasible(model_proto, solution)); - } return solution; } @@ -660,7 +650,6 @@ void RegisterVariableBoundsLevelZeroImport( std::vector new_upper_bounds; shared_bounds_manager->GetChangedBounds( id, &model_variables, &new_lower_bounds, &new_upper_bounds); - bool new_bounds_have_been_imported = false; for (int i = 0; i < model_variables.size(); ++i) { const int model_var = model_variables[i]; @@ -675,7 +664,6 @@ void RegisterVariableBoundsLevelZeroImport( sat_solver->NotifyThatModelIsUnsat(); return false; } - new_bounds_have_been_imported = true; trail->EnqueueWithUnitReason(lit); continue; } @@ -691,7 +679,6 @@ void RegisterVariableBoundsLevelZeroImport( const bool changed_ub = new_ub < old_ub; if (!changed_lb && !changed_ub) continue; - new_bounds_have_been_imported = true; if (VLOG_IS_ON(3)) { const IntegerVariableProto& var_proto = model_proto.variables(model_var); @@ -715,9 +702,9 @@ void RegisterVariableBoundsLevelZeroImport( return false; } } - if (new_bounds_have_been_imported && !sat_solver->FinishPropagation()) { - return false; - } + + // Note that we will propagate if they are new bounds separately. + // See BeforeTakingDecision(). return true; }; model->GetOrCreate()->callbacks.push_back( @@ -764,7 +751,7 @@ void RegisterObjectiveBoundsImport( const auto import_objective_bounds = [name, solver, integer_trail, objective, shared_response_manager]() { if (solver->AssumptionLevel() != 0) return true; - bool propagate = false; + bool tighter_bounds = false; const IntegerValue external_lb = shared_response_manager->GetInnerObjectiveLowerBound(); @@ -776,7 +763,7 @@ void RegisterObjectiveBoundsImport( {}, {})) { return false; } - propagate = true; + tighter_bounds = true; } const IntegerValue external_ub = @@ -789,18 +776,20 @@ void RegisterObjectiveBoundsImport( {}, {})) { return false; } - propagate = true; + tighter_bounds = true; } - if (!propagate) return true; + // Note that we will propagate if they are new bounds separately. 
+ // See BeforeTakingDecision(). + if (tighter_bounds) { + VLOG(3) << "'" << name << "' imports objective bounds: external [" + << objective->ScaleIntegerObjective(external_lb) << ", " + << objective->ScaleIntegerObjective(external_ub) << "], current [" + << objective->ScaleIntegerObjective(current_lb) << ", " + << objective->ScaleIntegerObjective(current_ub) << "]"; + } - VLOG(3) << "'" << name << "' imports objective bounds: external [" - << objective->ScaleIntegerObjective(external_lb) << ", " - << objective->ScaleIntegerObjective(external_ub) << "], current [" - << objective->ScaleIntegerObjective(current_lb) << ", " - << objective->ScaleIntegerObjective(current_ub) << "]"; - - return solver->FinishPropagation(); + return true; }; model->GetOrCreate()->callbacks.push_back( @@ -1007,15 +996,21 @@ void LoadBaseModel(const CpModelProto& model_proto, Model* model) { VLOG(3) << num_ignored_constraints << " constraints were skipped."; } if (!unsupported_types.empty()) { - VLOG(1) << "There is unsupported constraints types in this model: "; + auto* logger = model->GetOrCreate(); + SOLVER_LOG(logger, + "There is unsupported constraints types in this model: "); std::vector names; for (const ConstraintProto::ConstraintCase type : unsupported_types) { names.push_back(ConstraintCaseName(type)); } std::sort(names.begin(), names.end()); for (const absl::string_view name : names) { - VLOG(1) << " - " << name; + SOLVER_LOG(logger, " - ", name); } + + // TODO(user): This is wrong. We should support a MODEL_INVALID end of solve + // in the SharedResponseManager. 
+ SOLVER_LOG(logger, "BUG: We will wrongly report INFEASIBLE now."); return unsat(); } diff --git a/ortools/sat/cp_model_solver_helpers.h b/ortools/sat/cp_model_solver_helpers.h index 14e34ab310..a2220f3b95 100644 --- a/ortools/sat/cp_model_solver_helpers.h +++ b/ortools/sat/cp_model_solver_helpers.h @@ -32,7 +32,6 @@ #include "ortools/util/logging.h" ABSL_DECLARE_FLAG(bool, cp_model_dump_models); -ABSL_DECLARE_FLAG(bool, cp_model_check_intermediate_solutions); ABSL_DECLARE_FLAG(std::string, cp_model_dump_prefix); ABSL_DECLARE_FLAG(bool, cp_model_dump_submodels); diff --git a/ortools/sat/cuts.cc b/ortools/sat/cuts.cc index da89753b9e..289d4da62c 100644 --- a/ortools/sat/cuts.cc +++ b/ortools/sat/cuts.cc @@ -235,8 +235,7 @@ bool CutData::AllCoefficientsArePositive() const { void CutData::Canonicalize() { num_relevant_entries = 0; max_magnitude = 0; - for (int i = 0; i < terms.size(); ++i) { - CutTerm& entry = terms[i]; + for (CutTerm& entry : terms) { max_magnitude = std::max(max_magnitude, IntTypeAbs(entry.coeff)); if (entry.HasRelevantLpValue()) { std::swap(terms[num_relevant_entries], entry); diff --git a/ortools/sat/diffn_util.cc b/ortools/sat/diffn_util.cc index 0cb6fbb67c..ba8875ff6a 100644 --- a/ortools/sat/diffn_util.cc +++ b/ortools/sat/diffn_util.cc @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -411,11 +412,6 @@ absl::Span FilterBoxesThatAreTooLarge( return boxes.subspan(0, new_size); } -std::ostream& operator<<(std::ostream& out, const IndexedInterval& interval) { - return out << "[" << interval.start << ".." 
<< interval.end << " (#" - << interval.index << ")]"; -} - void ConstructOverlappingSets(bool already_sorted, std::vector* intervals, std::vector>* result) { @@ -1539,7 +1535,8 @@ FindRectanglesResult FindRectanglesWithEnergyConflictMC( } std::string RenderDot(std::optional bb, - absl::Span solution) { + absl::Span solution, + std::string_view extra_dot_payload) { const std::vector colors = {"red", "green", "blue", "cyan", "yellow", "purple"}; std::stringstream ss; @@ -1559,6 +1556,7 @@ std::string RenderDot(std::optional bb, << "!\" shape=box width=" << 2 * solution[i].SizeX() << " height=" << 2 * solution[i].SizeY() << "]\n"; } + ss << extra_dot_payload; ss << "}\n"; return ss.str(); } diff --git a/ortools/sat/diffn_util.h b/ortools/sat/diffn_util.h index 0c3eac4c62..3aa6933807 100644 --- a/ortools/sat/diffn_util.h +++ b/ortools/sat/diffn_util.h @@ -76,6 +76,8 @@ struct Rectangle { std::tie(other.x_min, other.x_max, other.y_min, other.y_max); } + bool operator!=(const Rectangle& other) const { return !(other == *this); } + static Rectangle GetEmpty() { return Rectangle{.x_min = IntegerValue(0), .x_max = IntegerValue(0), @@ -190,8 +192,13 @@ struct IndexedInterval { return a.start < b.start; } }; + + template + friend void AbslStringify(Sink& sink, const IndexedInterval& interval) { + absl::Format(&sink, "[%v..%v] (#%v)", interval.start, interval.end, + interval.index); + } }; -std::ostream& operator<<(std::ostream& out, const IndexedInterval& interval); // Given n fixed intervals, returns the subsets of intervals that overlap during // at least one time unit. Note that we only return "maximal" subset and filter @@ -599,7 +606,8 @@ FindRectanglesResult FindRectanglesWithEnergyConflictMC( // Render a packing solution as a Graphviz dot file. Only works in the "neato" // or "fdp" Graphviz backends. 
std::string RenderDot(std::optional bb, - absl::Span solution); + absl::Span solution, + std::string_view extra_dot_payload = ""); // Given a bounding box and a list of rectangles inside that bounding box, // returns a list of rectangles partitioning the empty area inside the bounding diff --git a/ortools/sat/integer.cc b/ortools/sat/integer.cc index ea6b4885af..85fe9849af 100644 --- a/ortools/sat/integer.cc +++ b/ortools/sat/integer.cc @@ -1686,13 +1686,13 @@ bool IntegerTrail::EnqueueInternal( } const int prev_trail_index = var_trail_index_[i_lit.var]; + var_lbs_[i_lit.var] = i_lit.bound; + var_trail_index_[i_lit.var] = integer_trail_.size(); integer_trail_.push_back({/*bound=*/i_lit.bound, /*var=*/i_lit.var, /*prev_trail_index=*/prev_trail_index, /*reason_index=*/reason_index}); - var_lbs_[i_lit.var] = i_lit.bound; - var_trail_index_[i_lit.var] = integer_trail_.size() - 1; return true; } @@ -1737,13 +1737,13 @@ bool IntegerTrail::EnqueueAssociatedIntegerLiteral(IntegerLiteral i_lit, const int reason_index = AppendReasonToInternalBuffers({literal_reason.Negated()}, {}); const int prev_trail_index = var_trail_index_[i_lit.var]; + var_lbs_[i_lit.var] = i_lit.bound; + var_trail_index_[i_lit.var] = integer_trail_.size(); integer_trail_.push_back({/*bound=*/i_lit.bound, /*var=*/i_lit.var, /*prev_trail_index=*/prev_trail_index, /*reason_index=*/reason_index}); - var_lbs_[i_lit.var] = i_lit.bound; - var_trail_index_[i_lit.var] = integer_trail_.size() - 1; return true; } @@ -2113,9 +2113,10 @@ void GenericLiteralWatcher::CallOnNextPropagate(int id) { void GenericLiteralWatcher::UpdateCallingNeeds(Trail* trail) { // Process any new Literal on the trail. 
+ const int literal_limit = literal_to_watcher_.size(); while (propagation_trail_index_ < trail->Index()) { const Literal literal = (*trail)[propagation_trail_index_++]; - if (literal.Index() >= literal_to_watcher_.size()) continue; + if (literal.Index() >= literal_limit) continue; for (const auto entry : literal_to_watcher_[literal]) { if (!in_queue_[entry.id]) { in_queue_[entry.id] = true; @@ -2128,8 +2129,9 @@ void GenericLiteralWatcher::UpdateCallingNeeds(Trail* trail) { } // Process the newly changed variables lower bounds. + const int var_limit = var_to_watcher_.size(); for (const IntegerVariable var : modified_vars_.PositionsSetAtLeastOnce()) { - if (var.value() >= var_to_watcher_.size()) continue; + if (var.value() >= var_limit) continue; for (const auto entry : var_to_watcher_[var]) { if (!in_queue_[entry.id]) { in_queue_[entry.id] = true; diff --git a/ortools/sat/integer_expr.cc b/ortools/sat/integer_expr.cc index 9d9924f896..29ca184f70 100644 --- a/ortools/sat/integer_expr.cc +++ b/ortools/sat/integer_expr.cc @@ -445,10 +445,11 @@ bool LinearConstraintPropagator::PropagateAtLevelZero() { IntegerValue new_ub; if (use_int128) { const IntegerValue ub = shared_->integer_trail->LevelZeroUpperBound(var); - const absl::int128 div128 = slack128 / absl::int128(coeff.value()); - if (absl::int128(lb.value()) + div128 >= absl::int128(ub.value())) { + if (absl::int128((ub - lb).value()) * absl::int128(coeff.value()) <= + slack128) { continue; } + const absl::int128 div128 = slack128 / absl::int128(coeff.value()); new_ub = lb + IntegerValue(static_cast(div128)); } else { const IntegerValue div = slack / coeff; diff --git a/ortools/sat/integer_search.cc b/ortools/sat/integer_search.cc index 1305be482e..7270d4bca9 100644 --- a/ortools/sat/integer_search.cc +++ b/ortools/sat/integer_search.cc @@ -61,10 +61,10 @@ IntegerLiteral AtMinValue(IntegerVariable var, IntegerTrail* integer_trail) { return IntegerLiteral::LowerOrEqual(var, lb); } -IntegerLiteral 
ChooseBestObjectiveValue(IntegerVariable var, Model* model) { - const auto& variables = - model->GetOrCreate()->objective_impacting_variables; - auto* integer_trail = model->GetOrCreate(); +IntegerLiteral ChooseBestObjectiveValue( + IntegerVariable var, IntegerTrail* integer_trail, + ObjectiveDefinition* objective_definition) { + const auto& variables = objective_definition->objective_impacting_variables; if (variables.contains(var)) { return AtMinValue(var, integer_trail); } else if (variables.contains(NegationOf(var))) { @@ -394,8 +394,11 @@ std::function IntegerValueSelectionHeuristic( // Objective based value. if (parameters.exploit_objective()) { - value_selection_heuristics.push_back([model](IntegerVariable var) { - return ChooseBestObjectiveValue(var, model); + auto* integer_trail = model->GetOrCreate(); + auto* objective_definition = model->GetOrCreate(); + value_selection_heuristics.push_back([integer_trail, objective_definition]( + IntegerVariable var) { + return ChooseBestObjectiveValue(var, integer_trail, objective_definition); }); } @@ -1336,12 +1339,17 @@ bool IntegerSearchHelper::BeforeTakingDecision() { // the level zero first ! otherwise, the new deductions will not be // incorporated and the solver will loop forever. if (integer_trail_->HasPendingRootLevelDeduction()) { - if (!sat_solver_->ResetToLevelZero()) return false; + sat_solver_->Backtrack(0); } // The rest only trigger at level zero. if (sat_solver_->CurrentDecisionLevel() != 0) return true; + // Rather than doing it in each callback, we detect newly fixed variables or + // tighter bounds, and propagate just once when everything was added. 
+ const int saved_bool_index = sat_solver_->LiteralTrail().Index(); + const int saved_integer_index = integer_trail_->num_enqueues(); + auto* level_zero_callbacks = model_->GetOrCreate(); for (const auto& cb : level_zero_callbacks->callbacks) { if (!cb()) { @@ -1350,6 +1358,13 @@ bool IntegerSearchHelper::BeforeTakingDecision() { } } + // We propagate if needed. + if (sat_solver_->LiteralTrail().Index() > saved_bool_index || + integer_trail_->num_enqueues() > saved_integer_index || + integer_trail_->HasPendingRootLevelDeduction()) { + if (!sat_solver_->ResetToLevelZero()) return false; + } + if (parameters_.use_sat_inprocessing() && !inprocessing_->InprocessingRound()) { sat_solver_->NotifyThatModelIsUnsat(); diff --git a/ortools/sat/integer_search.h b/ortools/sat/integer_search.h index 5a3d5d64b4..bf545851dc 100644 --- a/ortools/sat/integer_search.h +++ b/ortools/sat/integer_search.h @@ -139,7 +139,9 @@ SatSolver::Status SolveIntegerProblemWithLazyEncoding(Model* model); IntegerLiteral AtMinValue(IntegerVariable var, IntegerTrail* integer_trail); // If a variable appear in the objective, branch on its best objective value. -IntegerLiteral ChooseBestObjectiveValue(IntegerVariable var, Model* model); +IntegerLiteral ChooseBestObjectiveValue( + IntegerVariable var, IntegerTrail* integer_trail, + ObjectiveDefinition* objective_definition); // Returns decision corresponding to var >= lb + max(1, (ub - lb) / 2). It also // CHECKs that the variable is not fixed. 
diff --git a/ortools/sat/linear_constraint.cc b/ortools/sat/linear_constraint.cc index f3f0c891fd..046f27f968 100644 --- a/ortools/sat/linear_constraint.cc +++ b/ortools/sat/linear_constraint.cc @@ -172,26 +172,27 @@ double ComputeActivity( double a1 = 0.0; double a2 = 0.0; double a3 = 0.0; + const double* view = values.data(); for (; i < shifted_size; i += 4) { a0 += static_cast(constraint.coeffs[i].value()) * - values[constraint.vars[i]]; + view[constraint.vars[i].value()]; a1 += static_cast(constraint.coeffs[i + 1].value()) * - values[constraint.vars[i + 1]]; + view[constraint.vars[i + 1].value()]; a2 += static_cast(constraint.coeffs[i + 2].value()) * - values[constraint.vars[i + 2]]; + view[constraint.vars[i + 2].value()]; a3 += static_cast(constraint.coeffs[i + 3].value()) * - values[constraint.vars[i + 3]]; + view[constraint.vars[i + 3].value()]; } double activity = a0 + a1 + a2 + a3; if (i < size) { activity += static_cast(constraint.coeffs[i].value()) * - values[constraint.vars[i]]; + view[constraint.vars[i].value()]; if (i + 1 < size) { activity += static_cast(constraint.coeffs[i + 1].value()) * - values[constraint.vars[i + 1]]; + view[constraint.vars[i + 1].value()]; if (i + 2 < size) { activity += static_cast(constraint.coeffs[i + 2].value()) * - values[constraint.vars[i + 2]]; + view[constraint.vars[i + 2].value()]; } } } diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index 2d0ffd9d2e..e69f6cbc25 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -106,21 +106,31 @@ bool ScatteredIntegerVector::Add(glop::ColIndex col, IntegerValue value) { template bool ScatteredIntegerVector::AddLinearExpressionMultiple( const IntegerValue multiplier, absl::Span cols, - absl::Span coeffs) { + absl::Span coeffs, IntegerValue max_coeff_magnitude) { + // Since we have the norm, this avoid checking each products below. 
+ if (check_overflow) { + const IntegerValue prod = CapProdI(max_coeff_magnitude, multiplier); + if (AtMinOrMaxInt64(prod.value())) return false; + } + + IntegerValue* data = dense_vector_.data(); const double threshold = 0.1 * static_cast(dense_vector_.size()); const int num_terms = cols.size(); if (is_sparse_ && static_cast(num_terms) < threshold) { for (int i = 0; i < num_terms; ++i) { - if (is_zeros_[cols[i]]) { - is_zeros_[cols[i]] = false; - non_zeros_.push_back(cols[i]); + const glop::ColIndex col = cols[i]; + if (is_zeros_[col]) { + is_zeros_[col] = false; + non_zeros_.push_back(col); } + const IntegerValue product = multiplier * coeffs[i]; if (check_overflow) { - if (!AddProductTo(multiplier, coeffs[i], &dense_vector_[cols[i]])) { + if (AddIntoOverflow(product.value(), + data[col.value()].mutable_value())) { return false; } } else { - dense_vector_[cols[i]] += multiplier * coeffs[i]; + data[col.value()] += product; } } if (static_cast(non_zeros_.size()) > threshold) { @@ -129,12 +139,15 @@ bool ScatteredIntegerVector::AddLinearExpressionMultiple( } else { is_sparse_ = false; for (int i = 0; i < num_terms; ++i) { + const glop::ColIndex col = cols[i]; + const IntegerValue product = multiplier * coeffs[i]; if (check_overflow) { - if (!AddProductTo(multiplier, coeffs[i], &dense_vector_[cols[i]])) { + if (AddIntoOverflow(product.value(), + data[col.value()].mutable_value())) { return false; } } else { - dense_vector_[cols[i]] += multiplier * coeffs[i]; + data[col.value()] += product; } } } @@ -733,6 +746,18 @@ bool LinearProgrammingConstraint::SolveLp() { if (lp_solution_level_ == 0) { level_zero_lp_solution_ = lp_solution_; } + } else { + // If this parameter is true, we still copy whatever we have as these + // values will be used for the local-branching lns heuristic. 
+ if (parameters_.stop_after_root_propagation()) { + const int num_vars = integer_variables_.size(); + for (int i = 0; i < num_vars; i++) { + const glop::Fractional value = + GetVariableValueAtCpScale(glop::ColIndex(i)); + expanded_lp_solution_[integer_variables_[i]] = value; + expanded_lp_solution_[NegationOf(integer_variables_[i])] = -value; + } + } } return true; @@ -1220,7 +1245,8 @@ bool LinearProgrammingConstraint::PostprocessAndAddCut( const int slack_index = (var.value() - first_slack.value()) / 2; const glop::RowIndex row = tmp_slack_rows_[slack_index]; if (!tmp_scattered_vector_.AddLinearExpressionMultiple( - coeff, IntegerLpRowCols(row), IntegerLpRowCoeffs(row))) { + coeff, IntegerLpRowCols(row), IntegerLpRowCoeffs(row), + infinity_norms_[row])) { VLOG(2) << "Overflow in slack removal"; ++num_cut_overflows_; return false; @@ -2002,11 +2028,12 @@ bool LinearProgrammingConstraint::ComputeNewLinearConstraint( for (const std::pair& term : integer_multipliers) { const RowIndex row = term.first; const IntegerValue multiplier = term.second; - CHECK_LT(row, integer_lp_.size()); + DCHECK_LT(row, integer_lp_.size()); // Update the constraint. if (!scattered_vector->AddLinearExpressionMultiple( - multiplier, IntegerLpRowCols(row), IntegerLpRowCoeffs(row))) { + multiplier, IntegerLpRowCols(row), IntegerLpRowCoeffs(row), + infinity_norms_[row])) { return false; } @@ -2172,13 +2199,11 @@ void LinearProgrammingConstraint::AdjustNewLinearConstraint( if (to_add != 0) { term.second += to_add; *upper_bound += to_add * row_bound; - - // TODO(user): we could avoid checking overflow here, but this is likely - // not in the hot loop. 
adjusted = true; CHECK(scattered_vector ->AddLinearExpressionMultiple( - to_add, IntegerLpRowCols(row), IntegerLpRowCoeffs(row))); + to_add, IntegerLpRowCols(row), IntegerLpRowCoeffs(row), + infinity_norms_[row])); } } if (adjusted) ++num_adjusts_; @@ -2298,7 +2323,7 @@ bool LinearProgrammingConstraint::PropagateExactLpReason() { } CHECK(tmp_scattered_vector_ .AddLinearExpressionMultiple( - obj_scale, tmp_cols_, tmp_coeffs_)); + obj_scale, tmp_cols_, tmp_coeffs_, objective_infinity_norm_)); CHECK(AddProductTo(-obj_scale, integer_objective_offset_, &rc_ub)); extra_term = {objective_cp_, -obj_scale}; @@ -2367,15 +2392,12 @@ bool LinearProgrammingConstraint::PropagateExactDualRay() { } int64_t LinearProgrammingConstraint::CalculateDegeneracy() { - const glop::ColIndex num_vars = simplex_.GetProblemNumCols(); int num_non_basic_with_zero_rc = 0; - for (glop::ColIndex i(0); i < num_vars; ++i) { - const double rc = simplex_.GetReducedCost(i); - if (rc != 0.0) continue; - if (simplex_.GetVariableStatus(i) == glop::VariableStatus::BASIC) { - continue; + const auto reduced_costs = simplex_.GetReducedCosts().const_view(); + for (const glop::ColIndex i : simplex_.GetNotBasicBitRow()) { + if (reduced_costs[i] == 0.0) { + num_non_basic_with_zero_rc++; } - num_non_basic_with_zero_rc++; } const int64_t num_cols = simplex_.GetProblemNumCols().value(); is_degenerate_ = num_non_basic_with_zero_rc >= 0.3 * num_cols; diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index cc6e5c2cc5..184753244f 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -73,7 +73,8 @@ class ScatteredIntegerVector { template bool AddLinearExpressionMultiple(IntegerValue multiplier, absl::Span cols, - absl::Span coeffs); + absl::Span coeffs, + IntegerValue max_coeff_magnitude); // This is not const only because non_zeros is sorted. 
Note that sorting the // non-zeros make the result deterministic whether or not we were in sparse diff --git a/ortools/sat/presolve_context.cc b/ortools/sat/presolve_context.cc index 722f530cdf..32de1f0cf7 100644 --- a/ortools/sat/presolve_context.cc +++ b/ortools/sat/presolve_context.cc @@ -97,10 +97,13 @@ int PresolveContext::NewIntVarWithDefinition( return new_var; } -int PresolveContext::NewBoolVar() { return NewIntVar(Domain(0, 1)); } +int PresolveContext::NewBoolVar(absl::string_view source) { + UpdateRuleStats(absl::StrCat("new_bool: ", source)); + return NewIntVar(Domain(0, 1)); +} int PresolveContext::NewBoolVarWithClause(absl::Span clause) { - const int new_var = NewBoolVar(); + const int new_var = NewBoolVar("with clause"); if (hint_is_loaded_) { bool all_have_hint = true; for (const int literal : clause) { @@ -124,7 +127,7 @@ int PresolveContext::NewBoolVarWithClause(absl::Span clause) { } } - // If there all literal where hinted and at zero, we set the hint of + // If all literals where hinted and at zero, we set the hint of // new_var to zero, otherwise we leave it unassigned. if (all_have_hint && !hint_has_value_[new_var]) { hint_has_value_[new_var] = true; @@ -522,7 +525,7 @@ ABSL_MUST_USE_RESULT bool PresolveContext::IntersectDomainWith( if (!domains[var].Contains(hint_[var])) { LOG(FATAL) << "Hint with value " << hint_[var] << " infeasible when changing domain of " << var << " to " - << domain[var]; + << domains[var]; } #endif @@ -598,7 +601,15 @@ void PresolveContext::UpdateRuleStats(const std::string& name, int num_times) { if (!is_todo) num_presolve_operations += num_times; if (logger_->LoggingIsEnabled()) { - VLOG(is_todo ? 3 : 2) << num_presolve_operations << " : " << name; + if (VLOG_IS_ON(1)) { + int level = is_todo ? 
3 : 2; + if (std::abs(num_presolve_operations - + params_.debug_max_num_presolve_operations()) <= 100) { + level = 1; + } + VLOG(level) << num_presolve_operations << " : " << name; + } + stats_by_rule_name_[name] += num_times; } } @@ -1378,7 +1389,7 @@ void PresolveContext::CanonicalizeDomainOfSizeTwo(int var) { var_map[var_min] = SavedLiteral(min_literal); } else { UpdateRuleStats("variables with 2 values: create encoding literal"); - max_literal = NewBoolVar(); + max_literal = NewBoolVar("var with 2 values"); min_literal = NegatedRef(max_literal); var_map[var_min] = SavedLiteral(min_literal); var_map[var_max] = SavedLiteral(max_literal); @@ -1411,9 +1422,9 @@ void PresolveContext::CanonicalizeDomainOfSizeTwo(int var) { void PresolveContext::InsertVarValueEncodingInternal(int literal, int var, int64_t value, bool add_constraints) { - CHECK(RefIsPositive(var)); - CHECK(!VariableWasRemoved(literal)); - CHECK(!VariableWasRemoved(var)); + DCHECK(RefIsPositive(var)); + DCHECK(!VariableWasRemoved(literal)); + DCHECK(!VariableWasRemoved(var)); absl::flat_hash_map& var_map = encoding_[var]; // The code below is not 100% correct if this is not the case. @@ -1445,16 +1456,10 @@ void PresolveContext::InsertVarValueEncodingInternal(int literal, int var, // TODO(user): There is a bug here if the var == value was not in the // domain, it will just be ignored. 
CanonicalizeDomainOfSizeTwo(var); - } else { - VLOG(2) << "Insert lit(" << literal << ") <=> var(" << var - << ") == " << value; - eq_half_encoding_[var][value].insert(literal); - neq_half_encoding_[var][value].insert(NegatedRef(literal)); - if (add_constraints) { - UpdateRuleStats("variables: add encoding constraint"); - AddImplyInDomain(literal, var, Domain(value)); - AddImplyInDomain(NegatedRef(literal), var, Domain(value).Complement()); - } + } else if (add_constraints) { + UpdateRuleStats("variables: add encoding constraint"); + AddImplyInDomain(literal, var, Domain(value)); + AddImplyInDomain(NegatedRef(literal), var, Domain(value).Complement()); } } @@ -1462,32 +1467,25 @@ bool PresolveContext::InsertHalfVarValueEncoding(int literal, int var, int64_t value, bool imply_eq) { if (is_unsat_) return false; DCHECK(RefIsPositive(var)); - if (!CanonicalizeEncoding(&var, &value) || !DomainOf(var).Contains(value)) { - return SetLiteralToFalse(literal); - } // Creates the linking sets on demand. // Insert the enforcement literal in the half encoding map. - auto& direct_set = - imply_eq ? eq_half_encoding_[var][value] : neq_half_encoding_[var][value]; - if (!direct_set.insert(literal).second) return false; // Already there. - + auto& direct_set = imply_eq ? eq_half_encoding_ : neq_half_encoding_; + if (!direct_set.insert({literal, var, value}).second) { + return false; // Already there. + } VLOG(2) << "Collect lit(" << literal << ") implies var(" << var << (imply_eq ? ") == " : ") != ") << value; UpdateRuleStats("variables: detect half reified value encoding"); // Note(user): We don't expect a lot of literals in these sets, so doing // a scan should be okay. - auto& other_set = - imply_eq ? neq_half_encoding_[var][value] : eq_half_encoding_[var][value]; - for (const int other : other_set) { - if (GetLiteralRepresentative(other) != NegatedRef(literal)) continue; - + auto& other_set = imply_eq ? 
neq_half_encoding_ : eq_half_encoding_; + if (other_set.contains({NegatedRef(literal), var, value})) { UpdateRuleStats("variables: detect fully reified value encoding"); const int imply_eq_literal = imply_eq ? literal : NegatedRef(literal); InsertVarValueEncodingInternal(imply_eq_literal, var, value, /*add_constraints=*/false); - break; } return true; @@ -1508,6 +1506,8 @@ bool PresolveContext::InsertVarValueEncoding(int literal, int var, } literal = GetLiteralRepresentative(literal); InsertVarValueEncodingInternal(literal, var, value, /*add_constraints=*/true); + eq_half_encoding_.insert({literal, var, value}); + neq_half_encoding_.insert({NegatedRef(literal), var, value}); if (hint_is_loaded_) { const int bool_var = PositiveRef(literal); @@ -1524,6 +1524,7 @@ bool PresolveContext::InsertVarValueEncoding(int literal, int var, bool PresolveContext::StoreLiteralImpliesVarEqValue(int literal, int var, int64_t value) { if (!CanonicalizeEncoding(&var, &value) || !DomainOf(var).Contains(value)) { + // The literal cannot be true. return SetLiteralToFalse(literal); } literal = GetLiteralRepresentative(literal); @@ -1532,7 +1533,10 @@ bool PresolveContext::StoreLiteralImpliesVarEqValue(int literal, int var, bool PresolveContext::StoreLiteralImpliesVarNEqValue(int literal, int var, int64_t value) { - if (!CanonicalizeEncoding(&var, &value)) return false; + if (!CanonicalizeEncoding(&var, &value) || !DomainOf(var).Contains(value)) { + // The constraint is trivial. 
+ return false; + } literal = GetLiteralRepresentative(literal); return InsertHalfVarValueEncoding(literal, var, value, /*imply_eq=*/false); } @@ -1541,16 +1545,16 @@ bool PresolveContext::HasVarValueEncoding(int ref, int64_t value, int* literal) { CHECK(!VariableWasRemoved(ref)); if (!CanonicalizeEncoding(&ref, &value)) return false; - const absl::flat_hash_map& var_map = encoding_[ref]; - const auto it = var_map.find(value); - if (it != var_map.end()) { - if (VariableWasRemoved(it->second.Get(this))) return false; - if (literal != nullptr) { - *literal = it->second.Get(this); - } - return true; + const auto first_it = encoding_.find(ref); + if (first_it == encoding_.end()) return false; + const auto it = first_it->second.find(value); + if (it == first_it->second.end()) return false; + + if (VariableWasRemoved(it->second.Get(this))) return false; + if (literal != nullptr) { + *literal = it->second.Get(this); } - return false; + return true; } bool PresolveContext::IsFullyEncoded(int ref) const { @@ -1627,14 +1631,14 @@ int PresolveContext::GetOrCreateVarValueEncoding(int ref, int64_t value) { var_map[0] = SavedLiteral(NegatedRef(representative)); return value == 1 ? representative : NegatedRef(representative); } else { - const int literal = NewBoolVar(); + const int literal = NewBoolVar("integer encoding"); InsertVarValueEncoding(literal, var, var_max); const int representative = GetLiteralRepresentative(literal); return value == var_max ? 
representative : NegatedRef(representative); } } - const int literal = NewBoolVar(); + const int literal = NewBoolVar("integer encoding"); InsertVarValueEncoding(literal, var, value); return GetLiteralRepresentative(literal); } @@ -2161,7 +2165,7 @@ int PresolveContext::GetOrCreateReifiedPrecedenceLiteral( const auto& it = reified_precedences_cache_.find(key); if (it != reified_precedences_cache_.end()) return it->second; - const int result = NewBoolVar(); + const int result = NewBoolVar("reified precedence"); reified_precedences_cache_[key] = result; // result => (time_i <= time_j) && active_i && active_j. diff --git a/ortools/sat/presolve_context.h b/ortools/sat/presolve_context.h index 2244435d21..47cc332198 100644 --- a/ortools/sat/presolve_context.h +++ b/ortools/sat/presolve_context.h @@ -97,7 +97,7 @@ class PresolveContext { // TODO(user): We should control more how this is called so we can update // a solution hint accordingly. int NewIntVar(const Domain& domain); - int NewBoolVar(); + int NewBoolVar(absl::string_view source); // This should replace NewIntVar() eventually in order to be able to crush // primal solution or just update the hint. @@ -577,6 +577,16 @@ class PresolveContext { // Solution hint accessor. bool VarHasSolutionHint(int var) const { return hint_has_value_[var]; } int64_t SolutionHint(int var) const { return hint_[var]; } + bool HintIsLoaded() const { return hint_is_loaded_; } + absl::Span SolutionHint() const { return hint_; } + + // Allows to set the hint of a newly created variable. 
+ void SetNewVariableHint(int var, int64_t value) { + CHECK(hint_is_loaded_); + CHECK(!hint_has_value_[var]); + hint_has_value_[var] = true; + hint_[var] = value; + } SolverLogger* logger() const { return logger_; } const SatParameters& params() const { return params_; } @@ -714,15 +724,11 @@ class PresolveContext { encoding_; // Contains the currently collected half value encodings: - // i.e.: literal => var ==/!= value + // (literal, var, value), i.e.: literal => var ==/!= value // The state is accumulated (adding x => var == value then !x => var != value) // will deduce that x equivalent to var == value. - absl::flat_hash_map>> - eq_half_encoding_; - absl::flat_hash_map>> - neq_half_encoding_; + absl::flat_hash_set> eq_half_encoding_; + absl::flat_hash_set> neq_half_encoding_; // This regroups all the affine relations between variables. Note that the // constraints used to detect such relations will be removed from the model at diff --git a/ortools/sat/probing.cc b/ortools/sat/probing.cc index e11c15b1f7..941c78c830 100644 --- a/ortools/sat/probing.cc +++ b/ortools/sat/probing.cc @@ -149,7 +149,8 @@ bool Prober::ProbeOneVariableInternal(BooleanVariable b) { IntegerValue ub_min = kMaxIntegerValue; new_integer_bounds_.push_back(IntegerLiteral()); // Sentinel. - for (int i = 0; i < new_integer_bounds_.size(); ++i) { + const int limit = new_integer_bounds_.size(); + for (int i = 0; i < limit; ++i) { const IntegerVariable var = new_integer_bounds_[i].var; // Hole detection. 
diff --git a/ortools/sat/pseudo_costs.cc b/ortools/sat/pseudo_costs.cc index 2afe38e790..75e424d3a5 100644 --- a/ortools/sat/pseudo_costs.cc +++ b/ortools/sat/pseudo_costs.cc @@ -100,7 +100,36 @@ bool PseudoCosts::SaveLpInfo() { void PseudoCosts::SaveBoundChanges(Literal decision, absl::Span lp_values) { - bound_changes_ = GetBoundChanges(decision, lp_values); + bound_changes_.clear(); + for (const IntegerLiteral l : encoder_->GetIntegerLiterals(decision)) { + PseudoCosts::VariableBoundChange entry; + entry.var = l.var; + entry.lower_bound_change = l.bound - integer_trail_->LowerBound(l.var); + if (l.var < lp_values.size()) { + entry.lp_increase = + std::max(0.0, ToDouble(l.bound) - lp_values[l.var.value()]); + } + bound_changes_.push_back(entry); + } + + // NOTE: We ignore literal associated to var != value. + for (const auto [var, value] : encoder_->GetEqualityLiterals(decision)) { + { + PseudoCosts::VariableBoundChange entry; + entry.var = var; + entry.lower_bound_change = value - integer_trail_->LowerBound(var); + bound_changes_.push_back(entry); + } + + // Also do the negation. 
+ { + PseudoCosts::VariableBoundChange entry; + entry.var = NegationOf(var); + entry.lower_bound_change = + (-value) - integer_trail_->LowerBound(NegationOf(var)); + bound_changes_.push_back(entry); + } + } } void PseudoCosts::BeforeTakingDecision(Literal decision) { @@ -281,42 +310,5 @@ IntegerVariable PseudoCosts::GetBestDecisionVar() { return chosen_var; } -std::vector PseudoCosts::GetBoundChanges( - Literal decision, absl::Span lp_values) { - std::vector bound_changes; - - for (const IntegerLiteral l : encoder_->GetIntegerLiterals(decision)) { - PseudoCosts::VariableBoundChange entry; - entry.var = l.var; - entry.lower_bound_change = l.bound - integer_trail_->LowerBound(l.var); - if (l.var < lp_values.size()) { - entry.lp_increase = - std::max(0.0, ToDouble(l.bound) - lp_values[l.var.value()]); - } - bound_changes.push_back(entry); - } - - // NOTE: We ignore literal associated to var != value. - for (const auto [var, value] : encoder_->GetEqualityLiterals(decision)) { - { - PseudoCosts::VariableBoundChange entry; - entry.var = var; - entry.lower_bound_change = value - integer_trail_->LowerBound(var); - bound_changes.push_back(entry); - } - - // Also do the negation. - { - PseudoCosts::VariableBoundChange entry; - entry.var = NegationOf(var); - entry.lower_bound_change = - (-value) - integer_trail_->LowerBound(NegationOf(var)); - bound_changes.push_back(entry); - } - } - - return bound_changes; -} - } // namespace sat } // namespace operations_research diff --git a/ortools/sat/pseudo_costs.h b/ortools/sat/pseudo_costs.h index 3ab96f30e0..84486f19af 100644 --- a/ortools/sat/pseudo_costs.h +++ b/ortools/sat/pseudo_costs.h @@ -100,8 +100,9 @@ class PseudoCosts { IntegerValue lower_bound_change = IntegerValue(0); double lp_increase = 0.0; }; - std::vector GetBoundChanges( - Literal decision, absl::Span lp_values); + const std::vector& BoundChanges() { + return bound_changes_; + } private: // Returns the current objective info. 
diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index c98489d3a1..2cc412d4aa 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -23,7 +23,7 @@ option csharp_namespace = "Google.OrTools.Sat"; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 296 +// NEXT TAG: 299 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -580,6 +580,10 @@ message SatParameters { // max-sat. We also minimize problem clauses and not just the learned clause // that we keep forever like in the paper. optional double inprocessing_minimization_dtime = 275 [default = 1.0]; + optional bool inprocessing_minimization_use_conflict_analysis = 297 + [default = true]; + optional bool inprocessing_minimization_use_all_orderings = 298 + [default = false]; // ========================================================================== // Multithread @@ -1213,7 +1217,10 @@ message SatParameters { // Turns on neighborhood generator based on local branching LP. Based on Huang // et al., "Local Branching Relaxation Heuristics for Integer Linear // Programs", 2023. - optional bool use_lb_relax_lns = 255 [default = false]; + optional bool use_lb_relax_lns = 255 [default = true]; + + // Only use lb-relax if we have at least that many workers. + optional int32 lb_relax_num_workers_threshold = 296 [default = 16]; // Rounding method to use for feasibility pump. 
enum FPRoundingMethod { diff --git a/ortools/sat/sat_solver.cc b/ortools/sat/sat_solver.cc index 4babf78c7c..c29bdd25e0 100644 --- a/ortools/sat/sat_solver.cc +++ b/ortools/sat/sat_solver.cc @@ -1129,7 +1129,7 @@ SatSolver::Status SatSolver::Solve() { return SolveInternal(time_limit_, parameters_->max_number_of_conflicts()); } -void SatSolver::KeepAllClauseUsedToInfer(BooleanVariable variable) { +void SatSolver::KeepAllClausesUsedToInfer(BooleanVariable variable) { CHECK(Assignment().VariableIsAssigned(variable)); if (trail_->Info(variable).level == 0) return; int trail_index = trail_->Info(variable).trail_index; @@ -1161,7 +1161,8 @@ void SatSolver::KeepAllClauseUsedToInfer(BooleanVariable variable) { } } -bool SatSolver::SubsumptionIsInteresting(BooleanVariable variable) { +bool SatSolver::SubsumptionIsInteresting(BooleanVariable variable, + int max_size) { // TODO(user): other id should probably be safe as long as we do not delete // the propagators. Note that symmetry is tricky since we would need to keep // the symmetric clause around in KeepAllClauseUsedToInfer(). @@ -1186,7 +1187,10 @@ bool SatSolver::SubsumptionIsInteresting(BooleanVariable variable) { if (type != binary_id && type != clause_id) return false; SatClause* clause = ReasonClauseOrNull(var); if (clause != nullptr && clauses_propagator_->IsRemovable(clause)) { - ++num_clause_to_mark_as_non_deletable; + if (clause->size() > max_size) { + return false; + } + if (++num_clause_to_mark_as_non_deletable > 1) return false; } for (const Literal l : trail_->Reason(var)) { const AssignmentInfo& info = trail_->Info(l.Variable()); @@ -1201,54 +1205,63 @@ bool SatSolver::SubsumptionIsInteresting(BooleanVariable variable) { } // TODO(user): this is really an in-processing stuff and should be moved out -// of here. I think the name for that (or similar) technique is called vivify. -// Ideally this should be scheduled after other faster in-processing technique. +// of here. 
Ideally this should be scheduled after other faster in-processing +// techniques. This implements "vivification" as described in +// https://doi.org/10.1016/j.artint.2019.103197, with one significant tweak: +// we sort each clause by current trail index before trying to minimize it so +// that we can reuse the trail from previous calls in case there are overlaps. void SatSolver::TryToMinimizeClause(SatClause* clause) { CHECK(clause != nullptr); ++counters_.minimization_num_clauses; - absl::btree_set moved_last; - std::vector candidate(clause->begin(), clause->end()); + std::vector candidate; + candidate.reserve(clause->size()); - // Note that CP-SAT presolve detect the clauses that share n-1 literals and - // transform them into (n-1 enforcement) => (1 literal per clause). We + // Note that CP-SAT presolve detects clauses that share n-1 literals and + // transforms them into (n-1 enforcement) => (1 literal per clause). We // currently do not support that internally, but these clauses will still - // likely be loaded one after the other, so there is an high chance that if we + // likely be loaded one after the other, so there is a high chance that if we // call TryToMinimizeClause() on consecutive clauses, there will be a long - // prefix in common ! + // prefix in common! // // TODO(user): Exploit this more by choosing a good minimization order? int longest_valid_prefix = 0; if (CurrentDecisionLevel() > 0) { - // Quick linear scan to see if first literal is there. 
- const Literal first_decision = decisions_[0].literal; + candidate.resize(clause->size()); + // Insert any compatible decisions into their correct place in candidate + for (Literal lit : *clause) { + if (!Assignment().LiteralIsFalse(lit)) continue; + const AssignmentInfo& info = trail_->Info(lit.Variable()); + if (info.level <= 0 || info.level > clause->size()) continue; + if (decisions_[info.level - 1].literal == lit.Negated()) { + candidate[info.level - 1] = lit; + } + } + // Then compute the matching prefix and discard the rest for (int i = 0; i < candidate.size(); ++i) { - if (candidate[i].Negated() == first_decision) { - std::swap(candidate[0], candidate[i]); - longest_valid_prefix = 1; + if (candidate[i] != Literal()) { + ++longest_valid_prefix; + } else { break; } } - // Lets compute the full maximum prefix if we have already one match. - if (longest_valid_prefix == 1 && CurrentDecisionLevel() > 1) { - // Lets do the full algo. - absl::flat_hash_map indexing; - for (int i = 0; i < candidate.size(); ++i) { - indexing[candidate[i].NegatedIndex()] = i; - } - for (; longest_valid_prefix < CurrentDecisionLevel(); - ++longest_valid_prefix) { - const auto it = - indexing.find(decisions_[longest_valid_prefix].literal.Index()); - if (it == indexing.end()) break; - std::swap(candidate[longest_valid_prefix], candidate[it->second]); - indexing[candidate[it->second].NegatedIndex()] = it->second; - } - counters_.minimization_num_reused += longest_valid_prefix; - } + counters_.minimization_num_reused += longest_valid_prefix; + candidate.resize(longest_valid_prefix); } - Backtrack(longest_valid_prefix); + // Then do a second pass to add the remaining literals in order. + for (Literal lit : *clause) { + const AssignmentInfo& info = trail_->Info(lit.Variable()); + // Skip if this literal is already in the prefix. 
+ if (info.level >= 1 && info.level <= longest_valid_prefix && + candidate[info.level - 1] == lit) { + continue; + } + candidate.push_back(lit); + } + CHECK_EQ(candidate.size(), clause->size()); + Backtrack(longest_valid_prefix); + absl::btree_set moved_last; while (!model_is_unsat_) { // We want each literal in candidate to appear last once in our propagation // order. We want to do that while maximizing the reutilization of the @@ -1258,12 +1271,15 @@ void SatSolver::TryToMinimizeClause(SatClause* clause) { moved_last, CurrentDecisionLevel(), &candidate); if (target_level == -1) break; Backtrack(target_level); + while (CurrentDecisionLevel() < candidate.size()) { if (time_limit_->LimitReached()) return; const int level = CurrentDecisionLevel(); const Literal literal = candidate[level]; + // Remove false literals if (Assignment().LiteralIsFalse(literal)) { - candidate.erase(candidate.begin() + level); + candidate[level] = candidate.back(); + candidate.pop_back(); continue; } else if (Assignment().LiteralIsTrue(literal)) { const int variable_level = @@ -1277,27 +1293,35 @@ void SatSolver::TryToMinimizeClause(SatClause* clause) { return; } - // If literal (at true) wasn't propagated by this clause, then we - // know that this clause is subsumed by other clauses in the database, - // so we can remove it. Note however that we need to make sure we will - // never remove the clauses that subsumes it later. + if (parameters_->inprocessing_minimization_use_conflict_analysis()) { + // Replace the clause with the reason for the literal being true, plus + // the literal itself. 
+ candidate.clear(); + for (Literal lit : + GetDecisionsFixing(trail_->Reason(literal.Variable()))) { + candidate.push_back(lit.Negated()); + } + } else { + candidate.resize(variable_level); + } + candidate.push_back(literal); + + // If a (true) literal wasn't propagated by this clause, then we know + // that this clause is subsumed by other clauses in the database, so we + // can remove it so long as the subsumption is due to non-removable + // clauses. If we can subsume this clause by making only 1 additional + // clause permanent and that clause is no longer than this one, we will + // do so. if (ReasonClauseOrNull(literal.Variable()) != clause && - SubsumptionIsInteresting(literal.Variable())) { + SubsumptionIsInteresting(literal.Variable(), candidate.size())) { counters_.minimization_num_subsumed++; counters_.minimization_num_removed_literals += clause->size(); - KeepAllClauseUsedToInfer(literal.Variable()); + KeepAllClausesUsedToInfer(literal.Variable()); Backtrack(0); clauses_propagator_->Detach(clause); return; - } else { - // Simplify. Note(user): we could only keep in clause the literals - // responsible for the propagation, but because of the subsumption - // above, this is not needed. - if (variable_level + 1 < candidate.size()) { - candidate.resize(variable_level); - candidate.push_back(literal); - } } + break; } else { ++counters_.minimization_num_decisions; @@ -1307,19 +1331,31 @@ void SatSolver::TryToMinimizeClause(SatClause* clause) { return; } if (model_is_unsat_) return; + if (CurrentDecisionLevel() < level) { + // There was a conflict, consider the conflicting literal next so we + // should be able to exploit the conflict in the next iteration. + // TODO(user): I *think* this is sufficient to ensure pushing + // the same literal to the new trail fails, immediately on the next + // iteration, if not we may be able to analyse the last failure and + // skip some propagation steps? 
+ std::swap(candidate[level], candidate[CurrentDecisionLevel()]); + } } } if (candidate.empty()) { model_is_unsat_ = true; return; } + if (!parameters_->inprocessing_minimization_use_all_orderings()) break; moved_last.insert(candidate.back().Index()); } + if (candidate.empty()) { + model_is_unsat_ = true; + return; + } + // Returns if we don't have any minimization. - // - // Note that we don't backtrack right away so maybe if the next clause as - // similar literal, we can reuse the trail prefix! if (candidate.size() == clause->size()) return; Backtrack(0); @@ -1504,27 +1540,40 @@ bool SatSolver::MinimizeByPropagation(double dtime) { } std::vector SatSolver::GetLastIncompatibleDecisions() { + std::vector* clause = trail_->MutableConflict(); + int num_true = 0; + for (int i = 0; i < clause->size(); ++i) { + const Literal literal = (*clause)[i]; + if (Assignment().LiteralIsTrue(literal)) { + // literal at true in the conflict must be the last decision/assumption + // that could not be taken. Put it at the front to add to the result + // later. + std::swap((*clause)[i], (*clause)[num_true++]); + } + } + CHECK_LE(num_true, 1); + std::vector result = + GetDecisionsFixing(absl::MakeConstSpan(*clause).subspan(num_true)); + for (int i = 0; i < num_true; ++i) { + result.push_back((*clause)[i].Negated()); + } + return result; +} + +std::vector SatSolver::GetDecisionsFixing( + absl::Span literals) { SCOPED_TIME_STAT(&stats_); std::vector unsat_assumptions; is_marked_.ClearAndResize(num_variables_); int trail_index = 0; - int num_true = 0; - for (const Literal lit : trail_->FailingClause()) { + for (const Literal lit : literals) { CHECK(Assignment().LiteralIsAssigned(lit)); - if (Assignment().LiteralIsTrue(lit)) { - // literal at true in the conflict must be decision/assumptions that could - // not be taken. 
- ++num_true; - unsat_assumptions.push_back(lit.Negated()); - continue; - } trail_index = std::max(trail_index, trail_->Info(lit.Variable()).trail_index); is_marked_.Set(lit.Variable()); } - CHECK_LE(num_true, 1); // We just expand the conflict until we only have decisions. const int limit = diff --git a/ortools/sat/sat_solver.h b/ortools/sat/sat_solver.h index b8d2deaaff..d1bc181aa5 100644 --- a/ortools/sat/sat_solver.h +++ b/ortools/sat/sat_solver.h @@ -234,6 +234,10 @@ class SatSolver { // the problem UNSAT. std::vector GetLastIncompatibleDecisions(); + // Returns a subset of decisions that are sufficient to ensure all literals in + // `literals` are fixed to their current value. + std::vector GetDecisionsFixing(absl::Span literals); + // Advanced usage. The next 3 functions allow to drive the search from outside // the solver. @@ -717,15 +721,16 @@ class SatSolver { std::string StatusString(Status status) const; std::string RunningStatisticsString() const; - // Marks as "non-deletable" all clauses that were used to infer the given - // variable. The variable must be currently assigned. - void KeepAllClauseUsedToInfer(BooleanVariable variable); - bool SubsumptionIsInteresting(BooleanVariable variable); + // Returns true if variable is fixed in the current assignment due to + // non-removable clauses, plus at most one removable clause with size <= + // max_size. + bool SubsumptionIsInteresting(BooleanVariable variable, int max_size); + void KeepAllClausesUsedToInfer(BooleanVariable variable); // Use propagation to try to minimize the given clause. This is really similar - // to MinimizeCoreWithPropagation(). It must be called when the current - // decision level is zero. Note that because this do a small tree search, it - // will impact the variable/clauses activities and may add new conflicts. + // to MinimizeCoreWithPropagation(). Note that because this does a small tree + // search, it will impact the variable/clause activities and may add new + // conflicts. 
void TryToMinimizeClause(SatClause* clause); // This is used by the old non-model constructor. diff --git a/ortools/sat/stat_tables.cc b/ortools/sat/stat_tables.cc index e5b96d133d..957d1a7e2e 100644 --- a/ortools/sat/stat_tables.cc +++ b/ortools/sat/stat_tables.cc @@ -26,7 +26,6 @@ #include "absl/synchronization/mutex.h" #include "ortools/lp_data/lp_types.h" #include "ortools/sat/cp_model.pb.h" -#include "ortools/sat/cp_model_lns.h" #include "ortools/sat/linear_programming_constraint.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_solver.h" @@ -228,18 +227,20 @@ void SharedStatTables::AddLpStat(absl::string_view name, Model* model) { } void SharedStatTables::AddLnsStat(absl::string_view name, - const NeighborhoodGenerator& generator) { + int64_t num_fully_solved_calls, + int64_t num_calls, + int64_t num_improving_calls, + double difficulty, + double deterministic_limit) { absl::MutexLock mutex_lock(&mutex_); const double fully_solved_proportion = - static_cast(generator.num_fully_solved_calls()) / - static_cast(std::max(int64_t{1}, generator.num_calls())); + static_cast(num_fully_solved_calls) / + static_cast(std::max(int64_t{1}, num_calls)); lns_table_.push_back( - {FormatName(name), - absl::StrCat(generator.num_improving_calls(), "/", - generator.num_calls()), + {FormatName(name), absl::StrCat(num_improving_calls, "/", num_calls), absl::StrFormat("%2.0f%%", 100 * fully_solved_proportion), - absl::StrFormat("%0.2f", generator.difficulty()), - absl::StrFormat("%0.2f", generator.deterministic_limit())}); + absl::StrFormat("%0.2f", difficulty), + absl::StrFormat("%0.2f", deterministic_limit)}); } void SharedStatTables::AddLsStat(absl::string_view name, int64_t num_batches, diff --git a/ortools/sat/stat_tables.h b/ortools/sat/stat_tables.h index 18b0ffde46..f3b233909e 100644 --- a/ortools/sat/stat_tables.h +++ b/ortools/sat/stat_tables.h @@ -18,9 +18,9 @@ #include #include +#include "absl/container/btree_map.h" #include "absl/strings/string_view.h" 
#include "absl/synchronization/mutex.h" -#include "ortools/sat/cp_model_lns.h" #include "ortools/sat/model.h" #include "ortools/sat/subsolver.h" #include "ortools/sat/util.h" @@ -42,13 +42,14 @@ class SharedStatTables { void AddLpStat(absl::string_view name, Model* model); - void AddLnsStat(absl::string_view name, - const NeighborhoodGenerator& generator); + void AddLnsStat(absl::string_view name, int64_t num_fully_solved_calls, + int64_t num_calls, int64_t num_improving_calls, + double difficulty, double deterministic_limit); void AddLsStat(absl::string_view name, int64_t num_batches, int64_t num_restarts, int64_t num_linear_moves, int64_t num_general_moves, int64_t num_compound_moves, - int64_t num_bactracks, int64_t num_weight_updates, + int64_t num_backtracks, int64_t num_weight_updates, int64_t num_scores_computed); // Display the set of table at the end. diff --git a/ortools/sat/subsolver.cc b/ortools/sat/subsolver.cc index 885913187b..6641c51add 100644 --- a/ortools/sat/subsolver.cc +++ b/ortools/sat/subsolver.cc @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -43,18 +44,23 @@ namespace { // only SubSolvers for which TaskIsAvailable() is true are considered. Return -1 // if no SubSolver can generate a new task. // -// For now we use a really basic logic: call the least frequently called. +// For now we use a really basic logic that tries to equilibrate the walltime or +// deterministic time spent in each subsolver. 
int NextSubsolverToSchedule(std::vector>& subsolvers, - absl::Span num_generated_tasks) { + bool deterministic = true) { int best = -1; + double best_score = std::numeric_limits::infinity(); for (int i = 0; i < subsolvers.size(); ++i) { if (subsolvers[i] == nullptr) continue; if (subsolvers[i]->TaskIsAvailable()) { - if (best == -1 || num_generated_tasks[i] < num_generated_tasks[best]) { + const double score = subsolvers[i]->GetSelectionScore(deterministic); + if (best == -1 || score < best_score) { + best_score = score; best = i; } } } + if (best != -1) VLOG(1) << "Scheduling " << subsolvers[best]->name(); return best; } @@ -85,14 +91,13 @@ void SynchronizeAll(const std::vector>& subsolvers) { void SequentialLoop(std::vector>& subsolvers) { int64_t task_id = 0; - std::vector num_generated_tasks(subsolvers.size(), 0); std::vector num_in_flight_per_subsolvers(subsolvers.size(), 0); while (true) { SynchronizeAll(subsolvers); ClearSubsolversThatAreDone(num_in_flight_per_subsolvers, subsolvers); - const int best = NextSubsolverToSchedule(subsolvers, num_generated_tasks); + const int best = NextSubsolverToSchedule(subsolvers); if (best == -1) break; - num_generated_tasks[best]++; + subsolvers[best]->NotifySelection(); WallTimer timer; timer.Start(); @@ -126,7 +131,6 @@ void DeterministicLoop(std::vector>& subsolvers, } int64_t task_id = 0; - std::vector num_generated_tasks(subsolvers.size(), 0); std::vector num_in_flight_per_subsolvers(subsolvers.size(), 0); std::vector> to_run; std::vector indices; @@ -149,10 +153,10 @@ void DeterministicLoop(std::vector>& subsolvers, to_run.clear(); indices.clear(); for (int t = 0; t < batch_size; ++t) { - const int best = NextSubsolverToSchedule(subsolvers, num_generated_tasks); + const int best = NextSubsolverToSchedule(subsolvers); if (best == -1) break; num_in_flight_per_subsolvers[best]++; - num_generated_tasks[best]++; + subsolvers[best]->NotifySelection(); to_run.push_back(subsolvers[best]->GenerateTask(task_id++)); 
indices.push_back(best); } @@ -210,7 +214,6 @@ void NonDeterministicLoop(std::vector>& subsolvers, // to create millions of them, so we use the blocking nature of // pool.Schedule() when the queue capacity is set. int64_t task_id = 0; - std::vector num_generated_tasks(subsolvers.size(), 0); while (true) { // Set to true if no task is pending right now. bool all_done = false; @@ -238,13 +241,14 @@ void NonDeterministicLoop(std::vector>& subsolvers, } SynchronizeAll(subsolvers); + int best = -1; { // We need to do that while holding the lock since substask below might // be currently updating the time via AddTaskDuration(). const absl::MutexLock mutex_lock(&mutex); ClearSubsolversThatAreDone(num_in_flight_per_subsolvers, subsolvers); + best = NextSubsolverToSchedule(subsolvers, /*deterministic=*/false); } - const int best = NextSubsolverToSchedule(subsolvers, num_generated_tasks); if (best == -1) { if (all_done) break; @@ -257,7 +261,7 @@ void NonDeterministicLoop(std::vector>& subsolvers, } // Schedule next task. - num_generated_tasks[best]++; + subsolvers[best]->NotifySelection(); { absl::MutexLock mutex_lock(&mutex); num_in_flight++; diff --git a/ortools/sat/subsolver.h b/ortools/sat/subsolver.h index 3dea6ceb4d..5f93d8a1dc 100644 --- a/ortools/sat/subsolver.h +++ b/ortools/sat/subsolver.h @@ -101,9 +101,15 @@ class SubSolver { // Note that this is protected by the global execution mutex and so it is // called sequentially. Subclasses do not need to call this. void AddTaskDuration(double duration_in_seconds) { + ++num_finished_tasks_; + wall_time_ += duration_in_seconds; timing_.AddTimeInSec(duration_in_seconds); } + // Note that this is protected by the global execution mutex and so it is + // called sequentially. Subclasses do not need to call this. + void NotifySelection() { ++num_scheduled_tasks_; } + // This one need to be called by the Subclasses. Usually from Synchronize(), // or from the task itself it we execute a single task at the same time. 
void AddTaskDeterministicDuration(double deterministic_duration) { @@ -127,11 +133,42 @@ class SubSolver { return data; } + // Returns a score used to compare which tasks to schedule next. + // We will schedule the LOWER score. + // + // Tricky: Note that this will only be called sequentially. The deterministic + // time should only be used with the DeterministicLoop() because otherwise it + // can be updated at the same time as this is called. + double GetSelectionScore(bool deterministic) const { + const double time = deterministic ? deterministic_time_ : wall_time_; + const double divisor = num_scheduled_tasks_ > 0 + ? static_cast(num_scheduled_tasks_) + : 1.0; + + // If we have little data, we strongly limit the number of task in flight. + // This is needed if some LNS are stuck for a long time to not just only + // schedule this type at the beginning. + const int64_t in_flight = num_scheduled_tasks_ - num_finished_tasks_; + const double confidence_factor = + num_finished_tasks_ > 10 ? 1.0 : std::exp(in_flight); + + // We assume a "minimum time per task" which will be our base etimation for + // the average running time of this task. + return num_scheduled_tasks_ * std::max(0.1, time / divisor) * + confidence_factor; + } + private: const std::string name_; const SubsolverType type_; + int64_t num_scheduled_tasks_ = 0; + int64_t num_finished_tasks_ = 0; + + // Sum of wall_time / deterministic_time. 
+ double wall_time_ = 0.0; double deterministic_time_ = 0.0; + TimeDistribution timing_ = TimeDistribution("task time"); TimeDistribution dtiming_ = TimeDistribution("task dtime"); }; diff --git a/ortools/sat/util.cc b/ortools/sat/util.cc index edbc813bb1..8bc821226e 100644 --- a/ortools/sat/util.cc +++ b/ortools/sat/util.cc @@ -465,7 +465,7 @@ void CompressTuples(absl::Span domain_sizes, for (int i = 0; i < num_vars; ++i) { const int domain_size = domain_sizes[i]; if (domain_size == 1) continue; - absl::flat_hash_map, std::vector> + absl::flat_hash_map, std::vector> masked_tuples_to_indices; for (int t = 0; t < tuples->size(); ++t) { int out = 0; diff --git a/ortools/sat/util.h b/ortools/sat/util.h index 9ccdf7d309..36d9e8d996 100644 --- a/ortools/sat/util.h +++ b/ortools/sat/util.h @@ -428,10 +428,15 @@ class MaxBoundedSubsetSum { template class FirstFewValues { public: - FirstFewValues() { Reset(); } + FirstFewValues() + : reachable_(new int64_t[n]), new_reachable_(new int64_t[n]) { + Reset(); + } void Reset() { - reachable_.fill(std::numeric_limits::max()); + for (int i = 0; i < n; ++i) { + reachable_[i] = std::numeric_limits::max(); + } reachable_[0] = 0; new_reachable_[0] = 0; } @@ -441,23 +446,25 @@ class FirstFewValues { // TODO(user): Implement Add() with an upper bound on the multiplicity. void Add(const int64_t positive_value) { DCHECK_GT(positive_value, 0); - if (positive_value >= reachable_.back()) return; + const int64_t* reachable = reachable_.get(); + if (positive_value >= reachable[n - 1]) return; // We copy from reachable_[i] to new_reachable_[j]. // The position zero is already copied. 
int i = 1; int j = 1; + int64_t* new_reachable = new_reachable_.get(); for (int base = 0; j < n && base < n; ++base) { - const int64_t candidate = CapAdd(new_reachable_[base], positive_value); - while (j < n && i < n && reachable_[i] < candidate) { - new_reachable_[j++] = reachable_[i++]; + const int64_t candidate = CapAdd(new_reachable[base], positive_value); + while (j < n && i < n && reachable[i] < candidate) { + new_reachable[j++] = reachable[i++]; } if (j < n) { // Eliminate duplicates. - while (i < n && reachable_[i] == candidate) i++; + while (i < n && reachable[i] == candidate) i++; // insert candidate in its final place. - new_reachable_[j++] = candidate; + new_reachable[j++] = candidate; } } std::swap(reachable_, new_reachable_); @@ -466,16 +473,19 @@ class FirstFewValues { // Returns true iff sum might be expressible as a weighted sum of the added // value. Any sum >= LastValue() is always considered potentially reachable. bool MightBeReachable(int64_t sum) const { - if (sum >= reachable_.back()) return true; - return std::binary_search(reachable_.begin(), reachable_.end(), sum); + if (sum >= reachable_[n - 1]) return true; + return std::binary_search(&reachable_[0], &reachable_[0] + n, sum); } - const std::array& reachable() const { return reachable_; } - int64_t LastValue() const { return reachable_.back(); } + int64_t LastValue() const { return reachable_[n - 1]; } + + absl::Span reachable() { + return absl::MakeSpan(reachable_.get(), n); + } private: - std::array reachable_; - std::array new_reachable_; + std::unique_ptr reachable_; + std::unique_ptr new_reachable_; }; // Use Dynamic programming to solve a single knapsack. This is used by the @@ -700,37 +710,16 @@ inline bool IsNegatableInt64(absl::int128 x) { x > absl::int128(std::numeric_limits::min()); } -// These functions are copied from MathUtils. However, the original ones are -// incompatible with absl::int128 as MathLimits::kIsInteger == -// false. 
-template -IntType CeilOrFloorOfRatio(IntType numerator, IntType denominator) { - static_assert(std::numeric_limits::is_integer, - "CeilOfRatio is only defined for integral types"); - DCHECK_NE(0, denominator) << "Division by zero is not supported."; - DCHECK(numerator != std::numeric_limits::min() || denominator != -1) - << "Dividing " << numerator << "by -1 is not supported: it would SIGFPE"; - - const IntType rounded_toward_zero = numerator / denominator; - const bool needs_round = (numerator % denominator) != 0; - const bool same_sign = (numerator >= 0) == (denominator >= 0); - - if (ceil) { // Compile-time condition: not an actual branching - return rounded_toward_zero + static_cast(same_sign && needs_round); - } else { - return rounded_toward_zero - - static_cast(!same_sign && needs_round); - } -} - template +ABSL_DEPRECATE_AND_INLINE() IntType CeilOfRatio(IntType numerator, IntType denominator) { - return CeilOrFloorOfRatio(numerator, denominator); + return MathUtil::CeilOfRatio(numerator, denominator); } template +ABSL_DEPRECATE_AND_INLINE() IntType FloorOfRatio(IntType numerator, IntType denominator) { - return CeilOrFloorOfRatio(numerator, denominator); + return MathUtil::FloorOfRatio(numerator, denominator); } template diff --git a/ortools/util/BUILD.bazel b/ortools/util/BUILD.bazel index b2ee31597e..38c67dd3ad 100644 --- a/ortools/util/BUILD.bazel +++ b/ortools/util/BUILD.bazel @@ -129,6 +129,7 @@ cc_library( deps = [ ":saturated_arithmetic", "//ortools/base", + "//ortools/base:dump_vars", "//ortools/base:types", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/strings", diff --git a/ortools/util/saturated_arithmetic.h b/ortools/util/saturated_arithmetic.h index 321426143d..00bf7b92fd 100644 --- a/ortools/util/saturated_arithmetic.h +++ b/ortools/util/saturated_arithmetic.h @@ -299,6 +299,19 @@ inline int64_t CapAdd(int64_t x, int64_t y) { #endif } +// This avoid the need to convert to int64_t min/max and is about twice as fast +// if 
it corresponds to your use case. +inline bool AddIntoOverflow(int64_t x, int64_t* y) { +#if defined(__clang__) + return __builtin_add_overflow(x, *y, y); +#else + const int64_t result = TwosComplementAddition(x, *y); + if (AddHadOverflow(x, *y, result)) return true; + *y = result; + return false; +#endif +} + inline void CapAddTo(int64_t x, int64_t* y) { *y = CapAdd(*y, x); } inline int64_t CapSub(int64_t x, int64_t y) { diff --git a/ortools/util/strong_integers.h b/ortools/util/strong_integers.h index 6153487a9c..27d436158b 100644 --- a/ortools/util/strong_integers.h +++ b/ortools/util/strong_integers.h @@ -216,6 +216,7 @@ class StrongInt64 { } constexpr int64_t value() const { return value_; } + int64_t* mutable_value() { return &value_; } template // Needed for StrongVector. constexpr ValType value() const { From 163685cf61411bd9e3b0b681de6dc9750a7d1b60 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 23 Sep 2024 11:56:14 +0200 Subject: [PATCH 006/105] sat: Fixup test --- ortools/sat/feasibility_jump_test.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ortools/sat/feasibility_jump_test.cc b/ortools/sat/feasibility_jump_test.cc index e20cac1e0d..c7934af218 100644 --- a/ortools/sat/feasibility_jump_test.cc +++ b/ortools/sat/feasibility_jump_test.cc @@ -27,8 +27,8 @@ TEST(JumpTableTest, TestCachesCalls) { [&](int) { return std::make_pair(++num_calls, -1.0); }); jumps.RecomputeAll(1); - EXPECT_EQ(jumps.GetJump(0), std::make_pair(1, -1.0)); - EXPECT_EQ(jumps.GetJump(0), std::make_pair(1, -1.0)); + EXPECT_EQ(jumps.GetJump(0), std::make_pair(int64_t{1}, -1.0)); + EXPECT_EQ(jumps.GetJump(0), std::make_pair(int64_t{1}, -1.0)); EXPECT_EQ(num_calls, 1); } @@ -42,7 +42,7 @@ TEST(JumpTableTest, TestNeedsRecomputationOneVar) { jumps.GetJump(0); jumps.Recompute(0); - EXPECT_EQ(jumps.GetJump(0), std::make_pair(2, -1.0)); + EXPECT_EQ(jumps.GetJump(0), std::make_pair(int64_t{2}, -1.0)); EXPECT_EQ(num_calls, 2); } @@ -57,8 +57,8 @@ 
TEST(JumpTableTest, TestNeedsRecomputationMultiVar) { jumps.GetJump(1); jumps.Recompute(0); - EXPECT_EQ(jumps.GetJump(0), std::make_pair(3, 0)); - EXPECT_EQ(jumps.GetJump(1), std::make_pair(2, 1)); + EXPECT_EQ(jumps.GetJump(0), std::make_pair(int64_t{3}, 0.0)); + EXPECT_EQ(jumps.GetJump(1), std::make_pair(int64_t{2}, 1.0)); EXPECT_EQ(num_calls, 3); } @@ -84,7 +84,7 @@ TEST(JumpTableTest, TestSetJump) { EXPECT_FALSE(jumps.NeedRecomputation(0)); EXPECT_GE(jumps.Score(0), 0); - EXPECT_EQ(jumps.GetJump(0), std::make_pair(1, 1.0)); + EXPECT_EQ(jumps.GetJump(0), std::make_pair(int64_t{1}, 1.0)); EXPECT_EQ(num_calls, 0); } From f3f8830ccb4a403d698609710bb2486512fc7d8a Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 23 Sep 2024 15:27:43 +0200 Subject: [PATCH 007/105] minor speedup --- ortools/glop/lu_factorization.cc | 3 ++- ortools/glop/revised_simplex.cc | 18 ++++++++++-------- ortools/glop/revised_simplex.h | 2 +- ortools/glop/variables_info.cc | 6 +++--- ortools/lp_data/sparse.cc | 26 +++++++++++++++----------- 5 files changed, 31 insertions(+), 24 deletions(-) diff --git a/ortools/glop/lu_factorization.cc b/ortools/glop/lu_factorization.cc index de30ea0f22..8deed0b61c 100644 --- a/ortools/glop/lu_factorization.cc +++ b/ortools/glop/lu_factorization.cc @@ -405,7 +405,8 @@ bool LuFactorization::LeftSolveLWithNonZeros( ClearAndResizeVectorWithNonZeros(x->size(), result_before_permutation); x->swap(result_before_permutation->values); if (nz->empty()) { - for (RowIndex row(0); row < inverse_row_perm_.size(); ++row) { + const RowIndex num_rows = inverse_row_perm_.size(); + for (RowIndex row(0); row < num_rows; ++row) { const Fractional value = (*result_before_permutation)[row]; if (value != 0.0) { const RowIndex permuted_row = inverse_row_perm_[row]; diff --git a/ortools/glop/revised_simplex.cc b/ortools/glop/revised_simplex.cc index 28ef73d4c9..88549afa30 100644 --- a/ortools/glop/revised_simplex.cc +++ b/ortools/glop/revised_simplex.cc @@ -451,7 +451,7 @@ Status 
RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) { "PRIMAL_UNBOUNDED was reported, but the tolerance are good " "and the unbounded ray is not great."); SOLVER_LOG(logger_, - "The difference between unbounded and optimal can depends " + "The difference between unbounded and optimal can depend " "on a slight change of tolerance, trying to see if we are " "at OPTIMAL after postsolve."); problem_status_ = ProblemStatus::OPTIMAL; @@ -1087,7 +1087,9 @@ bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged( // This function work whether the lp is in equation form (with slack) or // without, since the objective of the slacks are always zero. DCHECK_GE(num_cols_, lp.num_variables()); - for (ColIndex col(lp.num_variables()); col < num_cols_; ++col) { + + const auto obj_coeffs = lp.objective_coefficients().const_view(); + for (ColIndex col(obj_coeffs.size()); col < num_cols_; ++col) { if (objective_[col] != 0.0) { objective_is_unchanged = false; objective_[col] = 0.0; @@ -1096,8 +1098,8 @@ bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged( if (lp.IsMaximizationProblem()) { // Note that we use the minimization version of the objective internally. 
- for (ColIndex col(0); col < lp.num_variables(); ++col) { - const Fractional coeff = -lp.objective_coefficients()[col]; + for (ColIndex col(0); col < obj_coeffs.size(); ++col) { + const Fractional coeff = -obj_coeffs[col]; if (objective_[col] != coeff) { objective_is_unchanged = false; objective_[col] = coeff; @@ -1106,8 +1108,8 @@ bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged( objective_offset_ = -lp.objective_offset(); objective_scaling_factor_ = -lp.objective_scaling_factor(); } else { - for (ColIndex col(0); col < lp.num_variables(); ++col) { - const Fractional coeff = lp.objective_coefficients()[col]; + for (ColIndex col(0); col < obj_coeffs.size(); ++col) { + const Fractional coeff = obj_coeffs[col]; if (objective_[col] != coeff) { objective_is_unchanged = false; objective_[col] = coeff; @@ -1120,7 +1122,7 @@ bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged( return objective_is_unchanged; } -void RevisedSimplex::InitializeObjectiveLimit(const LinearProgram& lp) { +void RevisedSimplex::InitializeObjectiveLimit() { objective_limit_reached_ = false; DCHECK(std::isfinite(objective_offset_)); DCHECK(std::isfinite(objective_scaling_factor_)); @@ -1418,7 +1420,7 @@ Status RevisedSimplex::Initialize(const LinearProgram& lp) { } } - InitializeObjectiveLimit(lp); + InitializeObjectiveLimit(); // Computes the variable name as soon as possible for logging. // TODO(user): do we really need to store them? we could just compute them diff --git a/ortools/glop/revised_simplex.h b/ortools/glop/revised_simplex.h index 3f8d9c8b4e..a4fa4b51ad 100644 --- a/ortools/glop/revised_simplex.h +++ b/ortools/glop/revised_simplex.h @@ -414,7 +414,7 @@ class RevisedSimplex { bool InitializeObjectiveAndTestIfUnchanged(const LinearProgram& lp); // Computes the stopping criterion on the problem objective value. - void InitializeObjectiveLimit(const LinearProgram& lp); + void InitializeObjectiveLimit(); // Initializes the starting basis. 
In most cases it starts by the all slack // basis and tries to apply some heuristics to replace fixed variables. diff --git a/ortools/glop/variables_info.cc b/ortools/glop/variables_info.cc index 3830178e80..d100f15eea 100644 --- a/ortools/glop/variables_info.cc +++ b/ortools/glop/variables_info.cc @@ -120,13 +120,13 @@ void VariablesInfo::InitializeFromBasisState(ColIndex first_slack_col, // Compute the status for all the columns (note that the slack variables are // already added at the end of the matrix at this stage). + const int state_size = state.statuses.size().value(); for (ColIndex col(0); col < num_cols; ++col) { // Start with the given "warm" status from the BasisState if it exists. VariableStatus status; - if (col < first_new_col && col < state.statuses.size()) { + if (col < first_new_col && col < state_size) { status = state.statuses[col]; - } else if (col >= first_slack_col && - col - num_new_cols < state.statuses.size()) { + } else if (col >= first_slack_col && col - num_new_cols < state_size) { status = state.statuses[col - num_new_cols]; } else { UpdateToNonBasicStatus(col, DefaultVariableStatus(col)); diff --git a/ortools/lp_data/sparse.cc b/ortools/lp_data/sparse.cc index 41ff043ee6..1928aa7d55 100644 --- a/ortools/lp_data/sparse.cc +++ b/ortools/lp_data/sparse.cc @@ -463,15 +463,16 @@ void CompactSparseMatrix::PopulateFromMatrixView(const MatrixView& input) { void CompactSparseMatrix::PopulateFromSparseMatrixAndAddSlacks( const SparseMatrix& input) { - num_cols_ = input.num_cols() + RowToColIndex(input.num_rows()); + const int input_num_cols = input.num_cols().value(); + num_cols_ = input_num_cols + RowToColIndex(input.num_rows()); num_rows_ = input.num_rows(); const EntryIndex num_entries = input.num_entries() + EntryIndex(num_rows_.value()); starts_.assign(num_cols_ + 1, EntryIndex(0)); - coefficients_.assign(num_entries, 0.0); - rows_.assign(num_entries, RowIndex(0)); + coefficients_.resize(num_entries, 0.0); + rows_.resize(num_entries, 
RowIndex(0)); EntryIndex index(0); - for (ColIndex col(0); col < input.num_cols(); ++col) { + for (ColIndex col(0); col < input_num_cols; ++col) { starts_[col] = index; for (const SparseColumn::Entry e : input.column(col)) { coefficients_[index] = e.coefficient(); @@ -480,11 +481,12 @@ void CompactSparseMatrix::PopulateFromSparseMatrixAndAddSlacks( } } for (RowIndex row(0); row < num_rows_; ++row) { - starts_[input.num_cols() + RowToColIndex(row)] = index; + starts_[input_num_cols + RowToColIndex(row)] = index; coefficients_[index] = 1.0; rows_[index] = row; ++index; } + DCHECK_EQ(index, num_entries); starts_[num_cols_] = index; } @@ -496,11 +498,12 @@ void CompactSparseMatrix::PopulateFromTranspose( // Fill the starts_ vector by computing the number of entries of each rows and // then doing a cumulative sum. After this step starts_[col + 1] will be the // actual start of the column col when we are done. - starts_.assign(num_cols_ + 2, EntryIndex(0)); + const ColIndex start_size = num_cols_ + 2; + starts_.assign(start_size, EntryIndex(0)); for (const RowIndex row : input.rows_) { ++starts_[RowToColIndex(row) + 2]; } - for (ColIndex col(2); col < starts_.size(); ++col) { + for (ColIndex col(2); col < start_size; ++col) { starts_[col] += starts_[col - 1]; } coefficients_.resize(starts_.back(), 0.0); @@ -662,12 +665,13 @@ void TriangularMatrix::CloseCurrentColumn(Fractional diagonal_value) { // TODO(user): This is currently not used by all matrices. It will be good // to fill it only when needed. 
DCHECK_LT(num_cols_, pruned_ends_.size()); - pruned_ends_[num_cols_] = coefficients_.size(); + const EntryIndex num_entries = coefficients_.size(); + pruned_ends_[num_cols_] = num_entries; ++num_cols_; DCHECK_LT(num_cols_, starts_.size()); - starts_[num_cols_] = coefficients_.size(); - if (first_non_identity_column_ == num_cols_ - 1 && coefficients_.empty() && - diagonal_value == 1.0) { + starts_[num_cols_] = num_entries; + if (first_non_identity_column_ == num_cols_ - 1 && diagonal_value == 1.0 && + num_entries == 0) { first_non_identity_column_ = num_cols_; } all_diagonal_coefficients_are_one_ = From f053be97865affaa1cfaa18dfa1c23bb265afac7 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 23 Sep 2024 15:28:18 +0200 Subject: [PATCH 008/105] [CP-SAT] fix presolve bug; fix callback bug --- ortools/sat/BUILD.bazel | 1 + ortools/sat/cp_model_mapping.h | 7 + ortools/sat/cp_model_presolve.cc | 168 +++++++++++++------- ortools/sat/feasibility_jump_test.cc | 1 + ortools/sat/feasibility_pump.cc | 7 +- ortools/sat/presolve_context.cc | 27 +++- ortools/sat/presolve_context.h | 3 +- ortools/sat/python/cp_model_test.py | 225 ++++++++++++++++++++++++++- ortools/sat/swig_helper.cc | 24 ++- ortools/sat/swig_helper.h | 13 +- 10 files changed, 385 insertions(+), 91 deletions(-) diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 87ac5cd71e..9991d8998d 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -3004,6 +3004,7 @@ cc_library( ":cp_model_utils", ":model", ":sat_parameters_cc_proto", + ":util", "//ortools/util:logging", "//ortools/util:sorted_interval_list", "//ortools/util:time_limit", diff --git a/ortools/sat/cp_model_mapping.h b/ortools/sat/cp_model_mapping.h index 58a6849f74..530a0a21b5 100644 --- a/ortools/sat/cp_model_mapping.h +++ b/ortools/sat/cp_model_mapping.h @@ -172,6 +172,13 @@ class CpModelMapping { return reverse_integer_map_[var]; } + // This one should only be used when we have a mapping. 
+ int GetProtoLiteralFromLiteral(sat::Literal lit) const { + const int proto_var = GetProtoVariableFromBooleanVariable(lit.Variable()); + DCHECK_NE(proto_var, -1); + return lit.IsPositive() ? proto_var : NegatedRef(proto_var); + } + const std::vector& GetVariableMapping() const { return integers_; } diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index f7e2c33576..4b6720e106 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -2508,7 +2508,7 @@ bool CpModelPresolver::PresolveLinearOfSizeOne(ConstraintProto* ct) { context_->UpdateRuleStats("linear1: infeasible"); return MarkConstraintAsFalse(ct); } - if (rhs == context_->DomainOf(var)) { + if (rhs == var_domain) { context_->UpdateRuleStats("linear1: always true"); return RemoveConstraint(ct); } @@ -2544,16 +2544,28 @@ bool CpModelPresolver::PresolveLinearOfSizeOne(ConstraintProto* ct) { } // Detect encoding. + bool changed = false; if (ct->enforcement_literal().size() == 1) { // If we already have an encoding literal, this constraint is really // an implication. - const int lit = ct->enforcement_literal(0); + int lit = ct->enforcement_literal(0); + + // For correctness below, it is important lit is the canonical literal, + // otherwise we might remove the constraint even though it is the one + // defining an encoding literal. 
+ const int representative = context_->GetLiteralRepresentative(lit); + if (lit != representative) { + lit = representative; + ct->set_enforcement_literal(0, lit); + context_->UpdateRuleStats("linear1: remapped enforcement literal"); + changed = true; + } if (rhs.IsFixed()) { const int64_t value = rhs.FixedValue(); int encoding_lit; if (context_->HasVarValueEncoding(var, value, &encoding_lit)) { - if (lit == encoding_lit) return false; + if (lit == encoding_lit) return changed; context_->AddImplication(lit, encoding_lit); context_->UpdateNewConstraintsVariableUsage(); ct->Clear(); @@ -2567,7 +2579,7 @@ bool CpModelPresolver::PresolveLinearOfSizeOne(ConstraintProto* ct) { } context_->UpdateNewConstraintsVariableUsage(); } - return false; + return changed; } const Domain complement = rhs.Complement().IntersectionWith(var_domain); @@ -2575,7 +2587,7 @@ bool CpModelPresolver::PresolveLinearOfSizeOne(ConstraintProto* ct) { const int64_t value = complement.FixedValue(); int encoding_lit; if (context_->HasVarValueEncoding(var, value, &encoding_lit)) { - if (NegatedRef(lit) == encoding_lit) return false; + if (NegatedRef(lit) == encoding_lit) return changed; context_->AddImplication(lit, NegatedRef(encoding_lit)); context_->UpdateNewConstraintsVariableUsage(); ct->Clear(); @@ -2589,11 +2601,11 @@ bool CpModelPresolver::PresolveLinearOfSizeOne(ConstraintProto* ct) { } context_->UpdateNewConstraintsVariableUsage(); } - return false; + return changed; } } - return false; + return changed; } bool CpModelPresolver::PresolveLinearOfSizeTwo(ConstraintProto* ct) { @@ -7110,9 +7122,6 @@ void CpModelPresolver::Probe() { } probing_timer->AddCounter("fixed_bools", num_fixed); - DetectDuplicateConstraintsWithDifferentEnforcements( - mapping, implication_graph, model.GetOrCreate()); - int num_equiv = 0; int num_changed_bounds = 0; const int num_variables = context_->working_model->variables().size(); @@ -7148,6 +7157,12 @@ void CpModelPresolver::Probe() { 
probing_timer->AddCounter("new_binary_clauses", prober->num_new_binary_clauses()); + // Note that we prefer to run this after we exported all equivalence to the + // context, so that our enforcement list can be presolved to the best of our + // knowledge. + DetectDuplicateConstraintsWithDifferentEnforcements( + mapping, implication_graph, model.GetOrCreate()); + // Stop probing timer now and display info. probing_timer.reset(); @@ -8888,37 +8903,20 @@ void CpModelPresolver::DetectDuplicateConstraintsWithDifferentEnforcements( for (const auto& [dup, rep] : duplicates_without_enforcement) { auto* dup_ct = context_->working_model->mutable_constraints(dup); auto* rep_ct = context_->working_model->mutable_constraints(rep); - if (rep_ct->constraint_case() == ConstraintProto::CONSTRAINT_NOT_SET) { - continue; + + // Make sure our enforcement list are up to date: nothing fixed and that + // its uses the literal representatives. + if (PresolveEnforcementLiteral(dup_ct)) { + context_->UpdateConstraintVariableUsage(dup); + } + if (PresolveEnforcementLiteral(rep_ct)) { + context_->UpdateConstraintVariableUsage(rep); } - // If we have a trail, we can check if any variable of the enforcement is - // fixed to false. This is useful for what follows since calling - // implication_graph->DirectImplications() is invalid for fixed variables. 
- if (trail != nullptr) { - bool found_false_enforcement = false; - for (const int c : {dup, rep}) { - for (const int l : - context_->working_model->constraints(c).enforcement_literal()) { - if (trail->Assignment().LiteralIsFalse(mapping->Literal(l))) { - found_false_enforcement = true; - break; - } - } - if (found_false_enforcement) { - context_->UpdateRuleStats("enforcement: false literal"); - if (c == rep) { - rep_ct->Swap(dup_ct); - context_->UpdateConstraintVariableUsage(rep); - } - dup_ct->Clear(); - context_->UpdateConstraintVariableUsage(dup); - break; - } - } - if (found_false_enforcement) { - continue; - } + // Skip this pair if one of the constraint was simplified + if (rep_ct->constraint_case() == ConstraintProto::CONSTRAINT_NOT_SET || + dup_ct->constraint_case() == ConstraintProto::CONSTRAINT_NOT_SET) { + continue; } // If one of them has no enforcement, then the other can be ignored. @@ -8936,10 +8934,7 @@ void CpModelPresolver::DetectDuplicateConstraintsWithDifferentEnforcements( // Special case. This looks specific but users might reify with a cost // a duplicate constraint. In this case, no need to have two variables, // we can make them equal by duality argument. - const int a = rep_ct->enforcement_literal(0); - const int b = dup_ct->enforcement_literal(0); - if (context_->IsFixed(a) || context_->IsFixed(b)) continue; - + // // TODO(user): Deal with more general situation? Note that we already // do something similar in dual_bound_strengthening.Strengthen() were we // are more general as we just require an unique blocking constraint rather @@ -8949,6 +8944,8 @@ void CpModelPresolver::DetectDuplicateConstraintsWithDifferentEnforcements( // we can also add the equality. Alternatively, we can just introduce a new // variable and merge all duplicate constraint into 1 + bunch of boolean // constraints liking enforcements. 
+ const int a = rep_ct->enforcement_literal(0); + const int b = dup_ct->enforcement_literal(0); if (context_->VariableWithCostIsUniqueAndRemovable(a) && context_->VariableWithCostIsUniqueAndRemovable(b)) { // Both these case should be presolved before, but it is easy to deal with @@ -9007,19 +9004,19 @@ void CpModelPresolver::DetectDuplicateConstraintsWithDifferentEnforcements( // B, then constraint A is redundant and we can remove it. const int c_a = i == 0 ? dup : rep; const int c_b = i == 0 ? rep : dup; + const auto& ct_a = context_->working_model->constraints(c_a); + const auto& ct_b = context_->working_model->constraints(c_b); enforcement_vars.clear(); implications_used.clear(); - for (const int proto_lit : - context_->working_model->constraints(c_b).enforcement_literal()) { + for (const int proto_lit : ct_b.enforcement_literal()) { const Literal lit = mapping->Literal(proto_lit); - if (trail->Assignment().LiteralIsTrue(lit)) continue; + DCHECK(!trail->Assignment().LiteralIsAssigned(lit)); enforcement_vars.insert(lit); } - for (const int proto_lit : - context_->working_model->constraints(c_a).enforcement_literal()) { + for (const int proto_lit : ct_a.enforcement_literal()) { const Literal lit = mapping->Literal(proto_lit); - if (trail->Assignment().LiteralIsTrue(lit)) continue; + DCHECK(!trail->Assignment().LiteralIsAssigned(lit)); for (const Literal implication_lit : implication_graph->DirectImplications(lit)) { auto extracted = enforcement_vars.extract(implication_lit); @@ -9029,6 +9026,71 @@ void CpModelPresolver::DetectDuplicateConstraintsWithDifferentEnforcements( } } if (enforcement_vars.empty()) { + // Tricky: Because we keep track of literal <=> var == value, we + // cannot easily simplify linear1 here. 
This is because a scenario + // like this can happen: + // + // We have registered the fact that a <=> X=1 because we saw two + // constraints a => X=1 and not(a) => X!= 1 + // + // Now, we are here and we have: + // a => X=1, b => X=1, a => b + // So we rewrite this as + // a => b, b => X=1 + // + // But later, the PresolveLinearOfSizeOne() see + // b => X=1 and just rewrite this as b => a since (a <=> X=1). + // This is wrong because the constraint "b => X=1" is needed for the + // equivalence (a <=> X=1), but we lost that fact. + // + // Note(user): In the scenario above we can see that a <=> b, and if + // we know that fact, then the transformation is correctly handled. + // The bug was triggered when the Probing finished early due to time + // limit and we never detected that equivalence. + // + // TODO(user): Try to find a cleaner way to handle this. We could + // query our HasVarValueEncoding() directly here and directly detect a + // <=> b. However we also need to figure the case of + // half-implications. + { + if (ct_a.constraint_case() == ConstraintProto::kLinear && + ct_a.linear().vars().size() == 1 && + ct_a.enforcement_literal().size() == 1) { + const int var = ct_a.linear().vars(0); + const Domain var_domain = context_->DomainOf(var); + const Domain rhs = + ReadDomainFromProto(ct_a.linear()) + .InverseMultiplicationBy(ct_a.linear().coeffs(0)) + .IntersectionWith(var_domain); + + // IsFixed() do not work on empty domain. 
+ if (rhs.IsEmpty()) { + context_->UpdateRuleStats("duplicate: linear1 infeasible"); + if (!MarkConstraintAsFalse(rep_ct)) return; + if (!MarkConstraintAsFalse(dup_ct)) return; + context_->UpdateConstraintVariableUsage(rep); + context_->UpdateConstraintVariableUsage(dup); + continue; + } + if (rhs == var_domain) { + context_->UpdateRuleStats("duplicate: linear1 always true"); + rep_ct->Clear(); + dup_ct->Clear(); + context_->UpdateConstraintVariableUsage(rep); + context_->UpdateConstraintVariableUsage(dup); + continue; + } + + // We skip if it is a var == value or var != value constraint. + if (rhs.IsFixed() || + rhs.Complement().IntersectionWith(var_domain).IsFixed()) { + context_->UpdateRuleStats( + "TODO duplicate: skipped identical encoding constraints"); + continue; + } + } + } + context_->UpdateRuleStats( "duplicate: identical constraint with implied enforcements"); if (c_a == rep) { @@ -9043,12 +9105,8 @@ void CpModelPresolver::DetectDuplicateConstraintsWithDifferentEnforcements( // graph. This is because in some case the implications are only true // in the presence of the "duplicated" constraints. for (const auto& [a, b] : implications_used) { - const int var_a = - mapping->GetProtoVariableFromBooleanVariable(a.Variable()); - const int proto_lit_a = a.IsPositive() ? var_a : NegatedRef(var_a); - const int var_b = - mapping->GetProtoVariableFromBooleanVariable(b.Variable()); - const int proto_lit_b = b.IsPositive() ? 
var_b : NegatedRef(var_b); + const int proto_lit_a = mapping->GetProtoLiteralFromLiteral(a); + const int proto_lit_b = mapping->GetProtoLiteralFromLiteral(b); context_->AddImplication(proto_lit_a, proto_lit_b); } context_->UpdateNewConstraintsVariableUsage(); diff --git a/ortools/sat/feasibility_jump_test.cc b/ortools/sat/feasibility_jump_test.cc index c7934af218..0e848d03ff 100644 --- a/ortools/sat/feasibility_jump_test.cc +++ b/ortools/sat/feasibility_jump_test.cc @@ -13,6 +13,7 @@ #include "ortools/sat/feasibility_jump.h" +#include #include #include "gtest/gtest.h" diff --git a/ortools/sat/feasibility_pump.cc b/ortools/sat/feasibility_pump.cc index e157703049..5ff2c7dd85 100644 --- a/ortools/sat/feasibility_pump.cc +++ b/ortools/sat/feasibility_pump.cc @@ -40,6 +40,7 @@ #include "ortools/sat/sat_parameters.pb.h" #include "ortools/sat/sat_solver.h" #include "ortools/sat/synchronization.h" +#include "ortools/sat/util.h" #include "ortools/util/saturated_arithmetic.h" #include "ortools/util/sorted_interval_list.h" #include "ortools/util/strong_integers.h" @@ -610,11 +611,11 @@ bool FeasibilityPump::PropagationRounding() { } const int64_t rounded_value = - static_cast(std::round(lp_solution_[var_index])); + SafeDoubleToInt64(std::round(lp_solution_[var_index])); const int64_t floor_value = - static_cast(std::floor(lp_solution_[var_index])); + SafeDoubleToInt64(std::floor(lp_solution_[var_index])); const int64_t ceil_value = - static_cast(std::ceil(lp_solution_[var_index])); + SafeDoubleToInt64(std::ceil(lp_solution_[var_index])); const bool floor_is_in_domain = (domain.Contains(floor_value) && lb.value() <= floor_value); diff --git a/ortools/sat/presolve_context.cc b/ortools/sat/presolve_context.cc index 32de1f0cf7..a5dab8dd6b 100644 --- a/ortools/sat/presolve_context.cc +++ b/ortools/sat/presolve_context.cc @@ -1371,8 +1371,9 @@ void PresolveContext::CanonicalizeDomainOfSizeTwo(int var) { max_literal = max_it->second.Get(this); if (min_literal != 
NegatedRef(max_literal)) { UpdateRuleStats("variables with 2 values: merge encoding literals"); - StoreBooleanEqualityRelation(min_literal, NegatedRef(max_literal)); - if (is_unsat_) return; + if (!StoreBooleanEqualityRelation(min_literal, NegatedRef(max_literal))) { + return; + } } min_literal = GetLiteralRepresentative(min_literal); max_literal = GetLiteralRepresentative(max_literal); @@ -1419,7 +1420,7 @@ void PresolveContext::CanonicalizeDomainOfSizeTwo(int var) { } } -void PresolveContext::InsertVarValueEncodingInternal(int literal, int var, +bool PresolveContext::InsertVarValueEncodingInternal(int literal, int var, int64_t value, bool add_constraints) { DCHECK(RefIsPositive(var)); @@ -1446,10 +1447,12 @@ void PresolveContext::InsertVarValueEncodingInternal(int literal, int var, if (literal != previous_literal) { UpdateRuleStats( "variables: merge equivalent var value encoding literals"); - StoreBooleanEqualityRelation(literal, previous_literal); + if (!StoreBooleanEqualityRelation(literal, previous_literal)) { + return false; + } } } - return; + return true; } if (DomainOf(var).Size() == 2) { @@ -1461,6 +1464,9 @@ void PresolveContext::InsertVarValueEncodingInternal(int literal, int var, AddImplyInDomain(literal, var, Domain(value)); AddImplyInDomain(NegatedRef(literal), var, Domain(value).Complement()); } + + // The canonicalization might have proven UNSAT. + return !ModelIsUnsat(); } bool PresolveContext::InsertHalfVarValueEncoding(int literal, int var, @@ -1484,8 +1490,10 @@ bool PresolveContext::InsertHalfVarValueEncoding(int literal, int var, if (other_set.contains({NegatedRef(literal), var, value})) { UpdateRuleStats("variables: detect fully reified value encoding"); const int imply_eq_literal = imply_eq ? 
literal : NegatedRef(literal); - InsertVarValueEncodingInternal(imply_eq_literal, var, value, - /*add_constraints=*/false); + if (!InsertVarValueEncodingInternal(imply_eq_literal, var, value, + /*add_constraints=*/false)) { + return false; + } } return true; @@ -1505,7 +1513,10 @@ bool PresolveContext::InsertVarValueEncoding(int literal, int var, return SetLiteralToFalse(literal); } literal = GetLiteralRepresentative(literal); - InsertVarValueEncodingInternal(literal, var, value, /*add_constraints=*/true); + if (!InsertVarValueEncodingInternal(literal, var, value, + /*add_constraints=*/true)) { + return false; + } eq_half_encoding_.insert({literal, var, value}); neq_half_encoding_.insert({NegatedRef(literal), var, value}); diff --git a/ortools/sat/presolve_context.h b/ortools/sat/presolve_context.h index 47cc332198..faa7a39800 100644 --- a/ortools/sat/presolve_context.h +++ b/ortools/sat/presolve_context.h @@ -664,7 +664,8 @@ class PresolveContext { bool imply_eq); // Insert fully reified var-value encoding. - void InsertVarValueEncodingInternal(int literal, int var, int64_t value, + // Returns false if this make the problem infeasible. + bool InsertVarValueEncodingInternal(int literal, int var, int64_t value, bool add_constraints); SolverLogger* logger_; diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index d3fb0878a7..c4f864cf17 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Tests for ortools.sat.python.cp_model.""" +import itertools from absl.testing import absltest import pandas as pd @@ -95,6 +95,20 @@ class RecordSolution(cp_model.CpSolverSolutionCallback): return self.__bool_var_values +class TimeRecorder(cp_model.CpSolverSolutionCallback): + + def __init__(self, default_time: float) -> None: + super().__init__() + self.__last_time = default_time + + def on_solution_callback(self) -> None: + self.__last_time = self.wall_time + + @property + def last_time(self): + return self.__last_time + + class LogToString: """Record log in a string.""" @@ -1649,6 +1663,215 @@ class CpModelTest(absltest.TestCase): ) self.assertLen(model.proto.constraints, 13) + def testIssue4376SatModel(self): + print("testIssue4376SatModel") + letters: str = "BCFLMRT" + + def symbols_from_string(text: str) -> list[int]: + return [letters.index(char) for char in text] + + def rotate_symbols(symbols: list[int], turns: int) -> list[int]: + return symbols[turns:] + symbols[:turns] + + data = """FMRC +FTLB +MCBR +FRTM +FBTM +BRFM +BTRM +BCRM +RTCF +TFRC +CTRM +CBTM +TFBM +TCBM +CFTM +BLTR +RLFM +CFLM +CRML +FCLR +FBTR +TBRF +RBCF +RBCT +BCTF +TFCR +CBRT +FCBT +FRTB +RBCM +MTFC +MFTC +MBFC +RTBM +RBFM +TRFM""" + + tiles = [symbols_from_string(line) for line in data.splitlines()] + + model = cp_model.CpModel() + + # choices[i, x, y, r] is true iff we put tile i in cell (x,y) with + # rotation r. + choices = {} + for i in range(len(tiles)): + for x in range(6): + for y in range(6): + for r in range(4): + choices[(i, x, y, r)] = model.new_bool_var( + f"tile_{i}_{x}_{y}_{r}" + ) + + # corners[x, y, s] is true iff the corner at (x,y) contains symbol s. + corners = {} + for x in range(7): + for y in range(7): + for s in range(7): + corners[(x, y, s)] = model.new_bool_var(f"corner_{x}_{y}_{s}") + + # Placing a tile puts a symbol in each corner. 
+ for (i, x, y, r), choice in choices.items(): + symbols = rotate_symbols(tiles[i], r) + model.add_implication(choice, corners[x, y, symbols[0]]) + model.add_implication(choice, corners[x, y + 1, symbols[1]]) + model.add_implication(choice, corners[x + 1, y + 1, symbols[2]]) + model.add_implication(choice, corners[x + 1, y, symbols[3]]) + + # We must make exactly one choice for each tile. + for i in range(len(tiles)): + tmp_literals = [] + for x in range(6): + for y in range(6): + for r in range(4): + tmp_literals.append(choices[(i, x, y, r)]) + model.add_exactly_one(tmp_literals) + + # We must make exactly one choice for each square. + for x, y in itertools.product(range(6), range(6)): + tmp_literals = [] + for i in range(len(tiles)): + for r in range(4): + tmp_literals.append(choices[(i, x, y, r)]) + model.add_exactly_one(tmp_literals) + + # Each corner contains exactly one symbol. + for x, y in itertools.product(range(7), range(7)): + model.add_exactly_one(corners[x, y, s] for s in range(7)) + + # Solve. + solver = cp_model.CpSolver() + solver.parameters.num_workers = 8 + solver.parameters.max_time_in_seconds = 20 + solver.parameters.log_search_progress = True + solver.parameters.cp_model_presolve = False + solver.parameters.symmetry_level = 0 + + callback = TimeRecorder(solver.parameters.max_time_in_seconds) + solver.Solve(model, callback) + self.assertLess(solver.wall_time, callback.last_time + 5.0) + + def testIssue4376MinimizeModel(self): + print("testIssue4376MinimizeModel") + + model = cp_model.CpModel() + + jobs = [ + [3, 3], # [duration, width] + [2, 5], + [1, 3], + [3, 7], + [7, 3], + [2, 2], + [2, 2], + [5, 5], + [10, 2], + [4, 3], + [2, 6], + [1, 2], + [6, 8], + [4, 5], + [3, 7], + ] + + max_width = 10 + + horizon = sum(t[0] for t in jobs) + num_jobs = len(jobs) + all_jobs = range(num_jobs) + + intervals = [] + intervals0 = [] + intervals1 = [] + performed = [] + starts = [] + ends = [] + demands = [] + + for i in all_jobs: + # Create main interval. 
+ start = model.new_int_var(0, horizon, f"start_{i}") + duration = jobs[i][0] + end = model.new_int_var(0, horizon, f"end_{i}") + interval = model.new_interval_var(start, duration, end, f"interval_{i}") + starts.append(start) + intervals.append(interval) + ends.append(end) + demands.append(jobs[i][1]) + + # Create an optional copy of interval to be executed on machine 0. + performed_on_m0 = model.new_bool_var(f"perform_{i}_on_m0") + performed.append(performed_on_m0) + start0 = model.new_int_var(0, horizon, f"start_{i}_on_m0") + end0 = model.new_int_var(0, horizon, f"end_{i}_on_m0") + interval0 = model.new_optional_interval_var( + start0, duration, end0, performed_on_m0, f"interval_{i}_on_m0" + ) + intervals0.append(interval0) + + # Create an optional copy of interval to be executed on machine 1. + start1 = model.new_int_var(0, horizon, f"start_{i}_on_m1") + end1 = model.new_int_var(0, horizon, f"end_{i}_on_m1") + interval1 = model.new_optional_interval_var( + start1, + duration, + end1, + ~performed_on_m0, + f"interval_{i}_on_m1", + ) + intervals1.append(interval1) + + # We only propagate the constraint if the tasks is performed on the + # machine. + model.add(start0 == start).only_enforce_if(performed_on_m0) + model.add(start1 == start).only_enforce_if(~performed_on_m0) + + # Width constraint (modeled as a cumulative) + model.add_cumulative(intervals, demands, max_width) + + # Choose which machine to perform the jobs on. + model.add_no_overlap(intervals0) + model.add_no_overlap(intervals1) + + # Objective variable. + makespan = model.new_int_var(0, horizon, "makespan") + model.add_max_equality(makespan, ends) + model.minimize(makespan) + + # Symmetry breaking. + model.add(performed[0] == 0) + + # Solve. 
+ solver = cp_model.CpSolver() + solver.parameters.num_workers = 8 + solver.parameters.max_time_in_seconds = 50 + solver.parameters.log_search_progress = True + callback = TimeRecorder(solver.parameters.max_time_in_seconds) + solver.Solve(model, callback) + self.assertLess(solver.wall_time, callback.last_time + 5.0) + if __name__ == "__main__": absltest.main() diff --git a/ortools/sat/swig_helper.cc b/ortools/sat/swig_helper.cc index b03de25b90..0d9b045c4e 100644 --- a/ortools/sat/swig_helper.cc +++ b/ortools/sat/swig_helper.cc @@ -15,7 +15,6 @@ #include -#include #include #include @@ -27,9 +26,9 @@ #include "ortools/sat/cp_model_utils.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/util.h" #include "ortools/util/logging.h" #include "ortools/util/sorted_interval_list.h" -#include "ortools/util/time_limit.h" namespace operations_research { namespace sat { @@ -90,18 +89,15 @@ bool SolutionCallback::SolutionBooleanValue(int index) { } void SolutionCallback::StopSearch() { - if (stopped_ptr_ != nullptr) { - (*stopped_ptr_) = true; - } + if (wrapper_ != nullptr) wrapper_->StopSearch(); } operations_research::sat::CpSolverResponse SolutionCallback::Response() const { return response_; } -void SolutionCallback::SetAtomicBooleanToStopTheSearch( - std::atomic* stopped_ptr) const { - stopped_ptr_ = stopped_ptr; +void SolutionCallback::SetWrapperClass(SolveWrapper* wrapper) const { + wrapper_ = wrapper; } bool SolutionCallback::HasResponse() const { return has_response_; } @@ -116,15 +112,13 @@ void SolveWrapper::SetStringParameters(const std::string& string_parameters) { } void SolveWrapper::AddSolutionCallback(const SolutionCallback& callback) { - // Overwrite the atomic bool. 
- callback.SetAtomicBooleanToStopTheSearch(&stopped_); + callback.SetWrapperClass(this); model_.Add(NewFeasibleSolutionObserver( [&callback](const CpSolverResponse& r) { return callback.Run(r); })); } void SolveWrapper::ClearSolutionCallback(const SolutionCallback& callback) { - // cleanup the atomic bool. - callback.SetAtomicBooleanToStopTheSearch(nullptr); + callback.SetWrapperClass(nullptr); // Detach the wrapper class. } void SolveWrapper::AddLogCallback( @@ -157,11 +151,13 @@ void SolveWrapper::AddBestBoundCallbackFromClass(BestBoundCallback* callback) { operations_research::sat::CpSolverResponse SolveWrapper::Solve( const operations_research::sat::CpModelProto& model_proto) { FixFlagsAndEnvironmentForSwig(); - model_.GetOrCreate()->RegisterExternalBooleanAsLimit(&stopped_); return operations_research::sat::SolveCpModel(model_proto, &model_); } -void SolveWrapper::StopSearch() { stopped_ = true; } +void SolveWrapper::StopSearch() { + model_.GetOrCreate()->Stop(); +} + std::string CpSatHelper::ModelStats( const operations_research::sat::CpModelProto& model_proto) { return CpModelStats(model_proto); diff --git a/ortools/sat/swig_helper.h b/ortools/sat/swig_helper.h index e9821b620d..3a9cfeec69 100644 --- a/ortools/sat/swig_helper.h +++ b/ortools/sat/swig_helper.h @@ -14,24 +14,20 @@ #ifndef OR_TOOLS_SAT_SWIG_HELPER_H_ #define OR_TOOLS_SAT_SWIG_HELPER_H_ -#include #include #include #include #include "ortools/sat/cp_model.pb.h" -#include "ortools/sat/cp_model_checker.h" -#include "ortools/sat/cp_model_solver.h" -#include "ortools/sat/cp_model_utils.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_parameters.pb.h" -#include "ortools/util/logging.h" #include "ortools/util/sorted_interval_list.h" -#include "ortools/util/time_limit.h" namespace operations_research { namespace sat { +class SolveWrapper; + // Base class for SWIG director based on solution callbacks. // See http://www.swig.org/Doc4.0/SWIGDocumentation.html#CSharp_directors. 
class SolutionCallback { @@ -72,14 +68,14 @@ class SolutionCallback { operations_research::sat::CpSolverResponse Response() const; // We use mutable and non const methods to overcome SWIG difficulties. - void SetAtomicBooleanToStopTheSearch(std::atomic* stopped_ptr) const; + void SetWrapperClass(SolveWrapper* wrapper) const; bool HasResponse() const; private: mutable CpSolverResponse response_; mutable bool has_response_ = false; - mutable std::atomic* stopped_ptr_; + mutable SolveWrapper* wrapper_ = nullptr; }; // Simple director class for C#. @@ -126,7 +122,6 @@ class SolveWrapper { private: Model model_; - std::atomic stopped_ = false; }; // Static methods are stored in a module which name can vary. From b38890bebe5b5ff1becca8067fecfd19e5c73a65 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 23 Sep 2024 15:46:03 +0200 Subject: [PATCH 009/105] cmake: cleanup tests --- ortools/graph/CMakeLists.txt | 21 +-------------------- ortools/sat/CMakeLists.txt | 1 + 2 files changed, 2 insertions(+), 20 deletions(-) diff --git a/ortools/graph/CMakeLists.txt b/ortools/graph/CMakeLists.txt index 109cf30b01..f76659bd0c 100644 --- a/ortools/graph/CMakeLists.txt +++ b/ortools/graph/CMakeLists.txt @@ -12,28 +12,9 @@ # limitations under the License. 
file(GLOB _SRCS "*.h" "*.cc") +list(FILTER _SRCS EXCLUDE REGEX ".*/.*_test.cc") list(REMOVE_ITEM _SRCS - ${CMAKE_CURRENT_SOURCE_DIR}/assignment_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/bidirectional_dijkstra_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/bounded_dijkstra_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/christofides_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/cliques_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/dag_constrained_shortest_path_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/dag_shortest_path_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/ebert_graph_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/eulerian_path_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/hamiltonian_path_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/k_shortest_paths_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/linear_assignment_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/max_flow_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/min_cost_flow_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/minimum_spanning_tree_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/multi_dijkstra_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/one_tree_lower_bound_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/perfect_matching_test.cc - ${CMAKE_CURRENT_SOURCE_DIR}/rooted_tree_test.cc ${CMAKE_CURRENT_SOURCE_DIR}/shortest_paths_benchmarks.cc - ${CMAKE_CURRENT_SOURCE_DIR}/shortest_paths_test.cc ${CMAKE_CURRENT_SOURCE_DIR}/solve_flow_model.cc ) diff --git a/ortools/sat/CMakeLists.txt b/ortools/sat/CMakeLists.txt index 99f2113edb..c2a1d46fa2 100644 --- a/ortools/sat/CMakeLists.txt +++ b/ortools/sat/CMakeLists.txt @@ -47,6 +47,7 @@ if(BUILD_TESTING) FILE_NAME ${FILE_NAME} DEPS + benchmark::benchmark GTest::gmock GTest::gtest_main ) From c971dc4f70301f61934f95274c64dd7fc82b899c Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 23 Sep 2024 16:04:16 +0200 Subject: [PATCH 010/105] fix includes --- ortools/graph/ebert_graph.h | 21 ++++++++++----------- ortools/graph/linear_assignment.h | 20 +++++++------------- 2 files changed, 17 insertions(+), 24 deletions(-) diff --git a/ortools/graph/ebert_graph.h b/ortools/graph/ebert_graph.h index 
71e6daed82..812b876718 100644 --- a/ortools/graph/ebert_graph.h +++ b/ortools/graph/ebert_graph.h @@ -176,7 +176,6 @@ #include #include "absl/strings/str_cat.h" -#include "gtest/gtest_prod.h" #include "ortools/base/logging.h" #include "ortools/util/permutation.h" #include "ortools/util/zvector.h" @@ -950,8 +949,6 @@ const ArcIndexType template class EbertGraphBase : public StarGraphBase { - FRIEND_TEST(ForwardEbertGraphTest, ImpossibleBuildTailArray); - typedef StarGraphBase Base; friend class StarGraphBase; @@ -1112,6 +1109,16 @@ class EbertGraphBase }; #endif // SWIG + // Using the SetHead() method implies that the BuildRepresentation() + // method must be called to restore consistency before the graph is + // used. + // + // Visible for testing. + void SetHead(const ArcIndexType arc, const NodeIndexType head) { + representation_clean_ = false; + head_.Set(arc, head); + } + protected: EbertGraphBase() : next_adjacent_arc_(), representation_clean_(true) {} @@ -1178,14 +1185,6 @@ class EbertGraphBase } bool RepresentationClean() const { return representation_clean_; } - - // Using the SetHead() method implies that the BuildRepresentation() - // method must be called to restore consistency before the graph is - // used. 
- void SetHead(const ArcIndexType arc, const NodeIndexType head) { - representation_clean_ = false; - head_.Set(arc, head); - } }; // Most users should only use StarGraph, which is EbertGraph, diff --git a/ortools/graph/linear_assignment.h b/ortools/graph/linear_assignment.h index 449c6c54c8..635fd275c9 100644 --- a/ortools/graph/linear_assignment.h +++ b/ortools/graph/linear_assignment.h @@ -207,7 +207,6 @@ #include "absl/flags/declare.h" #include "absl/flags/flag.h" #include "absl/strings/str_format.h" -#include "gtest/gtest_prod.h" #include "ortools/base/logging.h" #include "ortools/graph/ebert_graph.h" #include "ortools/util/permutation.h" @@ -228,13 +227,6 @@ class LinearSumAssignment { typedef typename GraphType::NodeIndex NodeIndex; typedef typename GraphType::ArcIndex ArcIndex; -#ifndef SWIG - // Friends don't let friends drive untested. One or more of our - // tests are white-box tests, i.e., they look inside the - // implementation and check various internal invariants. - FRIEND_TEST(LinearSumAssignmentFriendTest, EpsilonOptimal); -#endif - // Constructor for the case in which we will build the graph // incrementally as we discover arc costs, as might be done with any // of the dynamic graph representations such as StarGraph or ForwardStarGraph. @@ -386,6 +378,12 @@ class LinearSumAssignment { typename GraphType::NodeIndex node_iterator_; }; + // Returns true if and only if the current pseudoflow is + // epsilon-optimal. To be used in a DCHECK. + // + // Visible for testing. + bool EpsilonOptimal() const; + private: struct Stats { Stats() : pushes_(0), double_pushes_(0), relabelings_(0), refinements_(0) {} @@ -470,10 +468,6 @@ class LinearSumAssignment { // right-side nodes during DoublePush operations. typedef std::pair ImplicitPriceSummary; - // Returns true if and only if the current pseudoflow is - // epsilon-optimal. To be used in a DCHECK. - bool EpsilonOptimal() const; - // Checks that all nodes are matched. // To be used in a DCHECK. 
bool AllMatched() const; @@ -523,7 +517,7 @@ class LinearSumAssignment { // definition of admissibility, this action is different from // saturating all admissible arcs (which we never do). All negative // arcs are admissible, but not all admissible arcs are negative. It - // is alwsys enough to saturate only the negative ones. + // is always enough to saturate only the negative ones. void SaturateNegativeArcs(); // Performs an optimized sequence of pushing a unit of excess out of From 2c94629c75950c67b212cfaf1548dfe4dbc755af Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 23 Sep 2024 16:04:53 +0200 Subject: [PATCH 011/105] add API in ortools/util --- ortools/util/bitset.h | 4 ++++ ortools/util/integer_pq.h | 26 ++++++++++++++------------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/ortools/util/bitset.h b/ortools/util/bitset.h index 82299b66d9..7f2f3a4932 100644 --- a/ortools/util/bitset.h +++ b/ortools/util/bitset.h @@ -815,6 +815,10 @@ inline int Bitset64::Value(int64_t input) { DCHECK_GE(input, 0); return input; } +template <> +inline int Bitset64::Value(size_t input) { + return input; +} // A simple utility class to set/unset integer in a range [0, size). // This is optimized for sparsity. diff --git a/ortools/util/integer_pq.h b/ortools/util/integer_pq.h index 7ae182f372..1d50e4d7db 100644 --- a/ortools/util/integer_pq.h +++ b/ortools/util/integer_pq.h @@ -129,8 +129,8 @@ class IntegerPriorityQueue { private: // Puts the given element at heap index i. - void Set(int i, Element element) { - heap_[i] = element; + void Set(Element* heap, int i, Element element) { + heap[i] = element; position_[element.Index()] = i; } @@ -139,44 +139,46 @@ class IntegerPriorityQueue { // this position. 
void SetAndDecreasePriority(int i, const Element element) { const int size = size_; + Element* heap = heap_.data(); while (true) { const int left = i * 2; const int right = left + 1; if (right > size) { if (left > size) break; - const Element left_element = heap_[left]; + const Element left_element = heap[left]; if (!less_(element, left_element)) break; - Set(i, left_element); + Set(heap, i, left_element); i = left; break; } - const Element left_element = heap_[left]; - const Element right_element = heap_[right]; + const Element left_element = heap[left]; + const Element right_element = heap[right]; if (less_(left_element, right_element)) { if (!less_(element, right_element)) break; - Set(i, right_element); + Set(heap, i, right_element); i = right; } else { if (!less_(element, left_element)) break; - Set(i, left_element); + Set(heap, i, left_element); i = left; } } - Set(i, element); + Set(heap, i, element); } // Puts the given element at heap index i and update the heap knowing that the // element has a priority >= than the priority of the element currently at // this position. 
void SetAndIncreasePriority(int i, const Element element) { + Element* heap = heap_.data(); while (i > 1) { const int parent = i >> 1; - const Element parent_element = heap_[parent]; + const Element parent_element = heap[parent]; if (!less_(parent_element, element)) break; - Set(i, parent_element); + Set(heap, i, parent_element); i = parent; } - Set(i, element); + Set(heap, i, element); } int size_; From 5f120fd8ee02f617e499b2e019d8c3e3688e3c00 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 24 Sep 2024 07:30:57 +0200 Subject: [PATCH 012/105] fix test --- ortools/sat/linear_constraint_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ortools/sat/linear_constraint_test.cc b/ortools/sat/linear_constraint_test.cc index 6c6dbc4ada..bc87f50ed3 100644 --- a/ortools/sat/linear_constraint_test.cc +++ b/ortools/sat/linear_constraint_test.cc @@ -44,7 +44,7 @@ TEST(ComputeActivityTest, BasicBehavior) { util_intops::StrongVector values = {0.5, 0.0, 1.4, 0.0, -2.1, 0.0}; - EXPECT_EQ(ComputeActivity(ct.Build(), values), 1 * 0.5 - 2 * 1.4 - 3 * 2.1); + EXPECT_NEAR(ComputeActivity(ct.Build(), values), 1 * 0.5 - 2 * 1.4 - 3 * 2.1, 1e-6); } TEST(ComputeActivityTest, EmptyConstraint) { From f18487c861827d4d6b334c5c365403e88d9a3eaf Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 25 Sep 2024 11:19:37 +0100 Subject: [PATCH 013/105] Update graph code --- ortools/graph/BUILD.bazel | 3 +- ortools/graph/max_flow.cc | 81 ++++++++++++++++++++++++++++++++++ ortools/graph/max_flow.h | 12 +++++ ortools/graph/max_flow_test.cc | 39 ++++++++++++---- 4 files changed, 125 insertions(+), 10 deletions(-) diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel index c3ae7f7dcd..d00f8154c3 100644 --- a/ortools/graph/BUILD.bazel +++ b/ortools/graph/BUILD.bazel @@ -393,9 +393,9 @@ cc_library( ":graph", ":graphs", "//ortools/base", - "//ortools/base:types", "//ortools/util:stats", "//ortools/util:zvector", + "@com_google_absl//absl/log:check", 
"@com_google_absl//absl/memory", "@com_google_absl//absl/strings", "@com_google_absl//absl/strings:str_format", @@ -416,6 +416,7 @@ cc_test( "//ortools/base:path", "//ortools/linear_solver", "//ortools/util:file_util", + "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/random", "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/types:span", diff --git a/ortools/graph/max_flow.cc b/ortools/graph/max_flow.cc index c9022ddc42..cbe1a725b4 100644 --- a/ortools/graph/max_flow.cc +++ b/ortools/graph/max_flow.cc @@ -19,9 +19,11 @@ #include #include +#include "absl/log/check.h" #include "absl/memory/memory.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" +#include "ortools/graph/ebert_graph.h" #include "ortools/graph/graph.h" #include "ortools/graph/graphs.h" @@ -1025,4 +1027,83 @@ template class GenericMaxFlow<::util::ReverseArcListGraph<>>; template class GenericMaxFlow<::util::ReverseArcStaticGraph<>>; template class GenericMaxFlow<::util::ReverseArcMixedGraph<>>; +std::vector BipartiteMinimumVertexCover( + const std::vector>& left_to_right_arcs, int num_right) { + // This algorithm first uses the maximum flow to find a maximum matching. Then + // it uses the same method outlined in the proof of Konig's theorem to + // transform the maximum matching into a minimum vertex cover. + // + // More concretely, it uses a DFS starting with unmatched nodes and + // alternating matched/unmatched edges to find a minimum vertex cover. 
+ SimpleMaxFlow max_flow; + const int num_left = left_to_right_arcs.size(); + std::vector arcs; + for (int i = 0; i < num_left; ++i) { + for (const int right_node : left_to_right_arcs[i]) { + DCHECK_GE(right_node, num_left); + DCHECK_LT(right_node, num_right + num_left); + arcs.push_back(max_flow.AddArcWithCapacity(i, right_node, 1)); + } + } + std::vector> adj_list = left_to_right_arcs; + adj_list.resize(num_left + num_right); + for (int i = 0; i < num_left; ++i) { + for (const int right_node : left_to_right_arcs[i]) { + adj_list[right_node].push_back(i); + } + } + const int sink = num_left + num_right; + const int source = num_left + num_right + 1; + for (int i = 0; i < num_left; ++i) { + max_flow.AddArcWithCapacity(source, i, 1); + } + for (int i = 0; i < num_right; ++i) { + max_flow.AddArcWithCapacity(i + num_left, sink, 1); + } + CHECK(max_flow.Solve(source, sink) == SimpleMaxFlow::OPTIMAL); + std::vector maximum_matching(num_left + num_right, -1); + for (const ArcIndex arc : arcs) { + if (max_flow.Flow(arc) > 0) { + maximum_matching[max_flow.Tail(arc)] = max_flow.Head(arc); + maximum_matching[max_flow.Head(arc)] = max_flow.Tail(arc); + } + } + // We do a DFS starting with unmatched nodes and alternating matched/unmatched + // edges. 
+ std::vector in_alternating_path(num_left + num_right, false); + std::vector to_visit; + for (int i = 0; i < num_left; ++i) { + if (maximum_matching[i] == -1) { + to_visit.push_back(i); + } + } + while (!to_visit.empty()) { + const int current = to_visit.back(); + to_visit.pop_back(); + if (in_alternating_path[current]) { + continue; + } + in_alternating_path[current] = true; + for (const int j : adj_list[current]) { + if (current < num_left && maximum_matching[current] != j) { + to_visit.push_back(j); + } else if (current >= num_left && maximum_matching[current] == j) { + to_visit.push_back(j); + } + } + } + std::vector minimum_vertex_cover(num_left + num_right, false); + for (int i = 0; i < num_left; ++i) { + if (!in_alternating_path[i]) { + minimum_vertex_cover[i] = true; + } + } + for (int i = num_left; i < num_left + num_right; ++i) { + if (in_alternating_path[i]) { + minimum_vertex_cover[i] = true; + } + } + return minimum_vertex_cover; +} + } // namespace operations_research diff --git a/ortools/graph/max_flow.h b/ortools/graph/max_flow.h index a5d961e539..b9cfa38d69 100644 --- a/ortools/graph/max_flow.h +++ b/ortools/graph/max_flow.h @@ -679,6 +679,18 @@ extern template class GenericMaxFlow<::util::ReverseArcListGraph<>>; extern template class GenericMaxFlow<::util::ReverseArcStaticGraph<>>; extern template class GenericMaxFlow<::util::ReverseArcMixedGraph<>>; +// This method computes a minimum vertex cover for the bipartite graph. +// +// If we define num_left=left_to_right_arcs.size(), the "left" nodes are +// integers in [0, num_left), and the "right" nodes are integers in [num_left, +// num_left + num_right). +// +// Returns a vector of size num_left+num_right, such that element #l is true if +// it is part of the minimum vertex cover and false if it is part of the maximum +// independent set (one is the complement of the other). 
+std::vector BipartiteMinimumVertexCover( + const std::vector>& left_to_right_arcs, int num_right); + // Default instance MaxFlow that uses StarGraph. Note that we cannot just use a // typedef because of dependent code expecting MaxFlow to be a real class. // TODO(user): Modify this code and remove it. diff --git a/ortools/graph/max_flow_test.cc b/ortools/graph/max_flow_test.cc index 05784665bb..5b9e0fbcce 100644 --- a/ortools/graph/max_flow_test.cc +++ b/ortools/graph/max_flow_test.cc @@ -21,6 +21,7 @@ #include #include +#include "absl/algorithm/container.h" #include "absl/random/random.h" #include "absl/strings/str_format.h" #include "absl/types/span.h" @@ -264,7 +265,7 @@ class GenericMaxFlowTest : public ::testing::Test {}; typedef ::testing::Types, util::ReverseArcStaticGraph<>, - util::ReverseArcMixedGraph<> > + util::ReverseArcMixedGraph<>> GraphTypes; TYPED_TEST_SUITE(GenericMaxFlowTest, GraphTypes); @@ -559,7 +560,7 @@ void FullRandomAssignment(typename MaxFlowSolver::Solver f, GenerateCompleteGraph(num_tails, num_heads, &graph); Graphs::Build(&graph); std::vector arc_capacity(graph.num_arcs(), 1); - std::unique_ptr > max_flow(new GenericMaxFlow( + std::unique_ptr> max_flow(new GenericMaxFlow( &graph, graph.num_nodes() - 2, graph.num_nodes() - 1)); SetUpNetworkData(arc_capacity, max_flow.get()); FlowQuantity flow = f(max_flow.get()); @@ -578,7 +579,7 @@ void PartialRandomAssignment(typename MaxFlowSolver::Solver f, Graphs::Build(&graph); CHECK_EQ(graph.num_arcs(), num_tails * kDegree + num_tails + num_heads); std::vector arc_capacity(graph.num_arcs(), 1); - std::unique_ptr > max_flow(new GenericMaxFlow( + std::unique_ptr> max_flow(new GenericMaxFlow( &graph, graph.num_nodes() - 2, graph.num_nodes() - 1)); SetUpNetworkData(arc_capacity, max_flow.get()); FlowQuantity flow = f(max_flow.get()); @@ -613,7 +614,7 @@ void PartialRandomFlow(typename MaxFlowSolver::Solver f, Graphs::Build(&graph, &permutation); util::Permute(permutation, &arc_capacity); - 
std::unique_ptr > max_flow(new GenericMaxFlow( + std::unique_ptr> max_flow(new GenericMaxFlow( &graph, graph.num_nodes() - 2, graph.num_nodes() - 1)); SetUpNetworkData(arc_capacity, max_flow.get()); FlowQuantity flow = f(max_flow.get()); @@ -642,7 +643,7 @@ void FullRandomFlow(typename MaxFlowSolver::Solver f, Graphs::Build(&graph, &permutation); util::Permute(permutation, &arc_capacity); - std::unique_ptr > max_flow(new GenericMaxFlow( + std::unique_ptr> max_flow(new GenericMaxFlow( &graph, graph.num_nodes() - 2, graph.num_nodes() - 1)); SetUpNetworkData(arc_capacity, max_flow.get()); FlowQuantity flow = f(max_flow.get()); @@ -672,10 +673,10 @@ void FullRandomFlow(typename MaxFlowSolver::Solver f, expected_flow2); \ } -#define FLOW_ONLY_TEST_SG(test_name, size, expected_flow1, expected_flow2) \ - TEST(MaxFlowTestStaticGraph, test_name##size) { \ - test_name >(SolveMaxFlow, size, size, \ - expected_flow1, expected_flow2); \ +#define FLOW_ONLY_TEST_SG(test_name, size, expected_flow1, expected_flow2) \ + TEST(MaxFlowTestStaticGraph, test_name##size) { \ + test_name>(SolveMaxFlow, size, size, \ + expected_flow1, expected_flow2); \ } LP_AND_FLOW_TEST(FullRandomAssignment, 300, 300, 300); @@ -838,6 +839,26 @@ TEST(PriorityQueueWithRestrictedPushTest, RandomPushPop) { } } +TEST(BipartiteMinimumVertexCoverTest, BasicBehavior) { + const int num_right = 4; + const std::vector> left_to_right = { + {5}, {4, 5, 6}, {5}, {5, 6, 7}}; + EXPECT_EQ(absl::c_count(BipartiteMinimumVertexCover(left_to_right, num_right), + true), + 3); + EXPECT_EQ(absl::c_count(BipartiteMinimumVertexCover(left_to_right, num_right), + false), + 5); +} + +TEST(BipartiteMinimumVertexCoverTest, Empty) { + const int num_right = 4; + const std::vector> left_to_right = {{}, {}}; + EXPECT_EQ(absl::c_count(BipartiteMinimumVertexCover(left_to_right, num_right), + false), + 6); +} + TEST(PriorityQueueWithRestrictedPushDeathTest, DCHECK) { // Don't run this test in opt mode. 
if (!DEBUG_MODE) GTEST_SKIP(); From c6980713c5110b4b4e7de4cb3371104779d17fb4 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 25 Sep 2024 11:19:45 +0100 Subject: [PATCH 014/105] reindent --- ortools/gurobi/environment.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ortools/gurobi/environment.cc b/ortools/gurobi/environment.cc index f2f73cbac2..1e606fc9b8 100644 --- a/ortools/gurobi/environment.cc +++ b/ortools/gurobi/environment.cc @@ -346,8 +346,8 @@ void LoadGurobiFunctions(DynamicLibrary* gurobi_dynamic_library) { std::vector GurobiDynamicLibraryPotentialPaths() { std::vector potential_paths; const std::vector kGurobiVersions = { - "1103", "1102", "1101", "1100", "1003", "1002", "1001", "1000", "952", "951", - "950", "911", "910", "903", "902", "811", "801", "752"}; + "1103", "1102", "1101", "1100", "1003", "1002", "1001", "1000", "952", + "951", "950", "911", "910", "903", "902", "811", "801", "752"}; potential_paths.reserve(kGurobiVersions.size() * 3); // Look for libraries pointed by GUROBI_HOME first. @@ -406,8 +406,8 @@ std::vector GurobiDynamicLibraryPotentialPaths() { #if defined(__GNUC__) // path in linux64 gurobi/optimizer docker image. 
for (const std::string& version : - {"11.0.3", "11.0.2", "11.0.1", "11.0.0", "10.0.3", "10.0.2", "10.0.1", "10.0.0", - "9.5.2", "9.5.1", "9.5.0"}) { + {"11.0.3", "11.0.2", "11.0.1", "11.0.0", "10.0.3", "10.0.2", "10.0.1", + "10.0.0", "9.5.2", "9.5.1", "9.5.0"}) { potential_paths.push_back( absl::StrCat("/opt/gurobi/linux64/lib/libgurobi.so.", version)); } From 4ba61d5558e74e9a1b96ae8b3c7f4259668f0c25 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 25 Sep 2024 11:20:06 +0100 Subject: [PATCH 015/105] use std::optional --- ortools/linear_solver/proto_solver/gurobi_proto_solver.cc | 3 ++- ortools/linear_solver/proto_solver/highs_proto_solver.cc | 3 ++- ortools/linear_solver/proto_solver/scip_proto_solver.cc | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc b/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc index 9f4cbf0734..a13e20cdd9 100644 --- a/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -274,7 +275,7 @@ absl::Status SetSolverSpecificParameters(absl::string_view parameters, absl::StatusOr GurobiSolveProto( LazyMutableCopy request, GRBenv* gurobi_env) { MPSolutionResponse response; - const absl::optional> optional_model = + const std::optional> optional_model = GetMPModelOrPopulateResponse(request, &response); if (!optional_model) return response; const MPModelProto& model = **optional_model; diff --git a/ortools/linear_solver/proto_solver/highs_proto_solver.cc b/ortools/linear_solver/proto_solver/highs_proto_solver.cc index d015b5df31..9e8d79c8c1 100644 --- a/ortools/linear_solver/proto_solver/highs_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/highs_proto_solver.cc @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -45,7 +46,7 @@ absl::Status SetSolverSpecificParameters(const 
std::string& parameters, absl::StatusOr HighsSolveProto( LazyMutableCopy request) { MPSolutionResponse response; - const absl::optional> optional_model = + const std::optional> optional_model = GetMPModelOrPopulateResponse(request, &response); if (!optional_model) return response; const MPModelProto& model = **optional_model; diff --git a/ortools/linear_solver/proto_solver/scip_proto_solver.cc b/ortools/linear_solver/proto_solver/scip_proto_solver.cc index f07b419a58..5665adee00 100644 --- a/ortools/linear_solver/proto_solver/scip_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/scip_proto_solver.cc @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -692,7 +693,7 @@ std::string FindErrorInMPModelForScip(const MPModelProto& model, SCIP* scip) { absl::StatusOr ScipSolveProto( LazyMutableCopy request) { MPSolutionResponse response; - const absl::optional> optional_model = + const std::optional> optional_model = GetMPModelOrPopulateResponse(request, &response); if (!optional_model) return response; const MPModelProto& model = **optional_model; From 9efab291d45fbe2df2e4507ad99d42a47d186f06 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 25 Sep 2024 11:20:30 +0100 Subject: [PATCH 016/105] implement #4386 --- ortools/lp_data/mps_reader_template.h | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/ortools/lp_data/mps_reader_template.h b/ortools/lp_data/mps_reader_template.h index b8e8e61899..439487d608 100644 --- a/ortools/lp_data/mps_reader_template.h +++ b/ortools/lp_data/mps_reader_template.h @@ -501,7 +501,7 @@ class MPSReaderTemplate { // Parses a file in MPS format; if successful, returns the type of MPS // format detected (one of `kFree` or `kFixed`). 
If `form` is either `kFixed` // or `kFree`, the function will either return `kFixed` (or `kFree` - // respectivelly) if the input data satisfies the format, or an + // respectively) if the input data satisfies the format, or an // `absl::InvalidArgumentError` otherwise. absl::StatusOr ParseFile( absl::string_view file_name, DataWrapper* data, @@ -510,7 +510,7 @@ class MPSReaderTemplate { // Parses a string in MPS format; if successful, returns the type of MPS // format detected (one of `kFree` or `kFixed`). If `form` is either `kFixed` // or `kFree`, the function will either return `kFixed` (or `kFree` - // respectivelly) if the input data satisfies the format, or an + // respectively) if the input data satisfies the format, or an // `absl::InvalidArgumentError` otherwise. absl::StatusOr ParseString( absl::string_view source, DataWrapper* data, @@ -720,6 +720,7 @@ absl::Status MPSReaderTemplate::ProcessLine(absl::string_view line, } else { return line_info.InvalidArgumentError("Unknown section."); } + if (section_ == internal::MPSSectionId::kName) { // NOTE(user): The name may differ between fixed and free forms. In // fixed form, the name has at most 8 characters, and starts at a specific @@ -746,6 +747,21 @@ absl::Status MPSReaderTemplate::ProcessLine(absl::string_view line, data->SetName(fixed_name); } } + + // Supports the case where the direction is on the same line as the + // OBJSENSE keyword. 
+ if (section_ == internal::MPSSectionId::kObjsense && + line_info.GetFieldsSize() == 2 && free_form_) { + if (absl::StrContains(line_info.GetField(1), "MIN")) { + data->SetObjectiveDirection(/*maximize=*/false); + } else if (absl::StrContains(line_info.GetField(1), "MAX")) { + data->SetObjectiveDirection(/*maximize=*/true); + } else { + return line_info.InvalidArgumentError( + "Invalid inline objective direction."); + } + } + return absl::OkStatus(); } switch (section_) { From c709e98327977f52014cfeb109a496a70d8abff3 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 25 Sep 2024 11:21:36 +0100 Subject: [PATCH 017/105] [CP-SAT] improve 2d packing presolve; improve MIR cuts; rewrite some of the cuts managements --- ortools/sat/2d_rectangle_presolve.cc | 656 ++++++++++++++++++- ortools/sat/2d_rectangle_presolve.h | 26 +- ortools/sat/2d_rectangle_presolve_test.cc | 185 +++++- ortools/sat/BUILD.bazel | 1 + ortools/sat/cuts.cc | 268 ++++---- ortools/sat/cuts.h | 57 +- ortools/sat/cuts_test.cc | 24 +- ortools/sat/diffn_util.cc | 42 +- ortools/sat/diffn_util.h | 22 +- ortools/sat/diffn_util_test.cc | 32 +- ortools/sat/integer.cc | 2 +- ortools/sat/integer.h | 3 +- ortools/sat/linear_constraint_test.cc | 3 +- ortools/sat/linear_programming_constraint.cc | 28 +- ortools/sat/linear_programming_constraint.h | 2 +- ortools/sat/python/cp_model_test.py | 51 +- 16 files changed, 1124 insertions(+), 278 deletions(-) diff --git a/ortools/sat/2d_rectangle_presolve.cc b/ortools/sat/2d_rectangle_presolve.cc index 94ec802c99..8db1a1e2f8 100644 --- a/ortools/sat/2d_rectangle_presolve.cc +++ b/ortools/sat/2d_rectangle_presolve.cc @@ -31,6 +31,7 @@ #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/base/stl_util.h" +#include "ortools/graph/max_flow.h" #include "ortools/graph/strongly_connected_components.h" #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" @@ -149,7 +150,7 @@ bool PresolveFixed2dRectangles( if 
(!new_box.IsDisjoint(existing_box)) { is_disjoint = false; for (const Rectangle& disjoint_box : - new_box.SetDifference(existing_box)) { + new_box.RegionDifference(existing_box)) { to_add.push_back(disjoint_box); } break; @@ -209,7 +210,30 @@ bool PresolveFixed2dRectangles( optional_boxes.erase(optional_boxes.begin(), optional_boxes.begin() + num_optional_boxes_to_remove); - if (ReduceNumberofBoxes(fixed_boxes, &optional_boxes)) { + // TODO(user): instead of doing the greedy algorithm first with optional + // boxes, and then the one that is exact for mandatory boxes but weak for + // optional ones, refactor the second algorithm. One possible way of doing + // that would be to follow the shape boundary of optional+mandatory boxes and + // look whether we can shave off some turns. For example, if we have a shape + // like below, with the "+" representing area covered by optional boxes, we + // can replace the turns by a straight line. + // + // --> + // ^ ++++ + // . ++++ . + // . ++++ . => + // ++++ \/ + // --> ++++ --> --> + // *********** *********** + // *********** *********** + // + // Since less turns means less edges, this should be a good way to reduce the + // number of boxes. 
+ if (ReduceNumberofBoxesGreedy(fixed_boxes, &optional_boxes)) { + changed = true; + } + const int num_after_first_pass = fixed_boxes->size(); + if (ReduceNumberOfBoxesExactMandatory(fixed_boxes, &optional_boxes)) { changed = true; } if (changed && VLOG_IS_ON(1)) { @@ -219,8 +243,8 @@ bool PresolveFixed2dRectangles( } VLOG_EVERY_N_SEC(1, 1) << "Presolved " << original_num_boxes << " fixed rectangles (area=" << original_area - << ") into " << fixed_boxes->size() - << " (area=" << area << ")"; + << ") into " << num_after_first_pass << " then " + << fixed_boxes->size() << " (area=" << area << ")"; VLOG_EVERY_N_SEC(2, 2) << "Presolved rectangles:\n" << RenderDot(bounding_box, fixed_boxes_copy) @@ -283,18 +307,10 @@ struct Edge { }; } // namespace -bool ReduceNumberofBoxes(std::vector* mandatory_rectangles, - std::vector* optional_rectangles) { +bool ReduceNumberofBoxesGreedy(std::vector* mandatory_rectangles, + std::vector* optional_rectangles) { // The current implementation just greedly merge rectangles that shares an - // edge. This is far from optimal, and it exists a polynomial optimal - // algorithm (see page 3 of [1]) for this problem at least for the case where - // optional_rectangles is empty. - // - // TODO(user): improve - // - // [1] Eppstein, David. "Graph-theoretic solutions to computational geometry - // problems." International Workshop on Graph-Theoretic Concepts in Computer - // Science. Berlin, Heidelberg: Springer Berlin Heidelberg, 2009. + // edge. 
std::vector> rectangle_storage; enum class OptionalEnum { OPTIONAL, MANDATORY }; // bool for is_optional @@ -811,5 +827,615 @@ std::vector BoxesToShapes(absl::Span rectangles, return result; } +namespace { +struct PolygonCut { + std::pair start; + std::pair end; + int start_index; + int end_index; + + struct CmpByStartY { + bool operator()(const PolygonCut& a, const PolygonCut& b) const { + return std::tie(a.start.second, a.start.first) < + std::tie(b.start.second, b.start.first); + } + }; + + struct CmpByEndY { + bool operator()(const PolygonCut& a, const PolygonCut& b) const { + return std::tie(a.end.second, a.end.first) < + std::tie(b.end.second, b.end.first); + } + }; + + struct CmpByStartX { + bool operator()(const PolygonCut& a, const PolygonCut& b) const { + return a.start < b.start; + } + }; + + struct CmpByEndX { + bool operator()(const PolygonCut& a, const PolygonCut& b) const { + return a.end < b.end; + } + }; + + template + friend void AbslStringify(Sink& sink, const PolygonCut& diagonal) { + absl::Format(&sink, "(%v,%v)-(%v,%v)", diagonal.start.first, + diagonal.start.second, diagonal.end.first, + diagonal.end.second); + } +}; + +// A different representation of a shape. The two vectors must have the same +// size. The first one contains the points of the shape and the second one +// contains the index of the next point in the shape. +// +// Note that we code in this file is only correct for shapes with points +// connected only by horizontal or vertical lines. +struct FlatShape { + std::vector> points; + std::vector next; +}; + +EdgePosition GetSegmentDirection( + const std::pair& curr_segment, + const std::pair& next_segment) { + if (curr_segment.first == next_segment.first) { + return next_segment.second > curr_segment.second ? EdgePosition::TOP + : EdgePosition::BOTTOM; + } else { + return next_segment.first > curr_segment.first ? 
EdgePosition::RIGHT + : EdgePosition::LEFT; + } +} + +// Given a polygon, this function returns all line segments that start on a +// concave vertex and follow horizontally or vertically until it reaches the +// border of the polygon. This function returns all such segments grouped on the +// direction the line takes after starting in the concave vertex. Some of those +// segments start and end on a convex vertex, so they will appear twice in the +// output. This function modifies the shape by splitting some of the path +// segments in two. This is needed to make sure that `PolygonCut.start_index` +// and `PolygonCut.end_index` always corresponds to points in the FlatShape, +// even if they are not edges. +std::array, 4> GetPotentialPolygonCuts( + FlatShape& shape) { + std::array, 4> cuts; + + // First, for each concave vertex we create a cut that starts at it and + // crosses the polygon until infinite (in practice, int_max/int_min). + for (int i = 0; i < shape.points.size(); i++) { + const auto& it = &shape.points[shape.next[i]]; + const auto& previous = &shape.points[i]; + const auto& next_segment = &shape.points[shape.next[shape.next[i]]]; + const EdgePosition previous_dir = GetSegmentDirection(*previous, *it); + const EdgePosition next_dir = GetSegmentDirection(*it, *next_segment); + + if ((previous_dir == EdgePosition::TOP && next_dir == EdgePosition::LEFT) || + (previous_dir == EdgePosition::RIGHT && + next_dir == EdgePosition::TOP)) { + cuts[EdgePosition::RIGHT].push_back( + {.start = *it, + .end = {std::numeric_limits::max(), it->second}, + .start_index = shape.next[i]}); + } + if ((previous_dir == EdgePosition::BOTTOM && + next_dir == EdgePosition::RIGHT) || + (previous_dir == EdgePosition::LEFT && + next_dir == EdgePosition::BOTTOM)) { + cuts[EdgePosition::LEFT].push_back( + {.start = {std::numeric_limits::min(), it->second}, + .end = *it, + .end_index = shape.next[i]}); + } + if ((previous_dir == EdgePosition::RIGHT && + next_dir == EdgePosition::TOP) 
|| + (previous_dir == EdgePosition::BOTTOM && + next_dir == EdgePosition::RIGHT)) { + cuts[EdgePosition::BOTTOM].push_back( + {.start = {it->first, std::numeric_limits::min()}, + .end = *it, + .end_index = shape.next[i]}); + } + if ((previous_dir == EdgePosition::TOP && next_dir == EdgePosition::LEFT) || + (previous_dir == EdgePosition::LEFT && + next_dir == EdgePosition::BOTTOM)) { + cuts[EdgePosition::TOP].push_back( + {.start = *it, + .end = {it->first, std::numeric_limits::max()}, + .start_index = shape.next[i]}); + } + } + + // Now that we have one of the points of the segment (the one starting on a + // vertex), we need to find the other point. This is basically finding the + // first path segment that crosses each cut connecting edge->infinity we + // collected above. We do a rather naive implementation of that below and its + // complexity is O(N^2) even if it should be fast in most cases. If it + // turns out to be costly on profiling we can use a more sophisticated + // algorithm for finding the first intersection. + + // We need to sort the cuts so we can use binary search to quickly find cuts + // that cross a segment. + std::sort(cuts[EdgePosition::RIGHT].begin(), cuts[EdgePosition::RIGHT].end(), + PolygonCut::CmpByStartY()); + std::sort(cuts[EdgePosition::LEFT].begin(), cuts[EdgePosition::LEFT].end(), + PolygonCut::CmpByEndY()); + std::sort(cuts[EdgePosition::BOTTOM].begin(), + cuts[EdgePosition::BOTTOM].end(), PolygonCut::CmpByEndX()); + std::sort(cuts[EdgePosition::TOP].begin(), cuts[EdgePosition::TOP].end(), + PolygonCut::CmpByStartX()); + + // This function cuts a segment in two if it crosses a cut. In any case, it + // returns the index of a point `point_idx` so that `shape.points[point_idx] + // == point_to_cut`. 
+ const auto cut_segment_if_necessary = + [&shape](int segment_idx, + std::pair point_to_cut) { + const auto& cur = shape.points[segment_idx]; + const auto& next = shape.points[shape.next[segment_idx]]; + if (cur.second == next.second) { + DCHECK_EQ(point_to_cut.second, cur.second); + // We have a horizontal segment + const IntegerValue edge_start = std::min(cur.first, next.first); + const IntegerValue edge_end = std::max(cur.first, next.first); + + if (edge_start < point_to_cut.first && + point_to_cut.first < edge_end) { + shape.points.push_back(point_to_cut); + const int next_idx = shape.next[segment_idx]; + shape.next[segment_idx] = shape.points.size() - 1; + shape.next.push_back(next_idx); + return static_cast(shape.points.size() - 1); + } + return (shape.points[segment_idx] == point_to_cut) + ? segment_idx + : shape.next[segment_idx]; + } else { + DCHECK_EQ(cur.first, next.first); + DCHECK_EQ(point_to_cut.first, cur.first); + // We have a vertical segment + const IntegerValue edge_start = std::min(cur.second, next.second); + const IntegerValue edge_end = std::max(cur.second, next.second); + + if (edge_start < point_to_cut.second && + point_to_cut.second < edge_end) { + shape.points.push_back(point_to_cut); + const int next_idx = shape.next[segment_idx]; + shape.next[segment_idx] = shape.points.size() - 1; + shape.next.push_back(next_idx); + return static_cast(shape.points.size() - 1); + } + return (shape.points[segment_idx] == point_to_cut) + ? 
segment_idx + : shape.next[segment_idx]; + } + }; + + for (int i = 0; i < shape.points.size(); i++) { + const auto* cur_point_ptr = &shape.points[shape.next[i]]; + const auto* previous = &shape.points[i]; + DCHECK(cur_point_ptr->first == previous->first || + cur_point_ptr->second == previous->second) + << "found a segment that is neither horizontal nor vertical"; + const EdgePosition direction = + GetSegmentDirection(*previous, *cur_point_ptr); + + if (direction == EdgePosition::BOTTOM) { + const auto cut_start = absl::c_lower_bound( + cuts[EdgePosition::RIGHT], + PolygonCut{.start = {std::numeric_limits::min(), + cur_point_ptr->second}}, + PolygonCut::CmpByStartY()); + auto cut_end = absl::c_upper_bound( + cuts[EdgePosition::RIGHT], + PolygonCut{.start = {std::numeric_limits::max(), + previous->second}}, + PolygonCut::CmpByStartY()); + + for (auto cut_it = cut_start; cut_it < cut_end; ++cut_it) { + PolygonCut& diagonal = *cut_it; + const IntegerValue diagonal_start_x = diagonal.start.first; + const IntegerValue diagonal_cur_end_x = diagonal.end.first; + // Our binary search guarantees those two conditions. + DCHECK_LE(cur_point_ptr->second, diagonal.start.second); + DCHECK_LE(diagonal.start.second, previous->second); + + // Let's test if the diagonal crosses the current boundary segment + if (diagonal_start_x <= previous->first && + diagonal_cur_end_x > cur_point_ptr->first) { + DCHECK_LT(diagonal_start_x, cur_point_ptr->first); + DCHECK_LE(previous->first, diagonal_cur_end_x); + + diagonal.end.first = cur_point_ptr->first; + + diagonal.end_index = cut_segment_if_necessary(i, diagonal.end); + DCHECK(shape.points[diagonal.end_index] == diagonal.end); + + // Subtle: cut_segment_if_necessary might add new points to the vector + // of the shape, so the pointers computed from it might become + // invalid. Moreover, the current segment now is shorter, so we need + // to update our upper bound. 
+ cur_point_ptr = &shape.points[shape.next[i]]; + previous = &shape.points[i]; + cut_end = absl::c_upper_bound( + cuts[EdgePosition::RIGHT], + PolygonCut{.start = {std::numeric_limits::max(), + previous->second}}, + PolygonCut::CmpByStartY()); + } + } + } + + if (direction == EdgePosition::TOP) { + const auto cut_start = absl::c_lower_bound( + cuts[EdgePosition::LEFT], + PolygonCut{.end = {std::numeric_limits::min(), + previous->second}}, + PolygonCut::CmpByEndY()); + auto cut_end = absl::c_upper_bound( + cuts[EdgePosition::LEFT], + PolygonCut{.end = {std::numeric_limits::max(), + cur_point_ptr->second}}, + PolygonCut::CmpByEndY()); + for (auto cut_it = cut_start; cut_it < cut_end; ++cut_it) { + PolygonCut& diagonal = *cut_it; + const IntegerValue diagonal_start_x = diagonal.start.first; + const IntegerValue diagonal_cur_end_x = diagonal.end.first; + // Our binary search guarantees those two conditions. + DCHECK_LE(diagonal.end.second, cur_point_ptr->second); + DCHECK_LE(previous->second, diagonal.end.second); + + // Let's test if the diagonal crosses the current boundary segment + if (diagonal_start_x < cur_point_ptr->first && + previous->first <= diagonal_cur_end_x) { + DCHECK_LT(cur_point_ptr->first, diagonal_cur_end_x); + DCHECK_LE(diagonal_start_x, previous->first); + + diagonal.start.first = cur_point_ptr->first; + diagonal.start_index = cut_segment_if_necessary(i, diagonal.start); + DCHECK(shape.points[diagonal.start_index] == diagonal.start); + cur_point_ptr = &shape.points[shape.next[i]]; + previous = &shape.points[i]; + cut_end = absl::c_upper_bound( + cuts[EdgePosition::LEFT], + PolygonCut{.end = {std::numeric_limits::max(), + cur_point_ptr->second}}, + PolygonCut::CmpByEndY()); + } + } + } + + if (direction == EdgePosition::LEFT) { + const auto cut_start = absl::c_lower_bound( + cuts[EdgePosition::BOTTOM], + PolygonCut{.end = {cur_point_ptr->first, + std::numeric_limits::min()}}, + PolygonCut::CmpByEndX()); + auto cut_end = absl::c_upper_bound( + 
cuts[EdgePosition::BOTTOM], + PolygonCut{.end = {previous->first, + std::numeric_limits::max()}}, + PolygonCut::CmpByEndX()); + for (auto cut_it = cut_start; cut_it < cut_end; ++cut_it) { + PolygonCut& diagonal = *cut_it; + const IntegerValue diagonal_start_y = diagonal.start.second; + const IntegerValue diagonal_cur_end_y = diagonal.end.second; + + // Our binary search guarantees those two conditions. + DCHECK_LE(cur_point_ptr->first, diagonal.end.first); + DCHECK_LE(diagonal.end.first, previous->first); + + // Let's test if the diagonal crosses the current boundary segment + if (diagonal_start_y < cur_point_ptr->second && + cur_point_ptr->second <= diagonal_cur_end_y) { + DCHECK_LE(diagonal_start_y, previous->second); + DCHECK_LT(cur_point_ptr->second, diagonal_cur_end_y); + + diagonal.start.second = cur_point_ptr->second; + diagonal.start_index = cut_segment_if_necessary(i, diagonal.start); + DCHECK(shape.points[diagonal.start_index] == diagonal.start); + cur_point_ptr = &shape.points[shape.next[i]]; + previous = &shape.points[i]; + cut_end = absl::c_upper_bound( + cuts[EdgePosition::BOTTOM], + PolygonCut{.end = {previous->first, + std::numeric_limits::max()}}, + PolygonCut::CmpByEndX()); + } + } + } + + if (direction == EdgePosition::RIGHT) { + const auto cut_start = absl::c_lower_bound( + cuts[EdgePosition::TOP], + PolygonCut{.start = {previous->first, + std::numeric_limits::min()}}, + PolygonCut::CmpByStartX()); + auto cut_end = absl::c_upper_bound( + cuts[EdgePosition::TOP], + PolygonCut{.start = {cur_point_ptr->first, + std::numeric_limits::max()}}, + PolygonCut::CmpByStartX()); + for (auto cut_it = cut_start; cut_it < cut_end; ++cut_it) { + PolygonCut& diagonal = *cut_it; + const IntegerValue diagonal_start_y = diagonal.start.second; + const IntegerValue diagonal_cur_end_y = diagonal.end.second; + + // Our binary search guarantees those two conditions. 
+ DCHECK_LE(previous->first, diagonal.start.first); + DCHECK_LE(diagonal.start.first, cur_point_ptr->first); + + // Let's test if the diagonal crosses the current boundary segment + if (diagonal_start_y <= cur_point_ptr->second && + cur_point_ptr->second < diagonal_cur_end_y) { + DCHECK_LT(diagonal_start_y, previous->second); + DCHECK_LE(cur_point_ptr->second, diagonal_cur_end_y); + + diagonal.end.second = cur_point_ptr->second; + diagonal.end_index = cut_segment_if_necessary(i, diagonal.end); + DCHECK(shape.points[diagonal.end_index] == diagonal.end); + cur_point_ptr = &shape.points[shape.next[i]]; + cut_end = absl::c_upper_bound( + cuts[EdgePosition::TOP], + PolygonCut{.start = {cur_point_ptr->first, + std::numeric_limits::max()}}, + PolygonCut::CmpByStartX()); + previous = &shape.points[i]; + } + } + } + } + return cuts; +} + +void CutShapeWithPolygonCuts(FlatShape& shape, + absl::Span cuts) { + std::vector previous(shape.points.size(), -1); + for (int i = 0; i < shape.points.size(); i++) { + previous[shape.next[i]] = i; + } + + std::vector> cut_previous_index(cuts.size(), {-1, -1}); + for (int i = 0; i < cuts.size(); i++) { + DCHECK(cuts[i].start == shape.points[cuts[i].start_index]); + DCHECK(cuts[i].end == shape.points[cuts[i].end_index]); + + cut_previous_index[i].first = previous[cuts[i].start_index]; + cut_previous_index[i].second = previous[cuts[i].end_index]; + } + + for (const auto& [i, j] : cut_previous_index) { + const int prev_start_next = shape.next[i]; + const int prev_end_next = shape.next[j]; + const std::pair start = + shape.points[prev_start_next]; + const std::pair end = + shape.points[prev_end_next]; + + shape.points.push_back(start); + shape.next[i] = shape.points.size() - 1; + shape.next.push_back(prev_end_next); + + shape.points.push_back(end); + shape.next[j] = shape.points.size() - 1; + shape.next.push_back(prev_start_next); + } +} +} // namespace + +// This function applies the method described in page 3 of [1]. 
+// +// [1] Eppstein, David. "Graph-theoretic solutions to computational geometry +// problems." International Workshop on Graph-Theoretic Concepts in Computer +// Science. Berlin, Heidelberg: Springer Berlin Heidelberg, 2009. +std::vector CutShapeIntoRectangles(SingleShape shape) { + auto is_aligned = [](const std::pair& p1, + const std::pair& p2, + const std::pair& p3) { + return ((p1.first == p2.first) == (p2.first == p3.first)) && + ((p1.second == p2.second) == (p2.second == p3.second)); + }; + const auto add_segment = + [&is_aligned](const std::pair& segment, + const int start_index, + std::vector>& points, + std::vector& next) { + if (points.size() > 1 + start_index && + is_aligned(points[points.size() - 1], points[points.size() - 2], + segment)) { + points.back() = segment; + } else { + points.push_back(segment); + next.push_back(points.size()); + } + }; + + // To cut our polygon into rectangles, we first put it into a data structure + // that is easier to manipulate. + FlatShape flat_shape; + for (int i = 0; 1 + i < shape.boundary.step_points.size(); ++i) { + const std::pair& segment = + shape.boundary.step_points[i]; + add_segment(segment, 0, flat_shape.points, flat_shape.next); + } + flat_shape.next.back() = 0; + for (const ShapePath& hole : shape.holes) { + const int start = flat_shape.next.size(); + if (hole.step_points.size() < 2) continue; + for (int i = 0; i + 1 < hole.step_points.size(); ++i) { + const std::pair& segment = + hole.step_points[i]; + add_segment(segment, start, flat_shape.points, flat_shape.next); + } + flat_shape.next.back() = start; + } + + std::array, 4> all_cuts = + GetPotentialPolygonCuts(flat_shape); + + // Some cuts connect two concave edges and will be duplicated in all_cuts. + // Those are important: since they "fix" two concavities with a single cut, + // they are called "good diagonals" in the literature. 
 Note that in
+ // computational geometry jargon, a diagonal of a polygon is a line segment
+ // that connects two non-adjacent vertices of a polygon, even in cases like
+ // ours where we are only talking of diagonals that are not "diagonal" in the
+ // usual meaning of the word: i.e., horizontal or vertical segments connecting
+ // two vertices of the polygon).
+ std::array, 2> good_diagonals;
+ for (const auto& d : all_cuts[EdgePosition::BOTTOM]) {
+ if (absl::c_binary_search(all_cuts[EdgePosition::TOP], d,
+ PolygonCut::CmpByStartX())) {
+ good_diagonals[0].push_back(d);
+ }
+ }
+ for (const auto& d : all_cuts[EdgePosition::LEFT]) {
+ if (absl::c_binary_search(all_cuts[EdgePosition::RIGHT], d,
+ PolygonCut::CmpByStartY())) {
+ good_diagonals[1].push_back(d);
+ }
+ }
+
+ // The "good diagonals" are only more optimal than any cut if they are not
+ // crossed by other cuts. To maximize their usefulness, we build a graph where
+ // the good diagonals are the vertices and we add an edge every time a
+ // vertical and horizontal diagonal cross. The minimum vertex cover of this
+ // graph is the minimal set of good diagonals that are not crossed by other
+ // cuts. 
+ std::vector> arcs(good_diagonals[0].size()); + for (int i = 0; i < good_diagonals[0].size(); ++i) { + for (int j = 0; j < good_diagonals[1].size(); ++j) { + const PolygonCut& vertical = good_diagonals[0][i]; + const PolygonCut& horizontal = good_diagonals[1][j]; + const IntegerValue vertical_x = vertical.start.first; + const IntegerValue horizontal_y = horizontal.start.second; + if (horizontal.start.first <= vertical_x && + vertical_x <= horizontal.end.first && + vertical.start.second <= horizontal_y && + horizontal_y <= vertical.end.second) { + arcs[i].push_back(good_diagonals[0].size() + j); + } + } + } + + const std::vector minimum_cover = + BipartiteMinimumVertexCover(arcs, good_diagonals[1].size()); + + std::vector minimum_cover_horizontal_diagonals; + for (int i = good_diagonals[0].size(); + i < good_diagonals[0].size() + good_diagonals[1].size(); ++i) { + if (minimum_cover[i]) continue; + minimum_cover_horizontal_diagonals.push_back( + good_diagonals[1][i - good_diagonals[0].size()]); + } + + // Since our data structure only allow to cut the shape according to a list + // of vertical or horizontal cuts, but not a list mixing both, we cut first + // on the chosen horizontal good diagonals. + CutShapeWithPolygonCuts(flat_shape, minimum_cover_horizontal_diagonals); + + // We need to recompute the cuts after we applied the good diagonals, since + // the geometry has changed. + all_cuts = GetPotentialPolygonCuts(flat_shape); + + // Now that we did all horizontal good diagonals, we need to cut on all + // vertical good diagonals and then cut arbitrarily to remove all concave + // edges. To make things simple, just apply all vertical cuts, since they + // include all the vertical good diagonals and also fully slice the shape into + // rectangles. + + // Remove duplicates coming from good diagonals first. 
+ std::vector cuts = all_cuts[EdgePosition::TOP]; + for (const auto& cut : all_cuts[EdgePosition::BOTTOM]) { + if (!absl::c_binary_search(all_cuts[EdgePosition::TOP], cut, + PolygonCut::CmpByStartX())) { + cuts.push_back(cut); + } + } + + CutShapeWithPolygonCuts(flat_shape, cuts); + + // Now every connected component of the shape is a rectangle. Build the final + // result. + std::vector result; + std::vector seen(flat_shape.points.size(), false); + for (int i = 0; i < flat_shape.points.size(); ++i) { + if (seen[i]) continue; + Rectangle& rectangle = result.emplace_back(Rectangle{ + .x_min = std::numeric_limits::max(), + .x_max = std::numeric_limits::min(), + .y_min = std::numeric_limits::max(), + .y_max = std::numeric_limits::min(), + }); + int cur = i; + do { + seen[cur] = true; + rectangle.GrowToInclude({.x_min = flat_shape.points[cur].first, + .x_max = flat_shape.points[cur].first, + .y_min = flat_shape.points[cur].second, + .y_max = flat_shape.points[cur].second}); + cur = flat_shape.next[cur]; + DCHECK_LT(cur, flat_shape.next.size()); + } while (cur != i); + } + + return result; +} + +bool ReduceNumberOfBoxesExactMandatory( + std::vector* mandatory_rectangles, + std::vector* optional_rectangles) { + if (mandatory_rectangles->empty()) return false; + std::vector result = *mandatory_rectangles; + std::vector new_optional_rectangles = *optional_rectangles; + + Rectangle mandatory_bounding_box = (*mandatory_rectangles)[0]; + for (const Rectangle& box : *mandatory_rectangles) { + mandatory_bounding_box.GrowToInclude(box); + } + const std::vector mandatory_empty_holes = + FindEmptySpaces(mandatory_bounding_box, *mandatory_rectangles); + const std::vector> mandatory_holes_components = + SplitInConnectedComponents(BuildNeighboursGraph(mandatory_empty_holes)); + + // Now for every connected component of the holes in the mandatory area, see + // if we can fill them with optional boxes. 
+ std::vector holes_in_component; + for (const std::vector& component : mandatory_holes_components) { + holes_in_component.clear(); + holes_in_component.reserve(component.size()); + for (const int index : component) { + holes_in_component.push_back(mandatory_empty_holes[index]); + } + if (RegionIncludesOther(new_optional_rectangles, holes_in_component)) { + // Fill the hole. + result.insert(result.end(), holes_in_component.begin(), + holes_in_component.end()); + // We can modify `optional_rectangles` here since we know that if we + // remove a hole this function will return true. + new_optional_rectangles = PavedRegionDifference( + new_optional_rectangles, std::move(holes_in_component)); + } + } + const Neighbours neighbours = BuildNeighboursGraph(result); + std::vector shapes = BoxesToShapes(result, neighbours); + + result.clear(); + for (SingleShape& shape : shapes) { + // This is the function that applies the algorithm described in [1]. + const std::vector cut_rectangles = + CutShapeIntoRectangles(std::move(shape)); + result.insert(result.end(), cut_rectangles.begin(), cut_rectangles.end()); + } + // It is possible that the algorithm actually increases the number of boxes. + // See the "Problematic2" test. 
+ if (result.size() >= mandatory_rectangles->size()) return false;
+ mandatory_rectangles->swap(result);
+ optional_rectangles->swap(new_optional_rectangles);
+ return true;
+}
+
 } // namespace sat
 } // namespace operations_research
diff --git a/ortools/sat/2d_rectangle_presolve.h b/ortools/sat/2d_rectangle_presolve.h
index 362f458ecf..a7f1c6f07e 100644
--- a/ortools/sat/2d_rectangle_presolve.h
+++ b/ortools/sat/2d_rectangle_presolve.h
@@ -38,17 +38,29 @@ bool PresolveFixed2dRectangles(
 absl::Span non_fixed_boxes,
 std::vector* fixed_boxes);
-// Given a set of non-overlapping rectangles split in two groups, mandatory and
-// optional, try to build a set of as few non-overlapping rectangles as
-// possible defining a region R that satisfy:
+// Given two vectors of non-overlapping rectangles defining two regions of the
+// space: one mandatory region that must be occupied and one optional region
+// that can be occupied, try to build a vector of as few non-overlapping
+// rectangles as possible defining a region R that satisfies:
 // - R \subset (mandatory \union optional);
 // - mandatory \subset R.
 //
-// The function updates the set of `mandatory_rectangles` with `R` and
+// The function updates the vector of `mandatory_rectangles` with `R` and
 // `optional_rectangles` with `optional_rectangles \setdiff R`. It returns
 // true if the `mandatory_rectangles` was updated.
-bool ReduceNumberofBoxes(std::vector* mandatory_rectangles,
- std::vector* optional_rectangles);
+//
+// This function uses a greedy algorithm that merges rectangles that share an
+// edge.
+bool ReduceNumberofBoxesGreedy(std::vector* mandatory_rectangles,
+ std::vector* optional_rectangles);
+
+// Same as above, but this implementation returns the optimal solution in
+// minimizing the number of boxes if `optional_rectangles` is empty. On the
+// other hand, its handling of optional boxes is rather limited. It simply fills
+// the holes in the mandatory boxes with optional boxes, if possible. 
+bool ReduceNumberOfBoxesExactMandatory( + std::vector* mandatory_rectangles, + std::vector* optional_rectangles); enum EdgePosition { TOP = 0, RIGHT = 1, BOTTOM = 2, LEFT = 3 }; @@ -172,6 +184,8 @@ struct SingleShape { std::vector BoxesToShapes(absl::Span rectangles, const Neighbours& neighbours); +std::vector CutShapeIntoRectangles(SingleShape shapes); + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/2d_rectangle_presolve_test.cc b/ortools/sat/2d_rectangle_presolve_test.cc index 0c47b13152..899dc9cd37 100644 --- a/ortools/sat/2d_rectangle_presolve_test.cc +++ b/ortools/sat/2d_rectangle_presolve_test.cc @@ -51,13 +51,15 @@ std::vector BuildFromAsciiArt(std::string_view input) { for (int i = 0; i < lines.size(); i++) { for (int j = 0; j < lines[i].size(); j++) { if (lines[i][j] != ' ') { - rectangles.push_back( - {.x_min = j, .x_max = j + 1, .y_min = i, .y_max = i + 1}); + rectangles.push_back({.x_min = j, + .x_max = j + 1, + .y_min = 2 * lines.size() - 2 * i, + .y_max = 2 * lines.size() - 2 * i + 2}); } } } std::vector empty; - ReduceNumberofBoxes(&rectangles, &empty); + ReduceNumberofBoxesGreedy(&rectangles, &empty); return rectangles; } @@ -156,8 +158,7 @@ TEST(RectanglePresolve, RemoveOutsideBB) { } TEST(RectanglePresolve, RandomTest) { - constexpr int kTotalRectangles = 100; - constexpr int kFixedRectangleSize = 60; + constexpr int kFixedRectangleSize = 10; constexpr int kNumRuns = 1000; absl::BitGen bit_gen; @@ -165,9 +166,8 @@ TEST(RectanglePresolve, RandomTest) { // Start by generating a feasible problem that we know the solution with // some items fixed. 
std::vector input = - GenerateNonConflictingRectangles(kTotalRectangles, bit_gen); + GenerateNonConflictingRectanglesWithPacking({100, 100}, 40, bit_gen); std::shuffle(input.begin(), input.end(), bit_gen); - CHECK_EQ(input.size(), kTotalRectangles); absl::Span fixed_rectangles = absl::MakeConstSpan(input).subspan(0, kFixedRectangleSize); absl::Span other_rectangles = @@ -185,7 +185,12 @@ TEST(RectanglePresolve, RandomTest) { << RenderDot(std::nullopt, new_fixed_rectangles); } - CHECK_LE(new_fixed_rectangles.size(), kFixedRectangleSize); + if (new_fixed_rectangles.size() > fixed_rectangles.size()) { + LOG(FATAL) << "Presolved:\n" + << RenderDot(std::nullopt, fixed_rectangles) << "To:\n" + << RenderDot(std::nullopt, new_fixed_rectangles); + } + CHECK_LE(new_fixed_rectangles.size(), fixed_rectangles.size()); // Check if the original solution is still a solution. std::vector all_rectangles(new_fixed_rectangles.begin(), @@ -742,16 +747,14 @@ TEST(ContourTest, Random) { GenerateNonConflictingRectanglesWithPacking({100, 100}, 60, bit_gen); std::shuffle(input.begin(), input.end(), bit_gen); const int num_fixed_rectangles = input.size() * 2 / 3; - absl::Span fixed_rectangles = + const absl::Span fixed_rectangles = absl::MakeConstSpan(input).subspan(0, num_fixed_rectangles); - absl::Span other_rectangles = + const absl::Span other_rectangles = absl::MakeSpan(input).subspan(num_fixed_rectangles); - std::vector new_fixed_rectangles(fixed_rectangles.begin(), - fixed_rectangles.end()); const std::vector input_in_range = MakeItemsFromRectangles(other_rectangles, 0.6, bit_gen); - auto neighbours = BuildNeighboursGraph(fixed_rectangles); + const Neighbours neighbours = BuildNeighboursGraph(fixed_rectangles); const auto components = SplitInConnectedComponents(neighbours); const Rectangle bb = {.x_min = 0, .x_max = 100, .y_min = 0, .y_max = 100}; int min_index = -1; @@ -766,25 +769,26 @@ TEST(ContourTest, Random) { } } - auto s = BoxesToShapes(fixed_rectangles, neighbours); - for 
(int i = 0; i < s.size(); ++i) { - const ShapePath& shape = s[i].boundary; + const std::vector shapes = + BoxesToShapes(fixed_rectangles, neighbours); + for (const SingleShape& shape : shapes) { + const ShapePath& boundary = shape.boundary; const ShapePath expected_shape = - TraceBoundary(shape.step_points[0], shape.touching_box_index[0], + TraceBoundary(boundary.step_points[0], boundary.touching_box_index[0], fixed_rectangles, neighbours); - if (shape.step_points != expected_shape.step_points) { + if (boundary.step_points != expected_shape.step_points) { LOG(ERROR) << "Fast algo:\n" - << RenderContour(bb, fixed_rectangles, shape); + << RenderContour(bb, fixed_rectangles, boundary); LOG(ERROR) << "Naive algo:\n" << RenderContour(bb, fixed_rectangles, expected_shape); LOG(FATAL) << "Found different solutions between naive and fast algo!"; } - EXPECT_EQ(shape.step_points, expected_shape.step_points); - EXPECT_EQ(shape.touching_box_index, expected_shape.touching_box_index); + EXPECT_EQ(boundary.step_points, expected_shape.step_points); + EXPECT_EQ(boundary.touching_box_index, expected_shape.touching_box_index); } if (run == 0) { - LOG(INFO) << RenderShapes(bb, fixed_rectangles, s); + LOG(INFO) << RenderShapes(bb, fixed_rectangles, shapes); } } } @@ -839,6 +843,143 @@ TEST(ContourTest, SimpleShapes) { std::make_pair(0, 20))); } +TEST(ContourTest, ExampleFromPaper) { + const std::vector input = BuildFromAsciiArt(R"( + ******************* + ******************* + ********** ******************* + ********** ******************* + *************************************** + *************************************** + *************************************** + *************************************** + *********** ************** **** + *********** ************** **** + *********** ******* *** **** + *********** ******* *** **** + *********** ************** **** + *********** ************** **** + *********** ************** **** + *************************************** + 
*************************************** + *************************************** + ************************************** + ************************************** + ************************************** + ******************************* + *************************************** + *************************************** + **************** **************** + **************** **************** + ****** *** + ****** *** + ****** *** + ****** + )"); + const Neighbours neighbours = BuildNeighboursGraph(input); + auto s = BoxesToShapes(input, neighbours); + LOG(INFO) << RenderDot(std::nullopt, input); + const std::vector output = CutShapeIntoRectangles(s[0]); + LOG(INFO) << RenderDot(std::nullopt, output); + EXPECT_THAT(output.size(), 16); +} + +bool RectanglesCoverSameArea(absl::Span a, + absl::Span b) { + return RegionIncludesOther(a, b) && RegionIncludesOther(b, a); +} + +TEST(ReduceNumberOfBoxes, RandomTestNoOptional) { + constexpr int kNumRuns = 1000; + absl::BitGen bit_gen; + + for (int run = 0; run < kNumRuns; ++run) { + // Start by generating a feasible problem that we know the solution with + // some items fixed. 
+ std::vector input =
+ GenerateNonConflictingRectanglesWithPacking({100, 100}, 60, bit_gen);
+ std::shuffle(input.begin(), input.end(), bit_gen);
+
+ std::vector output = input;
+ std::vector optional_rectangles_empty;
+ ReduceNumberOfBoxesExactMandatory(&output, &optional_rectangles_empty);
+ if (run == 0) {
+ LOG(INFO) << "Presolved:\n" << RenderDot(std::nullopt, input);
+ LOG(INFO) << "To:\n" << RenderDot(std::nullopt, output);
+ }
+
+ if (output.size() > input.size()) {
+ LOG(INFO) << "Presolved:\n" << RenderDot(std::nullopt, input);
+ LOG(INFO) << "To:\n" << RenderDot(std::nullopt, output);
+ ADD_FAILURE() << "ReduceNumberofBoxes() increased the number of boxes, "
+ "but it should be optimal in reducing them!";
+ }
+ CHECK(RectanglesCoverSameArea(output, input));
+ }
+}
+
+TEST(ReduceNumberOfBoxes, Problematic) {
+ // This example shows that we must consider diagonals that touch only at their
+ // extremities as "intersecting" for the bipartite graph.
+ const std::vector input = {
+ {.x_min = 26, .x_max = 51, .y_min = 54, .y_max = 81},
+ {.x_min = 51, .x_max = 78, .y_min = 44, .y_max = 67},
+ {.x_min = 51, .x_max = 62, .y_min = 67, .y_max = 92},
+ {.x_min = 78, .x_max = 98, .y_min = 24, .y_max = 54},
+ };
+ std::vector output = input;
+ std::vector optional_rectangles_empty;
+ ReduceNumberOfBoxesExactMandatory(&output, &optional_rectangles_empty);
+ LOG(INFO) << "Presolved:\n" << RenderDot(std::nullopt, input);
+ LOG(INFO) << "To:\n" << RenderDot(std::nullopt, output);
+}
+
+// This example shows that sometimes the best solution with respect to minimum
+// number of boxes requires *not* filling a hole. Actually this follows from the
+// formula that the minimum number of rectangles in a partition of a polygon
+// with n vertices and h holes is n/2 + h − g − 1, where g is the number of
+// non-intersecting good diagonals. This test-case shows a polygon with 4
+// internal vertices, 1 hole and 4 non-intersecting good diagonals that include
+// the hole. 
Removing the hole reduces the n/2 term by 2, decrease the h term by +// 1, but decrease the g term by 4. +// +// *********************** +// *********************** +// ***********************..................... +// ***********************..................... +// ***********************..................... +// ***********************..................... +// ***********************..................... +// ++++++++++++++++++++++ ..................... +// ++++++++++++++++++++++ ..................... +// ++++++++++++++++++++++ ..................... +// ++++++++++++++++++++++000000000000000000000000 +// ++++++++++++++++++++++000000000000000000000000 +// ++++++++++++++++++++++000000000000000000000000 +// 000000000000000000000000 +// 000000000000000000000000 +// 000000000000000000000000 +// 000000000000000000000000 +// +TEST(ReduceNumberOfBoxes, Problematic2) { + const std::vector input = { + {.x_min = 64, .x_max = 82, .y_min = 76, .y_max = 98}, + {.x_min = 39, .x_max = 59, .y_min = 63, .y_max = 82}, + {.x_min = 59, .x_max = 78, .y_min = 61, .y_max = 76}, + {.x_min = 44, .x_max = 64, .y_min = 82, .y_max = 100}, + }; + std::vector output = input; + std::vector optional_rectangles = { + {.x_min = 59, .x_max = 64, .y_min = 76, .y_max = 82}, + }; + ReduceNumberOfBoxesExactMandatory(&output, &optional_rectangles); + LOG(INFO) << "Presolving:\n" << RenderDot(std::nullopt, input); + + // Presolve will refuse to do anything since removing the hole will increase + // the number of boxes. 
+ CHECK(input == output); +} + } // namespace } // namespace sat } // namespace operations_research diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 9991d8998d..5077db303c 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -2532,6 +2532,7 @@ cc_library( ":diffn_util", ":integer", "//ortools/base:stl_util", + "//ortools/graph:max_flow", "//ortools/graph:strongly_connected_components", "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/container:btree", diff --git a/ortools/sat/cuts.cc b/ortools/sat/cuts.cc index 289d4da62c..ea727355d4 100644 --- a/ortools/sat/cuts.cc +++ b/ortools/sat/cuts.cc @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -232,7 +233,7 @@ bool CutData::AllCoefficientsArePositive() const { return true; } -void CutData::Canonicalize() { +void CutData::SortRelevantEntries() { num_relevant_entries = 0; max_magnitude = 0; for (CutTerm& entry : terms) { @@ -269,92 +270,78 @@ double CutData::ComputeEfficacy() const { return violation / std::sqrt(norm); } -void CutDataBuilder::ClearIndices() { - num_merges_ = 0; - constraint_is_indexed_ = false; - bool_index_.clear(); - secondary_bool_index_.clear(); +// We can only merge the term if term.coeff + old_coeff do not overflow and +// if t * new_coeff do not overflow. +// +// If we cannot merge the term, we will keep them separate. The produced cut +// will be less strong, but can still be used. +bool CutDataBuilder::MergeIfPossible(IntegerValue t, CutTerm& to_add, + CutTerm& target) { + DCHECK_EQ(to_add.expr_vars[0], target.expr_vars[0]); + DCHECK_EQ(to_add.expr_coeffs[0], target.expr_coeffs[0]); + + const IntegerValue new_coeff = CapAddI(to_add.coeff, target.coeff); + if (AtMinOrMaxInt64I(new_coeff) || ProdOverflow(t, new_coeff)) { + return false; + } + + to_add.coeff = 0; // Clear since we merge it. 
+ target.coeff = new_coeff; + return true; } -void CutDataBuilder::RegisterAllBooleanTerms(const CutData& cut) { - constraint_is_indexed_ = true; - const int size = cut.terms.size(); - for (int i = 0; i < size; ++i) { - const CutTerm& term = cut.terms[i]; +// We only deal with coeff * Bool or coeff * (1 - Bool) +// +// TODO(user): Because of merges, we might have entry with a coefficient of +// zero than are not useful. Remove them? +int CutDataBuilder::AddOrMergeBooleanTerms(absl::Span new_terms, + IntegerValue t, CutData* cut) { + if (new_terms.empty()) return 0; + + bool_index_.clear(); + secondary_bool_index_.clear(); + int num_merges = 0; + + // Fill the maps. + int i = 0; + for (CutTerm& term : new_terms) { + const IntegerVariable var = term.expr_vars[0]; + auto& map = term.expr_coeffs[0] > 0 ? bool_index_ : secondary_bool_index_; + const auto [it, inserted] = map.insert({var, i}); + if (!inserted) { + if (MergeIfPossible(t, term, new_terms[it->second])) { + ++num_merges; + } + } + ++i; + } + + // Loop over the cut now. Note that we loop with indices as we might add new + // terms in the middle of the loop. + for (CutTerm& term : cut->terms) { if (term.bound_diff != 1) continue; if (!term.IsSimple()) continue; - // Initially we shouldn't have duplicate bools and (1 - bools). - // So we just fill bool_index_. - bool_index_[term.expr_vars[0]] = i; - } -} + const IntegerVariable var = term.expr_vars[0]; + auto& map = term.expr_coeffs[0] > 0 ? 
bool_index_ : secondary_bool_index_; + auto it = map.find(var); + if (it == map.end()) continue; -void CutDataBuilder::AddOrMergeTerm(const CutTerm& term, IntegerValue t, - CutData* cut) { - if (!constraint_is_indexed_) { - RegisterAllBooleanTerms(*cut); - } - - DCHECK(term.IsSimple()); - const IntegerVariable var = term.expr_vars[0]; - const bool is_positive = (term.expr_coeffs[0] > 0); - const int new_index = cut->terms.size(); - const auto [it, inserted] = bool_index_.insert({var, new_index}); - if (inserted) { - cut->terms.push_back(term); - return; - } - - // If the referred var is not right, replace the entry. - int entry_index = it->second; - if (entry_index >= new_index || cut->terms[entry_index].expr_vars[0] != var) { - it->second = new_index; - cut->terms.push_back(term); - return; - } - - // If the sign is not right, look into secondary hash_map for opposite sign. - if ((cut->terms[entry_index].expr_coeffs[0] > 0) != is_positive) { - const auto [it, inserted] = secondary_bool_index_.insert({var, new_index}); - if (inserted) { - cut->terms.push_back(term); - return; - } - - // If the referred var is not right, replace the entry. - entry_index = it->second; - if (entry_index >= new_index || - cut->terms[entry_index].expr_vars[0] != var) { - it->second = new_index; - cut->terms.push_back(term); - return; - } - - // If the sign is not right, replace the entry. - if ((cut->terms[entry_index].expr_coeffs[0] > 0) != is_positive) { - it->second = new_index; - cut->terms.push_back(term); - return; + // We found a match, try to merge the map entry into the cut. + // Note that we don't waste time erasing this entry from the map since + // we should have no duplicates in the original cut. 
+ if (MergeIfPossible(t, new_terms[it->second], term)) { + ++num_merges; } } - DCHECK_EQ(cut->terms[entry_index].expr_vars[0], var); - DCHECK_EQ((cut->terms[entry_index].expr_coeffs[0] > 0), is_positive); - // We can only merge the term if term.coeff + old_coeff do not overflow and - // if t * new_coeff do not overflow. - // - // If we cannot merge the term, we will keep them separate. The produced cut - // will be less strong, but can still be used. - const IntegerValue new_coeff = - CapAddI(cut->terms[entry_index].coeff, term.coeff); - if (AtMinOrMaxInt64I(new_coeff) || ProdOverflow(t, new_coeff)) { - // If we cannot merge the term, we keep them separate. + // Finally add the terms we couldn't merge. + for (const CutTerm& term : new_terms) { + if (term.coeff == 0) continue; cut->terms.push_back(term); - } else { - ++num_merges_; - cut->terms[entry_index].coeff = new_coeff; } + + return num_merges; } // TODO(user): Divide by gcd first to avoid possible overflow in the @@ -788,40 +775,38 @@ bool IntegerRoundingCutHelper::ComputeCut( // This should be better except it can mess up the norm and the divisors. cut_ = base_ct; if (options.use_ib_before_heuristic && ib_processor != nullptr) { - ib_processor->BaseCutBuilder()->ClearNumMerges(); - const int old_size = static_cast(cut_.terms.size()); - bool abort = true; - for (int i = 0; i < old_size; ++i) { - if (cut_.terms[i].bound_diff <= 1) continue; - if (!cut_.terms[i].HasRelevantLpValue()) continue; + std::vector* new_bool_terms = + ib_processor->ClearedMutableTempTerms(); + for (CutTerm& term : cut_.terms) { + if (term.bound_diff <= 1) continue; + if (!term.HasRelevantLpValue()) continue; - if (options.prefer_positive_ib && cut_.terms[i].coeff < 0) { + if (options.prefer_positive_ib && term.coeff < 0) { // We complement the term before trying the implied bound. 
- cut_.terms[i].Complement(&cut_.rhs); + term.Complement(&cut_.rhs); if (ib_processor->TryToExpandWithLowerImpliedbound( - IntegerValue(1), i, - /*complement=*/true, &cut_, ib_processor->BaseCutBuilder())) { + IntegerValue(1), + /*complement=*/true, &term, &cut_.rhs, new_bool_terms)) { ++total_num_initial_ibs_; - abort = false; continue; } - cut_.terms[i].Complement(&cut_.rhs); + term.Complement(&cut_.rhs); } if (ib_processor->TryToExpandWithLowerImpliedbound( - IntegerValue(1), i, - /*complement=*/true, &cut_, ib_processor->BaseCutBuilder())) { - abort = false; + IntegerValue(1), + /*complement=*/true, &term, &cut_.rhs, new_bool_terms)) { ++total_num_initial_ibs_; } } - total_num_initial_merges_ += - ib_processor->BaseCutBuilder()->NumMergesSinceLastClear(); // TODO(user): We assume that this is called with and without the option // use_ib_before_heuristic, so that we can abort if no IB has been applied // since then we will redo the computation. This is not really clean. - if (abort) return false; + if (new_bool_terms->empty()) return false; + total_num_initial_merges_ += + ib_processor->MutableCutBuilder()->AddOrMergeBooleanTerms( + absl::MakeSpan(*new_bool_terms), IntegerValue(1), &cut_); } // Our heuristic will try to generate a few different cuts, and we will keep @@ -841,7 +826,7 @@ bool IntegerRoundingCutHelper::ComputeCut( // // TODO(user): If the rhs is small and close to zero, we might want to // consider different way of complementing the variables. - cut_.Canonicalize(); + cut_.SortRelevantEntries(); const IntegerValue remainder_threshold( std::max(IntegerValue(1), cut_.max_magnitude / 1000)); if (cut_.rhs >= 0 && cut_.rhs < remainder_threshold.value()) { @@ -996,11 +981,11 @@ bool IntegerRoundingCutHelper::ComputeCut( // This should lead to stronger cuts even if the norms might be worse. 
num_ib_used_ = 0; if (ib_processor != nullptr) { - const auto [num_lb, num_ub] = ib_processor->PostprocessWithImpliedBound( - f, factor_t, &cut_, &cut_builder_); + const auto [num_lb, num_ub, num_merges] = + ib_processor->PostprocessWithImpliedBound(f, factor_t, &cut_); total_num_pos_lifts_ += num_lb; total_num_neg_lifts_ += num_ub; - total_num_merges_ += cut_builder_.NumMergesSinceLastClear(); + total_num_merges_ += num_merges; num_ib_used_ = num_lb + num_ub; } @@ -1296,21 +1281,23 @@ bool CoverCutHelper::TrySimpleKnapsack(const CutData& input_ct, // Tricky: This only work because the cut absl128 rhs is not changed by these // operations. if (ib_processor != nullptr) { - ib_processor->BaseCutBuilder()->ClearNumMerges(); - const int old_size = static_cast(cut_.terms.size()); - for (int i = 0; i < old_size; ++i) { + std::vector* new_bool_terms = + ib_processor->ClearedMutableTempTerms(); + for (CutTerm& term : cut_.terms) { // We only look at non-Boolean with an lp value not close to the upper // bound. 
- const CutTerm& term = cut_.terms[i]; if (term.bound_diff <= 1) continue; if (term.lp_value + 1e-4 > AsDouble(term.bound_diff)) continue; if (ib_processor->TryToExpandWithLowerImpliedbound( - IntegerValue(1), i, - /*complement=*/false, &cut_, ib_processor->BaseCutBuilder())) { + IntegerValue(1), + /*complement=*/false, &term, &cut_.rhs, new_bool_terms)) { ++cover_stats_.num_initial_ibs; } } + + ib_processor->MutableCutBuilder()->AddOrMergeBooleanTerms( + absl::MakeSpan(*new_bool_terms), IntegerValue(1), &cut_); } bool has_relevant_int = false; @@ -1386,11 +1373,11 @@ bool CoverCutHelper::TrySimpleKnapsack(const CutData& input_ct, } if (ib_processor != nullptr) { - const auto [num_lb, num_ub] = ib_processor->PostprocessWithImpliedBound( - f, /*factor_t=*/1, &cut_, &cut_builder_); + const auto [num_lb, num_ub, num_merges] = + ib_processor->PostprocessWithImpliedBound(f, /*factor_t=*/1, &cut_); cover_stats_.num_lb_ibs += num_lb; cover_stats_.num_ub_ibs += num_ub; - cover_stats_.num_merges += cut_builder_.NumMergesSinceLastClear(); + cover_stats_.num_merges += num_merges; } cover_stats_.num_bumps += ApplyWithPotentialBump(f, best_coeff, &cut_); @@ -1466,11 +1453,11 @@ bool CoverCutHelper::TrySingleNodeFlow(const CutData& input_ct, min_magnitude); if (ib_processor != nullptr) { - const auto [num_lb, num_ub] = ib_processor->PostprocessWithImpliedBound( - f, /*factor_t=*/1, &cut_, &cut_builder_); + const auto [num_lb, num_ub, num_merges] = + ib_processor->PostprocessWithImpliedBound(f, /*factor_t=*/1, &cut_); flow_stats_.num_lb_ibs += num_lb; flow_stats_.num_ub_ibs += num_ub; - flow_stats_.num_merges += cut_builder_.NumMergesSinceLastClear(); + flow_stats_.num_merges += num_merges; } // Lifting. @@ -1525,16 +1512,19 @@ bool CoverCutHelper::TryWithLetchfordSouliLifting( // // TODO(user): Merge Boolean terms that are complement of each other. 
if (ib_processor != nullptr) { - ib_processor->BaseCutBuilder()->ClearNumMerges(); - const int old_size = static_cast(cut_.terms.size()); - for (int i = 0; i < old_size; ++i) { - if (cut_.terms[i].bound_diff <= 1) continue; + std::vector* new_bool_terms = + ib_processor->ClearedMutableTempTerms(); + for (CutTerm& term : cut_.terms) { + if (term.bound_diff <= 1) continue; if (ib_processor->TryToExpandWithLowerImpliedbound( - IntegerValue(1), i, - /*complement=*/false, &cut_, ib_processor->BaseCutBuilder())) { + IntegerValue(1), + /*complement=*/false, &term, &cut_.rhs, new_bool_terms)) { ++ls_stats_.num_initial_ibs; } } + + ib_processor->MutableCutBuilder()->AddOrMergeBooleanTerms( + absl::MakeSpan(*new_bool_terms), IntegerValue(1), &cut_); } // TODO(user): we currently only deal with Boolean in the cover. Fix. @@ -2191,9 +2181,9 @@ bool ImpliedBoundsProcessor::DecomposeWithImpliedUpperBound( return true; } -std::pair ImpliedBoundsProcessor::PostprocessWithImpliedBound( +std::tuple ImpliedBoundsProcessor::PostprocessWithImpliedBound( const std::function& f, IntegerValue factor_t, - CutData* cut, CutDataBuilder* builder) { + CutData* cut) { int num_applied_lb = 0; int num_applied_ub = 0; @@ -2201,10 +2191,9 @@ std::pair ImpliedBoundsProcessor::PostprocessWithImpliedBound( CutTerm slack_term; CutTerm ub_bool_term; CutTerm ub_slack_term; - builder->ClearIndices(); - const int initial_size = cut->terms.size(); - for (int i = 0; i < initial_size; ++i) { - CutTerm& term = cut->terms[i]; + + tmp_terms_.clear(); + for (CutTerm& term : cut->terms) { if (term.bound_diff <= 1) continue; if (!term.IsSimple()) continue; @@ -2254,30 +2243,31 @@ std::pair ImpliedBoundsProcessor::PostprocessWithImpliedBound( // loose more, so we prefer to be a bit defensive. if (score > base_score + 1e-2) { ++num_applied_ub; - term = ub_slack_term; // Override first before push_back() ! 
- builder->AddOrMergeTerm(ub_bool_term, factor_t, cut); + term = ub_slack_term; + tmp_terms_.push_back(ub_bool_term); continue; } } if (expand) { ++num_applied_lb; - term = slack_term; // Override first before push_back() ! - builder->AddOrMergeTerm(bool_term, factor_t, cut); + term = slack_term; + tmp_terms_.push_back(bool_term); } } - return {num_applied_lb, num_applied_ub}; + + const int num_merges = cut_builder_.AddOrMergeBooleanTerms( + absl::MakeSpan(tmp_terms_), factor_t, cut); + + return {num_applied_lb, num_applied_ub, num_merges}; } -// Important: The cut_builder_ must have been reset. bool ImpliedBoundsProcessor::TryToExpandWithLowerImpliedbound( - IntegerValue factor_t, int i, bool complement, CutData* cut, - CutDataBuilder* builder) { - CutTerm& term = cut->terms[i]; - + IntegerValue factor_t, bool complement, CutTerm* term, absl::int128* rhs, + std::vector* new_bool_terms) { CutTerm bool_term; CutTerm slack_term; - if (!DecomposeWithImpliedLowerBound(term, factor_t, bool_term, slack_term)) { + if (!DecomposeWithImpliedLowerBound(*term, factor_t, bool_term, slack_term)) { return false; } @@ -2286,26 +2276,22 @@ bool ImpliedBoundsProcessor::TryToExpandWithLowerImpliedbound( // It is always good to complement such variable. // // Note that here we do more and just complement anything closer to UB. - // - // TODO(user): Because of merges, we might have entry with a coefficient of - // zero than are not useful. Remove them. 
if (complement) { if (bool_term.lp_value > 0.5) { - bool_term.Complement(&cut->rhs); + bool_term.Complement(rhs); } if (slack_term.lp_value > 0.5 * AsDouble(slack_term.bound_diff)) { - slack_term.Complement(&cut->rhs); + slack_term.Complement(rhs); } } - term = slack_term; - builder->AddOrMergeTerm(bool_term, factor_t, cut); + *term = slack_term; + new_bool_terms->push_back(bool_term); return true; } bool ImpliedBoundsProcessor::CacheDataForCut(IntegerVariable first_slack, CutData* cut) { - base_cut_builder_.ClearIndices(); cached_data_.clear(); const int size = cut->terms.size(); diff --git a/ortools/sat/cuts.h b/ortools/sat/cuts.h index 7e975cc6d4..245a440c87 100644 --- a/ortools/sat/cuts.h +++ b/ortools/sat/cuts.h @@ -134,6 +134,10 @@ struct CutData { double ComputeViolation() const; double ComputeEfficacy() const; + // This sorts terms by decreasing lp values and fills both + // num_relevant_entries and max_magnitude. + void SortRelevantEntries(); + std::string DebugString() const; // Note that we use a 128 bit rhs so we can freely complement variable without @@ -141,8 +145,7 @@ struct CutData { absl::int128 rhs; std::vector terms; - // This sorts terms and fill both num_relevant_entries and max_magnitude. - void Canonicalize(); + // Only filled after SortRelevantEntries(). IntegerValue max_magnitude; int num_relevant_entries; }; @@ -150,24 +153,21 @@ struct CutData { // Stores temporaries used to build or manipulate a CutData. class CutDataBuilder { public: + // Returns false if we encounter an integer overflow. + bool ConvertToLinearConstraint(const CutData& cut, LinearConstraint* output); + // These function allow to merges entries corresponding to the same variable // and complementation. That is (X - lb) and (ub - X) are NOT merged and kept // as separate terms. Note that we currently only merge Booleans since this // is the only case we need. 
- void ClearIndices(); - void AddOrMergeTerm(const CutTerm& term, IntegerValue t, CutData* cut); - - void ClearNumMerges() { num_merges_ = 0; } - int NumMergesSinceLastClear() const { return num_merges_; } - - // Returns false if we encounter an integer overflow. - bool ConvertToLinearConstraint(const CutData& cut, LinearConstraint* output); + // + // Return num_merges. + int AddOrMergeBooleanTerms(absl::Span terms, IntegerValue t, + CutData* cut); private: - void RegisterAllBooleanTerms(const CutData& cut); + bool MergeIfPossible(IntegerValue t, CutTerm& to_add, CutTerm& target); - int num_merges_ = 0; - bool constraint_is_indexed_ = false; absl::flat_hash_map bool_index_; absl::flat_hash_map secondary_bool_index_; absl::btree_map tmp_map_; @@ -219,27 +219,31 @@ class ImpliedBoundsProcessor { // We are about to apply the super-additive function f() to the CutData. Use // implied bound information to eventually substitute and make the cut - // stronger. Returns the number of {lb_ib, ub_ib} applied. + // stronger. Returns the number of {lb_ib, ub_ib, merges} applied. // // This should lead to stronger cuts even if the norms migth be worse. - std::pair PostprocessWithImpliedBound( + std::tuple PostprocessWithImpliedBound( const std::function& f, IntegerValue factor_t, - CutData* cut, CutDataBuilder* builder); + CutData* cut); // Precomputes quantities used by all cut generation. // This allows to do that once rather than 6 times. // Return false if there are no exploitable implied bounds. bool CacheDataForCut(IntegerVariable first_slack, CutData* cut); - // All our cut code use the same base cut (modulo complement), so we reuse the - // hash-map of where boolean are in the cut. Note that even if we add new - // entry that are no longer there for another cut algo, we can still reuse the - // same hash-map. 
- CutDataBuilder* BaseCutBuilder() { return &base_cut_builder_; } + bool TryToExpandWithLowerImpliedbound(IntegerValue factor_t, bool complement, + CutTerm* term, absl::int128* rhs, + std::vector* new_bool_terms); - bool TryToExpandWithLowerImpliedbound(IntegerValue factor_t, int i, - bool complement, CutData* cut, - CutDataBuilder* builder); + // This can be used to share the hash-map memory. + CutDataBuilder* MutableCutBuilder() { return &cut_builder_; } + + // This can be used as a temporary storage for + // TryToExpandWithLowerImpliedbound(). + std::vector* ClearedMutableTempTerms() { + tmp_terms_.clear(); + return &tmp_terms_; + } // Add a new variable that could be used in the new cuts. // Note that the cache must be computed to take this into account. @@ -283,7 +287,8 @@ class ImpliedBoundsProcessor { mutable absl::flat_hash_map cache_; // Temporary data used by CacheDataForCut(). - CutDataBuilder base_cut_builder_; + std::vector tmp_terms_; + CutDataBuilder cut_builder_; std::vector cached_data_; TopNCuts ib_cut_pool_ = TopNCuts(50); @@ -431,7 +436,6 @@ class IntegerRoundingCutHelper { std::vector best_rs_; int64_t num_ib_used_ = 0; - CutDataBuilder cut_builder_; CutData cut_; std::vector> adjusted_coeffs_; @@ -531,7 +535,6 @@ class CoverCutHelper { // Here to reuse memory, cut_ is both the input and the output. CutData cut_; CutData temp_cut_; - CutDataBuilder cut_builder_; // Hack to not sort twice. bool has_bool_base_ct_ = false; diff --git a/ortools/sat/cuts_test.cc b/ortools/sat/cuts_test.cc index 130db4669f..d8d78d153c 100644 --- a/ortools/sat/cuts_test.cc +++ b/ortools/sat/cuts_test.cc @@ -885,7 +885,6 @@ TEST(ImpliedBoundsProcessorTest, PositiveBasicTest) { // Lets look at the term X. 
CutData data; - CutDataBuilder builder; CutTerm X; X.coeff = 1; @@ -898,9 +897,14 @@ TEST(ImpliedBoundsProcessorTest, PositiveBasicTest) { data.terms.push_back(X); processor.CacheDataForCut(IntegerVariable(100), &data); - EXPECT_TRUE(processor.TryToExpandWithLowerImpliedbound(IntegerValue(1), 0, - /*complement=*/false, - &data, &builder)); + const IntegerValue t(1); + std::vector new_terms; + EXPECT_TRUE(processor.TryToExpandWithLowerImpliedbound( + t, /*complement=*/false, &data.terms[0], &data.rhs, &new_terms)); + + EXPECT_EQ(0, processor.MutableCutBuilder()->AddOrMergeBooleanTerms( + absl::MakeSpan(new_terms), t, &data)); + EXPECT_EQ(data.terms.size(), 2); EXPECT_THAT(data.terms[0].DebugString(), ::testing::StartsWith("coeff=1 lp=0 range=7")); @@ -937,7 +941,6 @@ TEST(ImpliedBoundsProcessorTest, NegativeBasicTest) { // Lets look at the term X. CutData data; - CutDataBuilder builder; CutTerm X; X.coeff = 1; @@ -950,9 +953,14 @@ TEST(ImpliedBoundsProcessorTest, NegativeBasicTest) { data.terms.push_back(X); processor.CacheDataForCut(IntegerVariable(100), &data); - EXPECT_TRUE(processor.TryToExpandWithLowerImpliedbound(IntegerValue(1), 0, - /*complement=*/false, - &data, &builder)); + + const IntegerValue t(1); + std::vector new_terms; + EXPECT_TRUE(processor.TryToExpandWithLowerImpliedbound( + t, /*complement=*/false, &data.terms[0], &data.rhs, &new_terms)); + EXPECT_EQ(0, processor.MutableCutBuilder()->AddOrMergeBooleanTerms( + absl::MakeSpan(new_terms), t, &data)); + EXPECT_EQ(data.terms.size(), 2); EXPECT_THAT(data.terms[0].DebugString(), ::testing::StartsWith("coeff=1 lp=0 range=7")); diff --git a/ortools/sat/diffn_util.cc b/ortools/sat/diffn_util.cc index ba8875ff6a..2c7d93d365 100644 --- a/ortools/sat/diffn_util.cc +++ b/ortools/sat/diffn_util.cc @@ -51,7 +51,7 @@ bool Rectangle::IsDisjoint(const Rectangle& other) const { other.y_min >= y_max; } -absl::InlinedVector Rectangle::SetDifference( +absl::InlinedVector Rectangle::RegionDifference( const Rectangle& 
other) const { const Rectangle intersect = Intersect(other); if (intersect.SizeX() == 0) { @@ -155,8 +155,8 @@ bool ReportEnergyConflict(Rectangle bounding_box, absl::Span boxes, return x->ReportConflict(); } -bool BoxesAreInEnergyConflict(const std::vector& rectangles, - const std::vector& energies, +bool BoxesAreInEnergyConflict(absl::Span rectangles, + absl::Span energies, absl::Span boxes, Rectangle* conflict) { // First consider all relevant intervals along the x axis. @@ -1542,18 +1542,17 @@ std::string RenderDot(std::optional bb, std::stringstream ss; ss << "digraph {\n"; ss << " graph [ bgcolor=lightgray ]\n"; - ss << " node [style=filled]\n"; + ss << " node [style=filled shape=box]\n"; if (bb.has_value()) { ss << " bb [fillcolor=\"grey\" pos=\"" << 2 * bb->x_min + bb->SizeX() - << "," << 2 * bb->y_min + bb->SizeY() - << "!\" shape=box width=" << 2 * bb->SizeX() + << "," << 2 * bb->y_min + bb->SizeY() << "!\" width=" << 2 * bb->SizeX() << " height=" << 2 * bb->SizeY() << "]\n"; } for (int i = 0; i < solution.size(); ++i) { ss << " " << i << " [fillcolor=\"" << colors[i % colors.size()] << "\" pos=\"" << 2 * solution[i].x_min + solution[i].SizeX() << "," << 2 * solution[i].y_min + solution[i].SizeY() - << "!\" shape=box width=" << 2 * solution[i].SizeX() + << "!\" width=" << 2 * solution[i].SizeX() << " height=" << 2 * solution[i].SizeY() << "]\n"; } ss << extra_dot_payload; @@ -1563,27 +1562,30 @@ std::string RenderDot(std::optional bb, std::vector FindEmptySpaces( const Rectangle& bounding_box, std::vector ocupied_rectangles) { - std::vector empty_spaces = {bounding_box}; - std::vector new_empty_spaces; // Sorting is not necessary for correctness but makes it faster. 
std::sort(ocupied_rectangles.begin(), ocupied_rectangles.end(), [](const Rectangle& a, const Rectangle& b) { return std::tuple(a.x_min, -a.x_max, a.y_min) < std::tuple(b.x_min, -b.x_max, b.y_min); }); - for (const Rectangle& ocupied_rectangle : ocupied_rectangles) { - new_empty_spaces.clear(); - for (const auto& empty_space : empty_spaces) { - for (Rectangle& r : empty_space.SetDifference(ocupied_rectangle)) { - new_empty_spaces.push_back(std::move(r)); - } - } - empty_spaces.swap(new_empty_spaces); - if (empty_spaces.empty()) { - break; + return PavedRegionDifference({bounding_box}, ocupied_rectangles); +} + +std::vector PavedRegionDifference( + std::vector original_region, + absl::Span area_to_remove) { + std::vector new_area_to_cover; + for (const Rectangle& rectangle : area_to_remove) { + new_area_to_cover.clear(); + for (const Rectangle& r : original_region) { + const auto& new_rectangles = r.RegionDifference(rectangle); + new_area_to_cover.insert(new_area_to_cover.end(), new_rectangles.begin(), + new_rectangles.end()); } + original_region.swap(new_area_to_cover); + if (original_region.empty()) break; } - return empty_spaces; + return original_region; } } // namespace sat diff --git a/ortools/sat/diffn_util.h b/ortools/sat/diffn_util.h index 3aa6933807..c2688c0d8c 100644 --- a/ortools/sat/diffn_util.h +++ b/ortools/sat/diffn_util.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -63,7 +64,8 @@ struct Rectangle { // Returns `this \ other` as a set of disjoint rectangles of non-empty area. // The resulting vector will have at most four elements. - absl::InlinedVector SetDifference(const Rectangle& other) const; + absl::InlinedVector RegionDifference( + const Rectangle& other) const; template friend void AbslStringify(Sink& sink, const Rectangle& r) { @@ -125,8 +127,8 @@ std::vector> GetOverlappingRectangleComponents( // Visible for testing. The algo is in O(n^4) so shouldn't be used directly. 
// Returns true if there exist a bounding box with too much energy. -bool BoxesAreInEnergyConflict(const std::vector& rectangles, - const std::vector& energies, +bool BoxesAreInEnergyConflict(absl::Span rectangles, + absl::Span energies, absl::Span boxes, Rectangle* conflict = nullptr); @@ -615,6 +617,20 @@ std::string RenderDot(std::optional bb, std::vector FindEmptySpaces( const Rectangle& bounding_box, std::vector ocupied_rectangles); +// Given two regions, each one of them defined by a vector of non-overlapping +// rectangles paving them, returns a vector of non-overlapping rectangles that +// paves the points that were part of the first region but not of the second. +// This can also be seen as the set difference of the points of the regions. +std::vector PavedRegionDifference( + std::vector original_region, + absl::Span area_to_remove); + +// The two regions must be defined by non-overlapping rectangles. +inline bool RegionIncludesOther(absl::Span region, + absl::Span other) { + return PavedRegionDifference({other.begin(), other.end()}, region).empty(); +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/diffn_util_test.cc b/ortools/sat/diffn_util_test.cc index 508ac031be..b0d9a11279 100644 --- a/ortools/sat/diffn_util_test.cc +++ b/ortools/sat/diffn_util_test.cc @@ -483,7 +483,7 @@ TEST(RectangleTest, BasicTest) { Rectangle({.x_min = 1, .x_max = 2, .y_min = 1, .y_max = 2})); } -TEST(RectangleTest, RandomSetDifferenceTest) { +TEST(RectangleTest, RandomRegionDifferenceTest) { absl::BitGen random; const int64_t size = 20; constexpr int num_runs = 400; @@ -497,7 +497,7 @@ TEST(RectangleTest, RandomSetDifferenceTest) { ret[i].y_max = ret[i].y_min + IntegerValue(absl::Uniform(random, 1, size - 1)); } - auto set_diff = ret[0].SetDifference(ret[1]); + auto set_diff = ret[0].RegionDifference(ret[1]); EXPECT_EQ(set_diff.empty(), ret[0].Intersect(ret[1]) == ret[0]); IntegerValue diff_area = 0; for (int i = 0; i < set_diff.size(); ++i) { 
@@ -514,6 +514,34 @@ TEST(RectangleTest, RandomSetDifferenceTest) { } } +TEST(RectangleTest, RandomPavedRegionDifferenceTest) { + absl::BitGen random; + constexpr int num_runs = 100; + for (int k = 0; k < num_runs; k++) { + const std::vector set1 = + GenerateNonConflictingRectanglesWithPacking({100, 100}, 60, random); + const std::vector set2 = + GenerateNonConflictingRectanglesWithPacking({100, 100}, 60, random); + + const std::vector diff = PavedRegionDifference(set1, set2); + for (int i = 0; i < diff.size(); ++i) { + for (int j = i + 1; j < diff.size(); ++j) { + EXPECT_TRUE(diff[i].IsDisjoint(diff[j])); + } + } + for (const auto& r_diff : diff) { + for (const auto& r2 : set2) { + EXPECT_TRUE(r_diff.IsDisjoint(r2)); + } + IntegerValue area = 0; + for (const auto& r1 : set1) { + area += r_diff.IntersectArea(r1); + } + EXPECT_EQ(area, r_diff.Area()); + } + } +} + TEST(GetMinimumOverlapTest, BasicTest) { RectangleInRange range_ret = { .bounding_area = {.x_min = 0, .x_max = 15, .y_min = 0, .y_max = 15}, diff --git a/ortools/sat/integer.cc b/ortools/sat/integer.cc index 85fe9849af..5803e39202 100644 --- a/ortools/sat/integer.cc +++ b/ortools/sat/integer.cc @@ -49,7 +49,7 @@ namespace operations_research { namespace sat { std::vector NegationOf( - const std::vector& vars) { + absl::Span vars) { std::vector result(vars.size()); for (int i = 0; i < vars.size(); ++i) { result[i] = NegationOf(vars[i]); diff --git a/ortools/sat/integer.h b/ortools/sat/integer.h index 3923dcb89d..8aa355bddb 100644 --- a/ortools/sat/integer.h +++ b/ortools/sat/integer.h @@ -207,8 +207,7 @@ inline std::string IntegerTermDebugString(IntegerVariable var, } // Returns the vector of the negated variables. -std::vector NegationOf( - const std::vector& vars); +std::vector NegationOf(absl::Span vars); // The integer equivalent of a literal. // It represents an IntegerVariable and an upper/lower bound on it. 
diff --git a/ortools/sat/linear_constraint_test.cc b/ortools/sat/linear_constraint_test.cc index bc87f50ed3..cad9ad4d9e 100644 --- a/ortools/sat/linear_constraint_test.cc +++ b/ortools/sat/linear_constraint_test.cc @@ -44,7 +44,8 @@ TEST(ComputeActivityTest, BasicBehavior) { util_intops::StrongVector values = {0.5, 0.0, 1.4, 0.0, -2.1, 0.0}; - EXPECT_NEAR(ComputeActivity(ct.Build(), values), 1 * 0.5 - 2 * 1.4 - 3 * 2.1, 1e-6); + EXPECT_NEAR(ComputeActivity(ct.Build(), values), 1 * 0.5 - 2 * 1.4 - 3 * 2.1, + 1e-6); } TEST(ComputeActivityTest, EmptyConstraint) { diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index e69f6cbc25..a1d70915b6 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -219,10 +219,11 @@ void ScatteredIntegerVector::ConvertToCutData( CutData* result) { result->terms.clear(); result->rhs = rhs; + absl::Span dense_vector = dense_vector_; if (is_sparse_) { std::sort(non_zeros_.begin(), non_zeros_.end()); for (const glop::ColIndex col : non_zeros_) { - const IntegerValue coeff = dense_vector_[col]; + const IntegerValue coeff = dense_vector[col.value()]; if (coeff == 0) continue; const IntegerVariable var = integer_variables[col.value()]; CHECK(result->AppendOneTerm(var, coeff, lp_solution[col.value()], @@ -230,12 +231,11 @@ void ScatteredIntegerVector::ConvertToCutData( integer_trail->LevelZeroUpperBound(var))); } } else { - const int size = dense_vector_.size(); - for (glop::ColIndex col(0); col < size; ++col) { - const IntegerValue coeff = dense_vector_[col]; + for (int col(0); col < dense_vector.size(); ++col) { + const IntegerValue coeff = dense_vector[col]; if (coeff == 0) continue; - const IntegerVariable var = integer_variables[col.value()]; - CHECK(result->AppendOneTerm(var, coeff, lp_solution[col.value()], + const IntegerVariable var = integer_variables[col]; + CHECK(result->AppendOneTerm(var, coeff, lp_solution[col], 
integer_trail->LevelZeroLowerBound(var), integer_trail->LevelZeroUpperBound(var))); } @@ -1466,8 +1466,6 @@ void LinearProgrammingConstraint::AddMirCuts() { const int num_rows = lp_data_.num_constraints().value(); std::vector> base_rows; util_intops::StrongVector row_weights(num_rows, 0.0); - util_intops::StrongVector at_ub(num_rows, false); - util_intops::StrongVector at_lb(num_rows, false); for (RowIndex row(0); row < num_rows; ++row) { // We only consider tight rows. // We use both the status and activity to have as much options as possible. @@ -1480,13 +1478,11 @@ void LinearProgrammingConstraint::AddMirCuts() { if (activity > lp_data_.constraint_upper_bounds()[row] - 1e-4 || status == glop::ConstraintStatus::AT_UPPER_BOUND || status == glop::ConstraintStatus::FIXED_VALUE) { - at_ub[row] = true; base_rows.push_back({row, IntegerValue(1)}); } if (activity < lp_data_.constraint_lower_bounds()[row] + 1e-4 || status == glop::ConstraintStatus::AT_LOWER_BOUND || status == glop::ConstraintStatus::FIXED_VALUE) { - at_lb[row] = true; base_rows.push_back({row, IntegerValue(-1)}); } @@ -1604,16 +1600,20 @@ void LinearProgrammingConstraint::AddMirCuts() { if (used_rows[row]) continue; used_rows[row] = true; - // We only consider "tight" rows, as defined above. + // Note that we consider all rows here, not only tight one. This makes a + // big difference on problem like blp-ic98.pb.gz. We can also use the + // integrality of the slack when adding a non-tight row to derive good + // cuts. Also, non-tight row will have a low weight, so they should + // still be chosen after the tight-one in most situation. 
bool add_row = false; - if (at_ub[row]) { + if (!integer_lp_[row].ub_is_trivial) { if (entry.coefficient() > 0.0) { if (dense_cut[var_to_eliminate] < 0) add_row = true; } else { if (dense_cut[var_to_eliminate] > 0) add_row = true; } } - if (at_lb[row]) { + if (!integer_lp_[row].lb_is_trivial) { if (entry.coefficient() > 0.0) { if (dense_cut[var_to_eliminate] > 0) add_row = true; } else { @@ -1933,7 +1933,7 @@ bool LinearProgrammingConstraint::ScalingCanOverflow( std::vector> LinearProgrammingConstraint::ScaleLpMultiplier( bool take_objective_into_account, bool ignore_trivial_constraints, - const std::vector>& lp_multipliers, + absl::Span> lp_multipliers, IntegerValue* scaling, int64_t overflow_cap) const { *scaling = 0; diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index 184753244f..2757dc3f2e 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -330,7 +330,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, // will still be exact as it will work for any set of multiplier. std::vector> ScaleLpMultiplier( bool take_objective_into_account, bool ignore_trivial_constraints, - const std::vector>& lp_multipliers, + absl::Span> lp_multipliers, IntegerValue* scaling, int64_t overflow_cap = std::numeric_limits::max()) const; diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index c4f864cf17..8bd2aae00b 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -13,6 +13,7 @@ # limitations under the License. 
import itertools +import time from absl.testing import absltest import pandas as pd @@ -97,42 +98,55 @@ class RecordSolution(cp_model.CpSolverSolutionCallback): class TimeRecorder(cp_model.CpSolverSolutionCallback): - def __init__(self, default_time: float) -> None: + def __init__(self) -> None: super().__init__() - self.__last_time = default_time + self.__last_time: float = 0.0 def on_solution_callback(self) -> None: - self.__last_time = self.wall_time + self.__last_time = time.time() @property - def last_time(self): + def last_time(self) -> float: return self.__last_time class LogToString: """Record log in a string.""" - def __init__(self): + def __init__(self) -> None: self.__log = "" - def new_message(self, message: str): + def new_message(self, message: str) -> None: self.__log += message self.__log += "\n" @property - def log(self): + def log(self) -> str: return self.__log class BestBoundCallback: - def __init__(self): + def __init__(self) -> None: self.best_bound: float = 0.0 - def new_best_bound(self, bb: float): + def new_best_bound(self, bb: float) -> None: self.best_bound = bb +class BestBoundTimeCallback: + + def __init__(self) -> None: + self.__last_time: float = 0.0 + + def new_best_bound(self, unused_bb: float): + self.__last_time = time.time() + + @property + def last_time(self) -> float: + return self.__last_time + + class CpModelTest(absltest.TestCase): def testCreateIntegerVariable(self): @@ -1769,9 +1783,10 @@ TRFM""" solver.parameters.cp_model_presolve = False solver.parameters.symmetry_level = 0 - callback = TimeRecorder(solver.parameters.max_time_in_seconds) - solver.Solve(model, callback) - self.assertLess(solver.wall_time, callback.last_time + 5.0) + solution_callback = TimeRecorder() + status = solver.Solve(model, solution_callback) + if status == cp_model.OPTIMAL: + self.assertLess(time.time(), solution_callback.last_time + 5.0) def testIssue4376MinimizeModel(self): print("testIssue4376MinimizeModel") @@ -1868,9 +1883,15 @@ TRFM""" 
solver.parameters.num_workers = 8 solver.parameters.max_time_in_seconds = 50 solver.parameters.log_search_progress = True - callback = TimeRecorder(solver.parameters.max_time_in_seconds) - solver.Solve(model, callback) - self.assertLess(solver.wall_time, callback.last_time + 5.0) + solution_callback = TimeRecorder() + best_bound_callback = BestBoundTimeCallback() + solver.best_bound_callback = best_bound_callback.new_best_bound + status = solver.Solve(model, solution_callback) + if status == cp_model.OPTIMAL: + self.assertLess( + time.time(), + max(best_bound_callback.last_time, solution_callback.last_time) + 5.0, + ) if __name__ == "__main__": From 3568fa161f25f258e63031f2832a7ae486e3bd97 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 25 Sep 2024 07:56:21 +0200 Subject: [PATCH 018/105] small cleanup --- ortools/base/top_n.h | 2 +- ortools/port/proto_utils.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ortools/base/top_n.h b/ortools/base/top_n.h index 2ce971a02c..f5023d4461 100644 --- a/ortools/base/top_n.h +++ b/ortools/base/top_n.h @@ -254,7 +254,7 @@ const T& TopN::peek_bottom() { return elements_.front(); } template -std::vector TopN::Take() { +std::vector TopN::Take() { std::vector out = std::move(elements_); if (state_ != State::HEAP_SORTED) { std::sort(out.begin(), out.end(), cmp_); diff --git a/ortools/port/proto_utils.h b/ortools/port/proto_utils.h index 535d634312..ba0464b8ca 100644 --- a/ortools/port/proto_utils.h +++ b/ortools/port/proto_utils.h @@ -58,7 +58,7 @@ std::string ProtoEnumToString(ProtoEnumType enum_value) { "Invalid enum value of: ", enum_value, " for enum type: ", google::protobuf::GetEnumDescriptor()->name()); } - return enum_value_descriptor->name(); + return std::string(enum_value_descriptor->name()); #endif // !defined(__PORTABLE_PLATFORM__) } From 1b03cf25ebab1d0203ac5c7eb61e1f6bba05e960 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 25 Sep 2024 07:56:56 +0200 Subject: [PATCH 
019/105] julia: Export ORTools.jl --- ortools/julia/ORTools.jl/LICENSE | 202 ++++++++++++++++++++++ ortools/julia/ORTools.jl/Project.toml | 15 ++ ortools/julia/ORTools.jl/README.md | 6 + ortools/julia/ORTools.jl/src/ORTools.jl | 5 + ortools/julia/ORTools.jl/test/runtests.jl | 6 + 5 files changed, 234 insertions(+) create mode 100644 ortools/julia/ORTools.jl/LICENSE create mode 100644 ortools/julia/ORTools.jl/Project.toml create mode 100644 ortools/julia/ORTools.jl/README.md create mode 100644 ortools/julia/ORTools.jl/src/ORTools.jl create mode 100644 ortools/julia/ORTools.jl/test/runtests.jl diff --git a/ortools/julia/ORTools.jl/LICENSE b/ortools/julia/ORTools.jl/LICENSE new file mode 100644 index 0000000000..cbe9ed1ae2 --- /dev/null +++ b/ortools/julia/ORTools.jl/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ortools/julia/ORTools.jl/Project.toml b/ortools/julia/ORTools.jl/Project.toml new file mode 100644 index 0000000000..33f069be36 --- /dev/null +++ b/ortools/julia/ORTools.jl/Project.toml @@ -0,0 +1,15 @@ +name = "ORTools" +uuid = "b7d69b34-a827-4671-8cfa-f7e1eec930c7" +version = "1.0.0-DEV" + +[deps] +MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" + +[compat] +julia = "1.6.7" + +[extras] +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[targets] +test = ["Test"] diff --git a/ortools/julia/ORTools.jl/README.md b/ortools/julia/ORTools.jl/README.md new file mode 100644 index 0000000000..941908eba9 --- /dev/null +++ b/ortools/julia/ORTools.jl/README.md @@ -0,0 +1,6 @@ +# ORTools + +This is the +[MathOptInterface.jl](https://github.com/jump-dev/MathOptInterface.jl) Julia +wrapper for Google's +[MathOpt](https://developers.google.com/optimization/math_opt). diff --git a/ortools/julia/ORTools.jl/src/ORTools.jl b/ortools/julia/ORTools.jl/src/ORTools.jl new file mode 100644 index 0000000000..dcdf2dca76 --- /dev/null +++ b/ortools/julia/ORTools.jl/src/ORTools.jl @@ -0,0 +1,5 @@ +module ORTools + +# Write your package code here. 
+ +end diff --git a/ortools/julia/ORTools.jl/test/runtests.jl b/ortools/julia/ORTools.jl/test/runtests.jl new file mode 100644 index 0000000000..8b2e109cf6 --- /dev/null +++ b/ortools/julia/ORTools.jl/test/runtests.jl @@ -0,0 +1,6 @@ +using ORTools +using Test + +@testset "ORTools.jl" begin + # Write your tests here. +end From 6feb7c8575c69b0a99bb9cd08024c90530b909b2 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 25 Sep 2024 07:57:28 +0200 Subject: [PATCH 020/105] algorithms: export from google3 --- ortools/algorithms/BUILD.bazel | 4 +- ortools/algorithms/python/set_cover.cc | 403 ++++++++++++++++-- ortools/algorithms/samples/set_cover.cc | 65 +++ ortools/algorithms/samples/set_cover.py | 56 +++ .../samples/simple_knapsack_program.cc | 2 + ortools/algorithms/set_cover_heuristics.cc | 19 +- ortools/algorithms/set_cover_heuristics.h | 8 +- ortools/algorithms/set_cover_lagrangian.cc | 159 ++++--- ortools/algorithms/set_cover_lagrangian.h | 11 +- ortools/algorithms/set_cover_model.cc | 164 ++++++- ortools/algorithms/set_cover_model.h | 87 ++-- ortools/algorithms/set_cover_orlib_test.cc | 24 +- 12 files changed, 813 insertions(+), 189 deletions(-) create mode 100644 ortools/algorithms/samples/set_cover.cc create mode 100755 ortools/algorithms/samples/set_cover.py diff --git a/ortools/algorithms/BUILD.bazel b/ortools/algorithms/BUILD.bazel index be5f372620..3d7f2284f3 100644 --- a/ortools/algorithms/BUILD.bazel +++ b/ortools/algorithms/BUILD.bazel @@ -275,6 +275,7 @@ cc_library( ":set_cover_model", "//ortools/base:threadpool", "@com_google_absl//absl/log:check", + "@com_google_absl//absl/synchronization", ], ) @@ -286,9 +287,10 @@ cc_library( ":set_cover_cc_proto", "//ortools/base:intops", "//ortools/base:strong_vector", - "//ortools/util:aligned_memory", "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random", + "@com_google_absl//absl/random:distributions", "@com_google_absl//absl/strings", ], ) diff --git 
a/ortools/algorithms/python/set_cover.cc b/ortools/algorithms/python/set_cover.cc index 2d107fdfc0..a4263078c1 100644 --- a/ortools/algorithms/python/set_cover.cc +++ b/ortools/algorithms/python/set_cover.cc @@ -13,91 +13,235 @@ // A pybind11 wrapper for set_cover_*. +#include +#include +#include #include +#include #include "absl/base/nullability.h" #include "ortools/algorithms/set_cover_heuristics.h" #include "ortools/algorithms/set_cover_invariant.h" #include "ortools/algorithms/set_cover_model.h" #include "ortools/algorithms/set_cover_reader.h" +#include "pybind11/numpy.h" #include "pybind11/pybind11.h" #include "pybind11/pytypes.h" #include "pybind11/stl.h" #include "pybind11_protobuf/native_proto_caster.h" +using ::operations_research::BaseInt; +using ::operations_research::ClearRandomSubsets; using ::operations_research::ElementDegreeSolutionGenerator; +using ::operations_research::ElementIndex; using ::operations_research::GreedySolutionGenerator; using ::operations_research::GuidedLocalSearch; +using ::operations_research::GuidedTabuSearch; using ::operations_research::Preprocessor; using ::operations_research::RandomSolutionGenerator; using ::operations_research::ReadBeasleySetCoverProblem; using ::operations_research::ReadRailSetCoverProblem; +using ::operations_research::SetCoverDecision; using ::operations_research::SetCoverInvariant; using ::operations_research::SetCoverModel; +using ::operations_research::SparseColumn; +using ::operations_research::SparseRow; using ::operations_research::SteepestSearch; +using ::operations_research::SubsetBoolVector; +using ::operations_research::SubsetCostVector; using ::operations_research::SubsetIndex; +using ::operations_research::TabuList; using ::operations_research::TrivialSolutionGenerator; namespace py = pybind11; using ::py::arg; +using ::py::make_iterator; -// General note about TODOs: the corresponding functions/classes/methods are -// more complex to wrap, as they use nonstandard types, and are less 
important, -// as they are not as useful to most users (mostly useful to write some custom -// Python heuristics). +std::vector VectorIntToVectorSubsetIndex( + const std::vector& ints) { + std::vector subs; + std::transform(ints.begin(), ints.end(), subs.begin(), + [](int subset) -> SubsetIndex { return SubsetIndex(subset); }); + return subs; +} + +SubsetCostVector VectorDoubleToSubsetCostVector( + const std::vector& doubles) { + SubsetCostVector costs(doubles.begin(), doubles.end()); + return costs; +} + +class IntIterator { + public: + using value_type = int; + using difference_type = std::ptrdiff_t; + using pointer = int*; + using reference = int&; + using iterator_category = std::input_iterator_tag; + + explicit IntIterator(int max_value) + : max_value_(max_value), current_value_(0) {} + + int operator*() const { return current_value_; } + IntIterator& operator++() { + ++current_value_; + return *this; + } + + static IntIterator begin(int max_value) { return IntIterator{max_value}; } + static IntIterator end(int max_value) { return {max_value, max_value}; } + + friend bool operator==(const IntIterator& lhs, const IntIterator& rhs) { + return lhs.max_value_ == rhs.max_value_ && + lhs.current_value_ == rhs.current_value_; + } + + private: + IntIterator(int max_value, int current_value) + : max_value_(max_value), current_value_(current_value) {} + + const int max_value_; + int current_value_; +}; PYBIND11_MODULE(set_cover, m) { pybind11_protobuf::ImportNativeProtoCasters(); // set_cover_model.h + py::class_(m, "SetCoverModelStats") + .def_readwrite("min", &SetCoverModel::Stats::min) + .def_readwrite("max", &SetCoverModel::Stats::max) + .def_readwrite("median", &SetCoverModel::Stats::median) + .def_readwrite("mean", &SetCoverModel::Stats::mean) + .def_readwrite("stddev", &SetCoverModel::Stats::stddev) + .def("debug_string", &SetCoverModel::Stats::DebugString); + py::class_(m, "SetCoverModel") .def(py::init<>()) .def_property_readonly("num_elements", 
&SetCoverModel::num_elements) .def_property_readonly("num_subsets", &SetCoverModel::num_subsets) .def_property_readonly("num_nonzeros", &SetCoverModel::num_nonzeros) .def_property_readonly("fill_rate", &SetCoverModel::FillRate) + .def_property_readonly( + "subset_costs", + [](SetCoverModel& model) -> const std::vector& { + return model.subset_costs().get(); + }) + .def("columns", + [](SetCoverModel& model) -> std::vector> { + // Due to the inner StrongVector, make a deep copy. Anyway, + // columns() returns a const ref, so this keeps the semantics, not + // the efficiency. + std::vector> columns; + std::transform( + model.columns().begin(), model.columns().end(), + columns.begin(), + [](const SparseColumn& column) -> std::vector { + std::vector col(column.size()); + std::transform(column.begin(), column.end(), col.begin(), + [](ElementIndex element) -> BaseInt { + return element.value(); + }); + return col; + }); + return columns; + }) + .def("rows", + [](SetCoverModel& model) -> std::vector> { + // Due to the inner StrongVector, make a deep copy. Anyway, + // rows() returns a const ref, so this keeps the semantics, not + // the efficiency. 
+ std::vector> rows; + std::transform( + model.rows().begin(), model.rows().end(), rows.begin(), + [](const SparseRow& row) -> std::vector { + std::vector r(row.size()); + std::transform(row.begin(), row.end(), r.begin(), + [](SubsetIndex element) -> BaseInt { + return element.value(); + }); + return r; + }); + return rows; + }) + .def("row_view_is_valid", &SetCoverModel::row_view_is_valid) + .def("SubsetRange", + [](SetCoverModel& model) { + return make_iterator<>(IntIterator::begin(model.num_subsets()), + IntIterator::end(model.num_subsets())); + }) + .def("ElementRange", + [](SetCoverModel& model) { + return make_iterator<>(IntIterator::begin(model.num_elements()), + IntIterator::end(model.num_elements())); + }) + .def_property_readonly("all_subsets", + [](SetCoverModel& model) -> std::vector { + std::vector subsets; + std::transform( + model.all_subsets().begin(), + model.all_subsets().end(), subsets.begin(), + [](const SubsetIndex element) -> BaseInt { + return element.value(); + }); + return subsets; + }) .def("add_empty_subset", &SetCoverModel::AddEmptySubset, arg("cost")) .def( "add_element_to_last_subset", - [](SetCoverModel& model, int element) { + [](SetCoverModel& model, BaseInt element) { model.AddElementToLastSubset(element); }, arg("element")) .def( "set_subset_cost", - [](SetCoverModel& model, int subset, double cost) { + [](SetCoverModel& model, BaseInt subset, double cost) { model.SetSubsetCost(subset, cost); }, arg("subset"), arg("cost")) .def( "add_element_to_subset", - [](SetCoverModel& model, int element, int subset) { + [](SetCoverModel& model, BaseInt element, BaseInt subset) { model.AddElementToSubset(element, subset); }, arg("subset"), arg("cost")) + .def("create_sparse_row_view", &SetCoverModel::CreateSparseRowView) .def("compute_feasibility", &SetCoverModel::ComputeFeasibility) .def( "reserve_num_subsets", - [](SetCoverModel& model, int num_subsets) { + [](SetCoverModel& model, BaseInt num_subsets) { model.ReserveNumSubsets(num_subsets); 
}, arg("num_subsets")) .def( "reserve_num_elements_in_subset", - [](SetCoverModel& model, int num_elements, int subset) { + [](SetCoverModel& model, BaseInt num_elements, BaseInt subset) { model.ReserveNumElementsInSubset(num_elements, subset); }, arg("num_elements"), arg("subset")) .def("export_model_as_proto", &SetCoverModel::ExportModelAsProto) - .def("import_model_from_proto", &SetCoverModel::ImportModelFromProto); - // TODO(user): add support for subset_costs, columns, rows, - // row_view_is_valid, SubsetRange, ElementRange, all_subsets, - // CreateSparseRowView, ComputeCostStats, ComputeRowStats, - // ComputeColumnStats, ComputeRowDeciles, ComputeColumnDeciles. + .def("import_model_from_proto", &SetCoverModel::ImportModelFromProto) + .def("compute_cost_stats", &SetCoverModel::ComputeCostStats) + .def("compute_row_stats", &SetCoverModel::ComputeRowStats) + .def("compute_column_stats", &SetCoverModel::ComputeColumnStats) + .def("compute_row_deciles", &SetCoverModel::ComputeRowDeciles) + .def("compute_column_deciles", &SetCoverModel::ComputeRowDeciles); // TODO(user): wrap IntersectingSubsetsIterator. 
// set_cover_invariant.h + py::class_(m, "SetCoverDecision") + .def(py::init<>()) + .def(py::init([](BaseInt subset, bool value) -> SetCoverDecision* { + return new SetCoverDecision(SubsetIndex(subset), value); + }), + arg("subset"), arg("value")) + .def("subset", + [](const SetCoverDecision& decision) -> BaseInt { + return decision.subset().value(); + }) + .def("decision", &SetCoverDecision::decision); + py::class_(m, "SetCoverInvariant") .def(py::init()) .def("initialize", &SetCoverInvariant::Initialize) @@ -118,44 +262,90 @@ PYBIND11_MODULE(set_cover, m) { }) .def("cost", &SetCoverInvariant::cost) .def("num_uncovered_elements", &SetCoverInvariant::num_uncovered_elements) + .def("is_selected", + [](SetCoverInvariant& invariant) -> std::vector { + return invariant.is_selected().get(); + }) + .def("num_free_elements", + [](SetCoverInvariant& invariant) -> std::vector { + return invariant.num_free_elements().get(); + }) + .def("num_coverage_le_1_elements", + [](SetCoverInvariant& invariant) -> std::vector { + return invariant.num_coverage_le_1_elements().get(); + }) + .def("coverage", + [](SetCoverInvariant& invariant) -> std::vector { + return invariant.coverage().get(); + }) + .def( + "compute_coverage_in_focus", + [](SetCoverInvariant& invariant, + const std::vector& focus) -> std::vector { + return invariant + .ComputeCoverageInFocus(VectorIntToVectorSubsetIndex(focus)) + .get(); + }, + arg("focus")) + .def("is_redundant", + [](SetCoverInvariant& invariant) -> std::vector { + return invariant.is_redundant().get(); + }) + .def("trace", &SetCoverInvariant::trace) .def("clear_trace", &SetCoverInvariant::ClearTrace) .def("clear_removability_information", &SetCoverInvariant::ClearRemovabilityInformation) + .def("new_removable_subsets", &SetCoverInvariant::new_removable_subsets) + .def("new_non_removable_subsets", + &SetCoverInvariant::new_non_removable_subsets) .def("compress_trace", &SetCoverInvariant::CompressTrace) + .def("load_solution", + [](SetCoverInvariant& 
invariant, + const std::vector& solution) -> void { + SubsetBoolVector sol(solution.begin(), solution.end()); + return invariant.LoadSolution(sol); + }) .def("check_consistency", &SetCoverInvariant::CheckConsistency) + .def( + "compute_is_redundant", + [](SetCoverInvariant& invariant, BaseInt subset) -> bool { + return invariant.ComputeIsRedundant(SubsetIndex(subset)); + }, + arg("subset")) + .def("make_fully_updated", &SetCoverInvariant::MakeFullyUpdated) .def( "flip", - [](SetCoverInvariant& invariant, int subset) { + [](SetCoverInvariant& invariant, BaseInt subset) { invariant.Flip(SubsetIndex(subset)); }, arg("subset")) .def( "flip_and_fully_update", - [](SetCoverInvariant& invariant, int subset) { + [](SetCoverInvariant& invariant, BaseInt subset) { invariant.FlipAndFullyUpdate(SubsetIndex(subset)); }, arg("subset")) .def( "select", - [](SetCoverInvariant& invariant, int subset) { + [](SetCoverInvariant& invariant, BaseInt subset) { invariant.Select(SubsetIndex(subset)); }, arg("subset")) .def( "select_and_fully_update", - [](SetCoverInvariant& invariant, int subset) { + [](SetCoverInvariant& invariant, BaseInt subset) { invariant.SelectAndFullyUpdate(SubsetIndex(subset)); }, arg("subset")) .def( "deselect", - [](SetCoverInvariant& invariant, int subset) { + [](SetCoverInvariant& invariant, BaseInt subset) { invariant.Deselect(SubsetIndex(subset)); }, arg("subset")) .def( "deselect_and_fully_update", - [](SetCoverInvariant& invariant, int subset) { + [](SetCoverInvariant& invariant, BaseInt subset) { invariant.DeselectAndFullyUpdate(SubsetIndex(subset)); }, arg("subset")) @@ -163,10 +353,6 @@ PYBIND11_MODULE(set_cover, m) { &SetCoverInvariant::ExportSolutionAsProto) .def("import_solution_from_proto", &SetCoverInvariant::ImportSolutionFromProto); - // TODO(user): add support for is_selected, num_free_elements, - // num_coverage_le_1_elements, coverage, ComputeCoverageInFocus, - // is_redundant, trace, new_removable_subsets, new_non_removable_subsets, - // 
LoadSolution, ComputeIsRedundant. // set_cover_heuristics.h py::class_(m, "Preprocessor") @@ -175,30 +361,57 @@ PYBIND11_MODULE(set_cover, m) { [](Preprocessor& heuristic) -> bool { return heuristic.NextSolution(); }) + .def("next_solution", + [](Preprocessor& heuristic, + const std::vector& focus) -> bool { + return heuristic.NextSolution(VectorIntToVectorSubsetIndex(focus)); + }) .def("num_columns_fixed_by_singleton_row", &Preprocessor::num_columns_fixed_by_singleton_row); - // TODO(user): add support for focus argument. py::class_(m, "TrivialSolutionGenerator") .def(py::init()) - .def("next_solution", [](TrivialSolutionGenerator& heuristic) -> bool { - return heuristic.NextSolution(); - }); - // TODO(user): add support for focus argument. + .def("next_solution", + [](TrivialSolutionGenerator& heuristic) -> bool { + return heuristic.NextSolution(); + }) + .def("next_solution", + [](TrivialSolutionGenerator& heuristic, + const std::vector& focus) -> bool { + return heuristic.NextSolution(VectorIntToVectorSubsetIndex(focus)); + }); py::class_(m, "RandomSolutionGenerator") .def(py::init()) - .def("next_solution", [](RandomSolutionGenerator& heuristic) -> bool { - return heuristic.NextSolution(); - }); - // TODO(user): add support for focus argument. + .def("next_solution", + [](RandomSolutionGenerator& heuristic) -> bool { + return heuristic.NextSolution(); + }) + .def("next_solution", + [](RandomSolutionGenerator& heuristic, + const std::vector& focus) -> bool { + return heuristic.NextSolution(VectorIntToVectorSubsetIndex(focus)); + }); py::class_(m, "GreedySolutionGenerator") .def(py::init()) - .def("next_solution", [](GreedySolutionGenerator& heuristic) -> bool { - return heuristic.NextSolution(); - }); - // TODO(user): add support for focus and cost arguments. 
+ .def("next_solution", + [](GreedySolutionGenerator& heuristic) -> bool { + return heuristic.NextSolution(); + }) + .def("next_solution", + [](GreedySolutionGenerator& heuristic, + const std::vector& focus) -> bool { + return heuristic.NextSolution(VectorIntToVectorSubsetIndex(focus)); + }) + .def("next_solution", + [](GreedySolutionGenerator& heuristic, + const std::vector& focus, + const std::vector& costs) -> bool { + return heuristic.NextSolution( + VectorIntToVectorSubsetIndex(focus), + VectorDoubleToSubsetCostVector(costs)); + }); py::class_(m, "ElementDegreeSolutionGenerator") @@ -206,16 +419,40 @@ PYBIND11_MODULE(set_cover, m) { .def("next_solution", [](ElementDegreeSolutionGenerator& heuristic) -> bool { return heuristic.NextSolution(); + }) + .def("next_solution", + [](ElementDegreeSolutionGenerator& heuristic, + const std::vector& focus) -> bool { + return heuristic.NextSolution(VectorIntToVectorSubsetIndex(focus)); + }) + .def("next_solution", + [](ElementDegreeSolutionGenerator& heuristic, + const std::vector& focus, + const std::vector& costs) -> bool { + return heuristic.NextSolution( + VectorIntToVectorSubsetIndex(focus), + VectorDoubleToSubsetCostVector(costs)); }); - // TODO(user): add support for focus and cost arguments. py::class_(m, "SteepestSearch") .def(py::init()) .def("next_solution", [](SteepestSearch& heuristic, int num_iterations) -> bool { return heuristic.NextSolution(num_iterations); + }) + .def("next_solution", + [](SteepestSearch& heuristic, const std::vector& focus, + int num_iterations) -> bool { + return heuristic.NextSolution(VectorIntToVectorSubsetIndex(focus), + num_iterations); + }) + .def("next_solution", + [](SteepestSearch& heuristic, const std::vector& focus, + const std::vector& costs, int num_iterations) -> bool { + return heuristic.NextSolution( + VectorIntToVectorSubsetIndex(focus), + VectorDoubleToSubsetCostVector(costs), num_iterations); }); - // TODO(user): add support for focus and cost arguments. 
py::class_(m, "GuidedLocalSearch") .def(py::init()) @@ -223,12 +460,92 @@ PYBIND11_MODULE(set_cover, m) { .def("next_solution", [](GuidedLocalSearch& heuristic, int num_iterations) -> bool { return heuristic.NextSolution(num_iterations); + }) + .def("next_solution", + [](GuidedLocalSearch& heuristic, const std::vector& focus, + int num_iterations) -> bool { + return heuristic.NextSolution(VectorIntToVectorSubsetIndex(focus), + num_iterations); }); - // TODO(user): add support for focus and cost arguments. - // TODO(user): add support for ClearRandomSubsets, ClearRandomSubsets, - // ClearMostCoveredElements, ClearMostCoveredElements, TabuList, - // GuidedTabuSearch. + // Specialization for T = SubsetIndex ~= BaseInt (aka int for Python, whatever + // the size of BaseInt). + // A base type doesn't work, because TabuList uses `T::value` in the + // constructor. + py::class_>(m, "TabuList") + .def(py::init([](int size) -> TabuList* { + return new TabuList(SubsetIndex(size)); + }), + arg("size")) + .def("size", &TabuList::size) + .def("init", &TabuList::Init, arg("size")) + .def( + "add", + [](TabuList& list, BaseInt t) -> void { + return list.Add(SubsetIndex(t)); + }, + arg("t")) + .def( + "contains", + [](TabuList& list, BaseInt t) -> bool { + return list.Contains(SubsetIndex(t)); + }, + arg("t")); + + py::class_(m, "GuidedTabuSearch") + .def(py::init()) + .def("initialize", &GuidedTabuSearch::Initialize) + .def("next_solution", + [](GuidedTabuSearch& heuristic, int num_iterations) -> bool { + return heuristic.NextSolution(num_iterations); + }) + .def("next_solution", + [](GuidedTabuSearch& heuristic, const std::vector& focus, + int num_iterations) -> bool { + return heuristic.NextSolution(VectorIntToVectorSubsetIndex(focus), + num_iterations); + }) + .def("get_lagrangian_factor", &GuidedTabuSearch::SetLagrangianFactor, + arg("factor")) + .def("set_lagrangian_factor", &GuidedTabuSearch::GetLagrangianFactor) + .def("set_epsilon", &GuidedTabuSearch::SetEpsilon, 
arg("r")) + .def("get_epsilon", &GuidedTabuSearch::GetEpsilon) + .def("set_penalty_factor", &GuidedTabuSearch::SetPenaltyFactor, + arg("factor")) + .def("get_penalty_factor", &GuidedTabuSearch::GetPenaltyFactor) + .def("set_tabu_list_size", &GuidedTabuSearch::SetTabuListSize, + arg("size")) + .def("get_tabu_list_size", &GuidedTabuSearch::GetTabuListSize); + + m.def( + "clear_random_subsets", + [](BaseInt num_subsets, SetCoverInvariant* inv) -> std::vector { + const std::vector cleared = + ClearRandomSubsets(num_subsets, inv); + return {cleared.begin(), cleared.end()}; + }); + m.def("clear_random_subsets", + [](const std::vector& focus, BaseInt num_subsets, + SetCoverInvariant* inv) -> std::vector { + const std::vector cleared = ClearRandomSubsets( + VectorIntToVectorSubsetIndex(focus), num_subsets, inv); + return {cleared.begin(), cleared.end()}; + }); + + m.def( + "clear_most_covered_elements", + [](BaseInt num_subsets, SetCoverInvariant* inv) -> std::vector { + const std::vector cleared = + ClearMostCoveredElements(num_subsets, inv); + return {cleared.begin(), cleared.end()}; + }); + m.def("clear_most_covered_elements", + [](const std::vector& focus, BaseInt num_subsets, + SetCoverInvariant* inv) -> std::vector { + const std::vector cleared = ClearMostCoveredElements( + VectorIntToVectorSubsetIndex(focus), num_subsets, inv); + return {cleared.begin(), cleared.end()}; + }); // set_cover_reader.h m.def("read_beasly_set_cover_problem", &ReadBeasleySetCoverProblem); diff --git a/ortools/algorithms/samples/set_cover.cc b/ortools/algorithms/samples/set_cover.cc new file mode 100644 index 0000000000..c9feae012d --- /dev/null +++ b/ortools/algorithms/samples/set_cover.cc @@ -0,0 +1,65 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// [START program] +// [START import] +#include + +#include "ortools/algorithms/set_cover_heuristics.h" +#include "ortools/algorithms/set_cover_invariant.h" +#include "ortools/algorithms/set_cover_model.h" +#include "ortools/base/logging.h" +// [END import] + +namespace operations_research { + +void SimpleSetCoverProgram() { + // [START data] + SetCoverModel model; + model.AddEmptySubset(2.0); + model.AddElementToLastSubset(0); + model.AddEmptySubset(2.0); + model.AddElementToLastSubset(1); + model.AddEmptySubset(1.0); + model.AddElementToLastSubset(0); + model.AddElementToLastSubset(1); + // [END data] + + // [START solve] + SetCoverInvariant inv(&model); + GreedySolutionGenerator greedy(&inv); + bool found_solution = greedy.NextSolution(); + if (!found_solution) { + LOG(INFO) << "No solution found by the greedy heuristic."; + return; + } + SetCoverSolutionResponse solution = inv.ExportSolutionAsProto(); + // [END solve] + + // [START print_solution] + LOG(INFO) << "Total cost: " << solution.cost(); // == inv.cost() + LOG(INFO) << "Total number of selected subsets: " << solution.num_subsets(); + LOG(INFO) << "Chosen subsets:"; + for (int i = 0; i < solution.subset_size(); ++i) { + LOG(INFO) << " " << solution.subset(i); + } + // [END print_solution] +} + +} // namespace operations_research + +int main() { + operations_research::SimpleSetCoverProgram(); + return EXIT_SUCCESS; +} +// [END program] diff --git a/ortools/algorithms/samples/set_cover.py b/ortools/algorithms/samples/set_cover.py new file mode 100755 index 0000000000..aab98c6b0f --- 
/dev/null +++ b/ortools/algorithms/samples/set_cover.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 +# Copyright 2010-2024 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A simple set-covering problem.""" + +# [START program] +# [START import] +from ortools.algorithms.python import set_cover +# [END import] + + +def main(): + # [START data] + model = set_cover.SetCoverModel() + model.add_empty_subset(2.0) + model.add_element_to_last_subset(0) + model.add_empty_subset(2.0) + model.add_element_to_last_subset(1) + model.add_empty_subset(1.0) + model.add_element_to_last_subset(0) + model.add_element_to_last_subset(1) + # [END data] + + # [START solve] + inv = set_cover.SetCoverInvariant(model) + greedy = set_cover.GreedySolutionGenerator(inv) + has_found = greedy.next_solution() + if not has_found: + print("No solution found by the greedy heuristic.") + return + solution = inv.export_solution_as_proto() + # [END solve] + + # [START print_solution] + print(f"Total cost: {solution.cost}") # == inv.cost() + print(f"Total number of selected subsets: {solution.num_subsets}") + print("Chosen subsets:") + for subset in solution.subset: + print(f" {subset}") + # [END print_solution] + + +if __name__ == "__main__": + main() +# [END program] diff --git a/ortools/algorithms/samples/simple_knapsack_program.cc b/ortools/algorithms/samples/simple_knapsack_program.cc index 64a80d3a46..d9fe9e7091 100644 --- a/ortools/algorithms/samples/simple_knapsack_program.cc +++ 
b/ortools/algorithms/samples/simple_knapsack_program.cc @@ -15,12 +15,14 @@ // [START import] #include #include +#include #include #include #include #include #include "ortools/algorithms/knapsack_solver.h" +#include "ortools/base/logging.h" // [END import] namespace operations_research { diff --git a/ortools/algorithms/set_cover_heuristics.cc b/ortools/algorithms/set_cover_heuristics.cc index 63c7fe3066..4208c90e17 100644 --- a/ortools/algorithms/set_cover_heuristics.cc +++ b/ortools/algorithms/set_cover_heuristics.cc @@ -544,23 +544,23 @@ bool GuidedLocalSearch::NextSolution(absl::Span focus, } namespace { -void SampleSubsets(std::vector* list, std::size_t num_subsets) { - num_subsets = std::min(num_subsets, list->size()); +void SampleSubsets(std::vector* list, BaseInt num_subsets) { + num_subsets = std::min(num_subsets, static_cast(list->size())); CHECK_GE(num_subsets, 0); std::shuffle(list->begin(), list->end(), absl::BitGen()); list->resize(num_subsets); } } // namespace -std::vector ClearRandomSubsets(std::size_t num_subsets, +std::vector ClearRandomSubsets(BaseInt num_subsets, SetCoverInvariant* inv) { return ClearRandomSubsets(inv->model()->all_subsets(), num_subsets, inv); } std::vector ClearRandomSubsets(absl::Span focus, - std::size_t num_subsets, + BaseInt num_subsets, SetCoverInvariant* inv) { - num_subsets = std::min(num_subsets, focus.size()); + num_subsets = std::min(num_subsets, static_cast(focus.size())); CHECK_GE(num_subsets, 0); std::vector chosen_indices; for (const SubsetIndex subset : focus) { @@ -569,7 +569,7 @@ std::vector ClearRandomSubsets(absl::Span focus, } } SampleSubsets(&chosen_indices, num_subsets); - std::size_t num_deselected = 0; + BaseInt num_deselected = 0; for (const SubsetIndex subset : chosen_indices) { inv->Deselect(subset); ++num_deselected; @@ -585,14 +585,14 @@ std::vector ClearRandomSubsets(absl::Span focus, return chosen_indices; } -std::vector ClearMostCoveredElements(std::size_t max_num_subsets, +std::vector 
ClearMostCoveredElements(BaseInt max_num_subsets, SetCoverInvariant* inv) { return ClearMostCoveredElements(inv->model()->all_subsets(), max_num_subsets, inv); } std::vector ClearMostCoveredElements( - absl::Span focus, std::size_t max_num_subsets, + absl::Span focus, BaseInt max_num_subsets, SetCoverInvariant* inv) { // This is the vector we will return. std::vector sampled_subsets; @@ -625,7 +625,8 @@ std::vector ClearMostCoveredElements( // Actually *sample* sampled_subset. // TODO(user): find another algorithm if necessary. std::shuffle(sampled_subsets.begin(), sampled_subsets.end(), absl::BitGen()); - sampled_subsets.resize(std::min(sampled_subsets.size(), max_num_subsets)); + sampled_subsets.resize( + std::min(static_cast(sampled_subsets.size()), max_num_subsets)); // Testing has shown that sorting sampled_subsets is not necessary. // Now, un-select the subset in sampled_subsets. diff --git a/ortools/algorithms/set_cover_heuristics.h b/ortools/algorithms/set_cover_heuristics.h index 04d3b31b9f..154724cf61 100644 --- a/ortools/algorithms/set_cover_heuristics.h +++ b/ortools/algorithms/set_cover_heuristics.h @@ -477,12 +477,12 @@ class GuidedLocalSearch { // solution. There can be more than num_subsets variables cleared because the // intersecting subsets are also removed from the solution. Returns a list of // subset indices that can be reused as a focus. -std::vector ClearRandomSubsets(std::size_t num_subsets, +std::vector ClearRandomSubsets(BaseInt num_subsets, SetCoverInvariant* inv); // Same as above, but clears the subset indices in focus. std::vector ClearRandomSubsets(absl::Span focus, - std::size_t num_subsets, + BaseInt num_subsets, SetCoverInvariant* inv); // Clears the variables (subsets) that cover the most covered elements. This is @@ -490,12 +490,12 @@ std::vector ClearRandomSubsets(absl::Span focus, // randomly. // Returns the list of the chosen subset indices. // This indices can then be used ax a focus. 
-std::vector ClearMostCoveredElements(std::size_t num_subsets, +std::vector ClearMostCoveredElements(BaseInt num_subsets, SetCoverInvariant* inv); // Same as above, but clears the subset indices in focus. std::vector ClearMostCoveredElements( - absl::Span focus, std::size_t num_subsets, + absl::Span focus, BaseInt num_subsets, SetCoverInvariant* inv); } // namespace operations_research diff --git a/ortools/algorithms/set_cover_lagrangian.cc b/ortools/algorithms/set_cover_lagrangian.cc index 18dea932df..f20a98a051 100644 --- a/ortools/algorithms/set_cover_lagrangian.cc +++ b/ortools/algorithms/set_cover_lagrangian.cc @@ -20,6 +20,7 @@ #include #include "absl/log/check.h" +#include "absl/synchronization/blocking_counter.h" #include "ortools/algorithms/adjustable_k_ary_heap.h" #include "ortools/algorithms/set_cover_invariant.h" #include "ortools/algorithms/set_cover_model.h" @@ -74,7 +75,7 @@ namespace { // TODO(user): Investigate. Cost ScalarProduct(const SparseColumn& column, const ElementCostVector& dual) { Cost result = 0.0; - for (ColumnEntryIndex pos(0); pos.value() < column.size(); ++pos) { + for (const ColumnEntryIndex pos : column.index_range()) { result += dual[column[pos]]; } return result; @@ -82,52 +83,49 @@ Cost ScalarProduct(const SparseColumn& column, const ElementCostVector& dual) { // Computes the reduced costs for a subset of subsets. // This is a helper function for ParallelComputeReducedCosts(). -// It is called on a slice of subsets, defined by start and end. +// It is called on a slice of subsets, defined by slice_start and slice_end. // The reduced costs are computed using the multipliers vector. // The columns of the subsets are given by the columns view. // The result is stored in reduced_costs. 
-void FillReducedCostsSlice(SubsetIndex start, SubsetIndex end, +void FillReducedCostsSlice(SubsetIndex slice_start, SubsetIndex slice_end, const SubsetCostVector& costs, const ElementCostVector& multipliers, const SparseColumnView& columns, SubsetCostVector* reduced_costs) { - for (SubsetIndex subset = start; subset < end; ++subset) { + for (SubsetIndex subset = slice_start; subset < slice_end; ++subset) { (*reduced_costs)[subset] = costs[subset] - ScalarProduct(columns[subset], multipliers); } } + +BaseInt BlockSize(BaseInt size, int num_threads) { + return 1 + (size - 1) / num_threads; +} } // namespace // Computes the reduced costs for all subsets in parallel using ThreadPool. SubsetCostVector SetCoverLagrangian::ParallelComputeReducedCosts( const SubsetCostVector& costs, const ElementCostVector& multipliers) const { const SubsetIndex num_subsets(model_.num_subsets()); - // TODO(user): compute a close-to-optimal k-subset partitioning. - const SubsetIndex block_size = - SubsetIndex(1) + num_subsets / num_threads_; // [***] Arbitrary choice. const SparseColumnView& columns = model_.columns(); SubsetCostVector reduced_costs(num_subsets); - ThreadPool thread_pool("ParallelComputeReducedCosts", num_threads_); - thread_pool.StartWorkers(); - { - // TODO(user): check how costly it is to create a new ThreadPool. - // TODO(user): use a queue of subsets to process? instead of a fixed range. - - // This parallelization is not very efficient, because all the threads - // use the same costs vector. Maybe it should be local to the thread. - // It's unclear whether sharing columns and costs is better than having - // each thread use its own partial copy. - // Finally, it might be better to use a queue of subsets to process, instead - // of a fixed range. 
- for (SubsetIndex start(0); start < num_subsets; start += block_size) { - thread_pool.Schedule([start, block_size, num_subsets, &costs, - &multipliers, &columns, &reduced_costs]() { - const SubsetIndex end = std::min(start + block_size, num_subsets); - FillReducedCostsSlice(start, end, costs, multipliers, columns, - &reduced_costs); - }); - } - } // Synchronize all the threads. This is equivalent to a wait. + // TODO(user): compute a close-to-optimal k-subset partitioning of the columns + // based on their sizes. [***] + const SubsetIndex block_size(BlockSize(num_subsets.value(), num_threads_)); + absl::BlockingCounter num_threads_running(num_threads_); + SubsetIndex slice_start(0); + for (int thread_index = 0; thread_index < num_threads_; ++thread_index) { + const SubsetIndex slice_end = + std::min(slice_start + block_size, num_subsets); + thread_pool_->Schedule([&num_threads_running, slice_start, slice_end, + &costs, &multipliers, &columns, &reduced_costs]() { + FillReducedCostsSlice(slice_start, slice_end, costs, multipliers, columns, + &reduced_costs); + num_threads_running.DecrementCount(); + }); + slice_start = slice_end; + } + num_threads_running.Wait(); return reduced_costs; } @@ -147,14 +145,14 @@ SubsetCostVector SetCoverLagrangian::ComputeReducedCosts( namespace { // Helper function to compute the subgradient. -// It fills a slice of the subgradient vector from indices start to end. -// This is a helper function for ParallelComputeSubgradient(). -// The subgradient is computed using the reduced costs vector. -void FillSubgradientSlice(SubsetIndex start, SubsetIndex end, +// It fills a slice of the subgradient vector from indices slice_start to +// slice_end. This is a helper function for ParallelComputeSubgradient(). The +// subgradient is computed using the reduced costs vector. 
+void FillSubgradientSlice(SubsetIndex slice_start, SubsetIndex slice_end, const SparseColumnView& columns, const SubsetCostVector& reduced_costs, ElementCostVector* subgradient) { - for (SubsetIndex subset(start); subset < end; ++subset) { + for (SubsetIndex subset(slice_start); subset < slice_end; ++subset) { if (reduced_costs[subset] < 0.0) { for (const ElementIndex element : columns[subset]) { (*subgradient)[element] -= 1.0; @@ -181,8 +179,6 @@ ElementCostVector SetCoverLagrangian::ComputeSubgradient( ElementCostVector SetCoverLagrangian::ParallelComputeSubgradient( const SubsetCostVector& reduced_costs) const { const SubsetIndex num_subsets(model_.num_subsets()); - const SubsetIndex block_size = - SubsetIndex(1) + num_subsets / num_threads_; // [***] const SparseColumnView& columns = model_.columns(); ElementCostVector subgradient(model_.num_elements(), 1.0); // The subgradient has one component per element, each thread processes @@ -191,20 +187,22 @@ ElementCostVector SetCoverLagrangian::ParallelComputeSubgradient( // although this might be less well-balanced. std::vector subgradients( num_threads_, ElementCostVector(model_.num_elements())); - ThreadPool thread_pool("ParallelComputeSubgradient", num_threads_); - thread_pool.StartWorkers(); - { - int thread_index = 0; - for (SubsetIndex start(0); start < num_subsets; - start += block_size, ++thread_index) { - thread_pool.Schedule([start, block_size, num_subsets, &reduced_costs, - &columns, &subgradients, thread_index]() { - const SubsetIndex end = std::min(start + block_size, num_subsets); - FillSubgradientSlice(start, end, columns, reduced_costs, - &subgradients[thread_index]); - }); - } - } // Synchronize all the threads. 
+ absl::BlockingCounter num_threads_running(num_threads_); + const SubsetIndex block_size(BlockSize(num_subsets.value(), num_threads_)); + SubsetIndex slice_start(0); + for (int thread_index = 0; thread_index < num_threads_; ++thread_index) { + const SubsetIndex slice_end = + std::min(slice_start + block_size, num_subsets); + thread_pool_->Schedule([&num_threads_running, slice_start, slice_end, + &reduced_costs, &columns, &subgradients, + thread_index]() { + FillSubgradientSlice(slice_start, slice_end, columns, reduced_costs, + &subgradients[thread_index]); + num_threads_running.DecrementCount(); + }); + slice_start = slice_end; + } + num_threads_running.Wait(); for (int thread_index = 0; thread_index < num_threads_; ++thread_index) { for (const ElementIndex element : model_.ElementRange()) { subgradient[element] += subgradients[thread_index][element]; @@ -216,17 +214,17 @@ ElementCostVector SetCoverLagrangian::ParallelComputeSubgradient( namespace { // Helper function to compute the value of the Lagrangian. // This is a helper function for ParallelComputeLagrangianValue(). -// It is called on a slice of elements, defined by start and end. +// It is called on a slice of elements, defined by slice_start and slice_end. // The value of the Lagrangian is computed using the reduced costs vector and // the multipliers vector. // The result is stored in lagrangian_value. -void FillLagrangianValueSlice(SubsetIndex start, SubsetIndex end, +void FillLagrangianValueSlice(SubsetIndex slice_start, SubsetIndex slice_end, const SubsetCostVector& reduced_costs, Cost* lagrangian_value) { - // This is min \sum_{j \in N} c_j(u) x_j. This captures the remark above (**), - // taking into account the possible values for x_j, and using them to minimize - // the terms. - for (SubsetIndex subset(start); subset < end; ++subset) { + // This is min \sum_{j \in N} c_j(u) x_j. 
This captures the remark above + // (**), taking into account the possible values for x_j, and using them to + // minimize the terms. + for (SubsetIndex subset(slice_start); subset < slice_end; ++subset) { if (reduced_costs[subset] < 0.0) { *lagrangian_value += reduced_costs[subset]; } @@ -258,30 +256,31 @@ Cost SetCoverLagrangian::ComputeLagrangianValue( Cost SetCoverLagrangian::ParallelComputeLagrangianValue( const SubsetCostVector& reduced_costs, const ElementCostVector& multipliers) const { - const SubsetIndex num_subsets(model_.num_subsets()); - const SubsetIndex block_size = - SubsetIndex(1) + num_subsets / num_threads_; // [***] Arbitrary. Cost lagrangian_value = 0.0; // This is \sum{i \in M} u_i. - for (const Cost u : multipliers) { lagrangian_value += u; } std::vector lagrangian_values(num_threads_, 0.0); - ThreadPool thread_pool("ParallelComputeLagrangianValue", num_threads_); - thread_pool.StartWorkers(); - { - int thread_index = 0; - for (SubsetIndex start(0); start < num_subsets; start += block_size) { - thread_pool.Schedule([start, block_size, num_subsets, thread_index, - &reduced_costs, &lagrangian_values]() { - const SubsetIndex end = std::min(start + block_size, num_subsets); - FillLagrangianValueSlice(start, end, reduced_costs, - &lagrangian_values[thread_index]); - }); - ++thread_index; - } - } // Synchronize all the threads. 
+ absl::BlockingCounter num_threads_running(num_threads_); + const SubsetIndex block_size(BlockSize(model_.num_subsets(), num_threads_)); + const SubsetIndex num_subsets(model_.num_subsets()); + SubsetIndex slice_start(0); + for (int thread_index = 0; thread_index < num_threads_; ++thread_index) { + const SubsetIndex slice_end = + std::min(slice_start + block_size, num_subsets); + thread_pool_->Schedule([&num_threads_running, slice_start, block_size, + num_subsets, thread_index, &reduced_costs, + &lagrangian_values]() { + const SubsetIndex slice_end = + std::min(slice_start + block_size, num_subsets); + FillLagrangianValueSlice(slice_start, slice_end, reduced_costs, + &lagrangian_values[thread_index]); + num_threads_running.DecrementCount(); + }); + slice_start = slice_end; + } + num_threads_running.Wait(); for (const Cost l : lagrangian_values) { lagrangian_value += l; } @@ -290,8 +289,8 @@ Cost SetCoverLagrangian::ParallelComputeLagrangianValue( // Perform a subgradient step. // In the general case, for an Integer Program A.x <=b, the Lagragian -// multipliers vector at step k+1 is defined as: u^{k+1} = u^k + t_k (A x^k - b) -// with term t_k = lambda_k * (UB - L(u^k)) / |A x^k - b|^2. +// multipliers vector at step k+1 is defined as: u^{k+1} = u^k + t_k (A x^k - +// b) with term t_k = lambda_k * (UB - L(u^k)) / |A x^k - b|^2. // |.| is the 2-norm (i.e. Euclidean) // In our case, the problem A x <= b is in the form A x >= 1. We need to // replace A x - b by s_i(u) = 1 - sum_{j \in J_i} x_j(u). @@ -343,9 +342,9 @@ void SetCoverLagrangian::ParallelUpdateMultipliers( step_size * (upper_bound - lagrangian_value) / subgradient_square_norm; for (const ElementIndex element : model_.ElementRange()) { // Avoid multipliers to go negative and to go through the roof. 1e6 chosen - // arbitrarily. [***] + const Cost kRoof = 1e6; // Arbitrary value, from [1]. 
(*multipliers)[element] = std::clamp( - (*multipliers)[element] + factor * subgradient[element], 0.0, 1e6); + (*multipliers)[element] + factor * subgradient[element], 0.0, kRoof); } } @@ -503,9 +502,9 @@ SetCoverLagrangian::ComputeLowerBound(const SubsetCostVector& costs, for (int iter = 0; iter < 1000; ++iter) { reduced_costs = ParallelComputeReducedCosts(costs, multipliers); const Cost lagrangian_value = - ComputeLagrangianValue(reduced_costs, multipliers); - UpdateMultipliers(step_size, lagrangian_value, upper_bound, reduced_costs, - &multipliers); + ParallelComputeLagrangianValue(reduced_costs, multipliers); + ParallelUpdateMultipliers(step_size, lagrangian_value, upper_bound, + reduced_costs, &multipliers); lower_bound = std::max(lower_bound, lagrangian_value); // step_size should be updated like this. For the time besing, we keep the // step size, because the implementation of the rest is not adequate yet diff --git a/ortools/algorithms/set_cover_lagrangian.h b/ortools/algorithms/set_cover_lagrangian.h index aa63627ad1..9e946b173a 100644 --- a/ortools/algorithms/set_cover_lagrangian.h +++ b/ortools/algorithms/set_cover_lagrangian.h @@ -15,6 +15,7 @@ #define OR_TOOLS_ALGORITHMS_SET_COVER_LAGRANGIAN_H_ #include +#include #include #include @@ -44,7 +45,12 @@ namespace operations_research { class SetCoverLagrangian { public: explicit SetCoverLagrangian(SetCoverInvariant* inv, int num_threads = 1) - : inv_(inv), model_(*inv->model()), num_threads_(num_threads) {} + : inv_(inv), + model_(*inv->model()), + num_threads_(num_threads), + thread_pool_(new ThreadPool(num_threads)) { + thread_pool_->StartWorkers(); + } // Returns true if a solution was found. // TODO(user): Add time-outs and exit with a partial solution. This seems @@ -137,6 +143,9 @@ class SetCoverLagrangian { // The number of threads to use for parallelization. int num_threads_; + // The thread pool used for parallelization. + std::unique_ptr thread_pool_; + // Total (scalar) Lagrangian cost. 
Cost lagrangian_; diff --git a/ortools/algorithms/set_cover_model.cc b/ortools/algorithms/set_cover_model.cc index f7b5faf24c..e67718b197 100644 --- a/ortools/algorithms/set_cover_model.cc +++ b/ortools/algorithms/set_cover_model.cc @@ -17,17 +17,161 @@ #include #include #include +#include #include #include +#include #include #include #include "absl/log/check.h" +#include "absl/random/discrete_distribution.h" +#include "absl/random/distributions.h" +#include "absl/random/random.h" #include "ortools/algorithms/set_cover.pb.h" #include "ortools/base/logging.h" namespace operations_research { +namespace { + +// Returns a value in [min, min + scaling_factor * (raw_value - min + +// random_term)], where raw_value is drawn from a discrete distribution, and +// random_term is a double drawn uniformly in [0, 1]. +BaseInt DiscreteAffine(absl::BitGen& bitgen, + absl::discrete_distribution& dist, BaseInt min, + double scaling_factor) { + const BaseInt raw_value = dist(bitgen); + const double random_term = absl::Uniform(bitgen, 0, 1.0); + const BaseInt affine_value = + static_cast( + std::floor((raw_value - min + random_term) * scaling_factor)) + + min; + return affine_value; +} + +// For a given view (SparseColumnView or SparseRowView), returns the +// distribution of the sizes of the vectors in the view, which can be used in +// an absl::discrete_distribution. 
+template +std::tuple> ComputeSizeHistogram( + const View& view) { + BaseInt max_size = 0; + BaseInt min_size = std::numeric_limits::max(); + for (const auto& vec : view) { + const BaseInt size = vec.size(); + min_size = std::min(min_size, size); + max_size = std::max(max_size, size); + } + std::vector weights(max_size + 1, 0); + for (const auto& vec : view) { + const BaseInt size = vec.size(); + ++weights[size]; + } + return {min_size, weights}; +} + +template +std::tuple> +ComputeSizeDistribution(const View& view) { + const auto [min_size, weights] = ComputeSizeHistogram(view); + absl::discrete_distribution dist(weights.begin(), weights.end()); + return {min_size, dist}; +} +} // namespace + +SetCoverModel SetCoverModel::GenerateRandomModelFrom( + const SetCoverModel& seed_model, BaseInt num_elements, BaseInt num_subsets, + double row_scale, double column_scale, double cost_scale) { + SetCoverModel model; + DCHECK_GT(row_scale, 0.0); + DCHECK_GT(column_scale, 0.0); + DCHECK_GT(cost_scale, 0.0); + model.num_elements_ = num_elements; + model.num_nonzeros_ = 0; + model.ReserveNumSubsets(num_subsets); + model.UpdateAllSubsetsList(); + absl::BitGen bitgen; + + // Create the distribution of the cardinalities of the subsets based on the + // histogram of column sizes in the seed model. + auto [min_column_size, column_dist] = + ComputeSizeDistribution(seed_model.columns()); + + // Create the distribution of the degrees of the elements based on the + // histogram of row sizes in the seed model. + auto [min_row_size, row_dist] = ComputeSizeDistribution(seed_model.rows()); + + // Prepare the degrees of the elements in the generated model, and use them + // in a distribution to generate the columns. This ponderates the columns + // towards the elements with higher degrees. ??? 
+ ElementToIntVector degrees(num_elements); + for (ElementIndex element(0); element.value() < num_elements; ++element) { + degrees[element] = + DiscreteAffine(bitgen, row_dist, min_row_size, row_scale); + } + absl::discrete_distribution degree_dist(degrees.begin(), + degrees.end()); + + // Vector indicating whether the generated model covers an element. + ElementBoolVector contains_element(num_elements, false); + + // Number of elements in the generated model, using the above vector. + BaseInt num_elements_covered(0); + + // Loop-local vector indicating whether the currently generated subset + // contains an element. + ElementBoolVector subset_contains_element(num_elements, false); + + for (SubsetIndex subset(0); subset.value() < num_subsets; ++subset) { + const BaseInt cardinality = + DiscreteAffine(bitgen, column_dist, min_column_size, column_scale); + model.columns_[subset].reserve(cardinality); + for (BaseInt iter = 0; iter < cardinality; ++iter) { + int num_tries = 0; + ElementIndex element; + // Choose an element that is not yet in the subset at random with a + // distribution that is proportional to the degree of the element. + do { + element = ElementIndex(degree_dist(bitgen)); + CHECK_LT(element.value(), num_elements); + ++num_tries; + if (num_tries > 10) { + return SetCoverModel(); + } + } while (subset_contains_element[element]); + ++model.num_nonzeros_; + model.columns_[subset].push_back(element); + subset_contains_element[element] = true; + if (!contains_element[element]) { + contains_element[element] = true; + ++num_elements_covered; + } + } + for (const ElementIndex element : model.columns_[subset]) { + subset_contains_element[element] = false; + } + } + CHECK_EQ(num_elements_covered, num_elements); + + // TODO(user): if necessary, use a better distribution for the costs. + // The generation of the costs is done in two steps. First, compute the + // minimum and maximum costs. 
+ Cost min_cost = std::numeric_limits::infinity(); + Cost max_cost = -min_cost; + for (const Cost cost : seed_model.subset_costs()) { + min_cost = std::min(min_cost, cost); + max_cost = std::max(max_cost, cost); + } + // Then, generate random numbers in [min_cost, min_cost + cost_range], where + // cost_range is defined as: + const Cost cost_range = cost_scale * (max_cost - min_cost); + for (Cost& cost : model.subset_costs_) { + cost = min_cost + absl::Uniform(bitgen, 0, cost_range); + } + return model; +} + void SetCoverModel::UpdateAllSubsetsList() { const BaseInt old_size = all_subsets_.size(); DCHECK_LE(old_size, num_subsets()); @@ -92,8 +236,8 @@ void SetCoverModel::AddElementToSubset(ElementIndex element, } // Reserves num_subsets columns in the model. -void SetCoverModel::ReserveNumSubsets(BaseInt number_of_subsets) { - num_subsets_ = std::max(num_subsets_, number_of_subsets); +void SetCoverModel::ReserveNumSubsets(BaseInt num_subsets) { + num_subsets_ = std::max(num_subsets_, num_subsets); columns_.resize(num_subsets_, SparseColumn()); subset_costs_.resize(num_subsets_, 0.0); } @@ -121,8 +265,8 @@ void SetCoverModel::CreateSparseRowView() { rows_.resize(num_elements_, SparseRow()); ElementToIntVector row_sizes(num_elements_, 0); for (const SubsetIndex subset : SubsetRange()) { - // Sort the columns. It's not super-critical to improve performance here as - // this needs to be done only once. + // Sort the columns. It's not super-critical to improve performance here + // as this needs to be done only once. 
std::sort(columns_[subset].begin(), columns_[subset].end()); for (const ElementIndex element : columns_[subset]) { ++row_sizes[element]; @@ -256,7 +400,7 @@ SetCoverModel::Stats SetCoverModel::ComputeCostStats() { } SetCoverModel::Stats SetCoverModel::ComputeRowStats() { - std::vector row_sizes(num_elements(), 0); + std::vector row_sizes(num_elements(), 0); for (const SparseColumn& column : columns_) { for (const ElementIndex element : column) { ++row_sizes[element.value()]; @@ -266,15 +410,15 @@ SetCoverModel::Stats SetCoverModel::ComputeRowStats() { } SetCoverModel::Stats SetCoverModel::ComputeColumnStats() { - std::vector column_sizes(columns_.size()); + std::vector column_sizes(columns_.size()); for (const SubsetIndex subset : SubsetRange()) { column_sizes[subset.value()] = columns_[subset].size(); } return ComputeStats(std::move(column_sizes)); } -std::vector SetCoverModel::ComputeRowDeciles() const { - std::vector row_sizes(num_elements(), 0); +std::vector SetCoverModel::ComputeRowDeciles() const { + std::vector row_sizes(num_elements(), 0); for (const SparseColumn& column : columns_) { for (const ElementIndex element : column) { ++row_sizes[element.value()]; @@ -283,8 +427,8 @@ std::vector SetCoverModel::ComputeRowDeciles() const { return ComputeDeciles(std::move(row_sizes)); } -std::vector SetCoverModel::ComputeColumnDeciles() const { - std::vector column_sizes(columns_.size()); +std::vector SetCoverModel::ComputeColumnDeciles() const { + std::vector column_sizes(columns_.size()); for (const SubsetIndex subset : SubsetRange()) { column_sizes[subset.value()] = columns_[subset].size(); } diff --git a/ortools/algorithms/set_cover_model.h b/ortools/algorithms/set_cover_model.h index fa3f55430b..ea21f82444 100644 --- a/ortools/algorithms/set_cover_model.h +++ b/ortools/algorithms/set_cover_model.h @@ -14,13 +14,7 @@ #ifndef OR_TOOLS_ALGORITHMS_SET_COVER_MODEL_H_ #define OR_TOOLS_ALGORITHMS_SET_COVER_MODEL_H_ -#if defined(_MSC_VER) -#include -typedef SSIZE_T 
ssize_t; -#else -#include -#endif // defined(_MSC_VER) - +#include #include #include @@ -29,7 +23,6 @@ typedef SSIZE_T ssize_t; #include "ortools/algorithms/set_cover.pb.h" #include "ortools/base/strong_int.h" #include "ortools/base/strong_vector.h" -#include "ortools/util/aligned_memory.h" // Representation class for the weighted set-covering problem. // @@ -65,7 +58,7 @@ using Cost = double; // (2e9) elements and subsets. If need arises one day, BaseInt can be split // into SubsetBaseInt and ElementBaseInt. // Quick testing has shown a slowdown of about 20-25% when using int64_t. -using BaseInt = int; +using BaseInt = int32_t; // We make heavy use of strong typing to avoid obvious mistakes. // Subset index. @@ -84,32 +77,14 @@ using SubsetRange = util_intops::StrongIntRange; using ElementRange = util_intops::StrongIntRange; using ColumnEntryRange = util_intops::StrongIntRange; -// SIMD operations require vectors to be aligned at 64-bytes on x86-64 -// processors as of 2024-05-03. -// TODO(user): improve the code to make it possible to use unaligned memory. -constexpr int kSetCoverAlignmentInBytes = 64; +using SubsetCostVector = util_intops::StrongVector; +using ElementCostVector = util_intops::StrongVector; -using CostAllocator = AlignedAllocator; -using ElementAllocator = - AlignedAllocator; -using SubsetAllocator = - AlignedAllocator; +using SparseColumn = util_intops::StrongVector; +using SparseRow = util_intops::StrongVector; -using SubsetCostVector = - util_intops::StrongVector; -using ElementCostVector = - util_intops::StrongVector; - -using SparseColumn = - util_intops::StrongVector; -using SparseRow = - util_intops::StrongVector; - -using IntAllocator = AlignedAllocator; -using ElementToIntVector = - util_intops::StrongVector; -using SubsetToIntVector = - util_intops::StrongVector; +using ElementToIntVector = util_intops::StrongVector; +using SubsetToIntVector = util_intops::StrongVector; // Views of the sparse vectors. 
These need not be aligned as it's their contents
// that need to be aligned.
@@ -117,6 +92,13 @@ using SparseColumnView = util_intops::StrongVector;
 using SparseRowView = util_intops::StrongVector;
 
 using SubsetBoolVector = util_intops::StrongVector;
+using ElementBoolVector = util_intops::StrongVector;
+
+// Useful for representing permutations.
+using ElementToElementVector =
+    util_intops::StrongVector;
+using SubsetToSubsetVector =
+    util_intops::StrongVector;
 
 // Main class for describing a weighted set-covering problem.
 class SetCoverModel {
@@ -132,6 +114,34 @@ class SetCoverModel {
         rows_(),
         all_subsets_() {}
 
+  // Constructs a weighted set-covering problem from a seed model, with
+  // num_elements elements and num_subsets subsets.
+  // - The distributions of the degrees of the elements and the cardinalities of
+  // the subsets are based on those of the seed model. They are scaled
+  // affinely by row_scale and column_scale respectively.
+  // - By affine scaling, we mean that the minimum value of the distribution is
+  // not scaled, but the variation above this minimum value is.
+  // - For a given subset with a given cardinality in the generated model, its
+  // elements are sampled from the distribution of the degrees as computed
+  // above.
+  // - The costs of the subsets in the new model are sampled from the
+  // distribution of the costs of the subsets in the seed model, scaled by
+  // cost_scale.
+  // IMPORTANT NOTICE: The algorithm may not succeed in generating a model
+  // where all the elements can be covered. In that case, the model will be
+  // empty.
+
+  static SetCoverModel GenerateRandomModelFrom(const SetCoverModel& seed_model,
+                                               BaseInt num_elements,
+                                               BaseInt num_subsets,
+                                               double row_scale,
+                                               double column_scale,
+                                               double cost_scale);
+
+  // Returns true if the model is empty, i.e. has no elements, no subsets, and
+  // no nonzeros.
+ bool IsEmpty() const { return rows_.empty() || columns_.empty(); } + // Current number of elements to be covered in the model, i.e. the number of // elements in S. In matrix terms, this is the number of rows. BaseInt num_elements() const { return num_elements_; } @@ -141,7 +151,7 @@ class SetCoverModel { BaseInt num_subsets() const { return num_subsets_; } // Current number of nonzeros in the matrix. - ssize_t num_nonzeros() const { return num_nonzeros_; } + int64_t num_nonzeros() const { return num_nonzeros_; } double FillRate() const { return 1.0 * num_nonzeros() / (1.0 * num_elements() * num_subsets()); @@ -243,10 +253,10 @@ class SetCoverModel { Stats ComputeColumnStats(); // Computes deciles on rows and returns a vector of deciles. - std::vector ComputeRowDeciles() const; + std::vector ComputeRowDeciles() const; // Computes deciles on columns and returns a vector of deciles. - std::vector ComputeColumnDeciles() const; + std::vector ComputeColumnDeciles() const; private: // Updates the all_subsets_ vector so that it always contains 0 to @@ -259,8 +269,9 @@ class SetCoverModel { // Number of subsets. Maintained for ease of access. BaseInt num_subsets_; - // Number of nonzeros in the matrix. - ssize_t num_nonzeros_; + // Number of nonzeros in the matrix. The value is an int64_t because there can + // be more than 1 << 31 nonzeros even with BaseInt = int32_t. + int64_t num_nonzeros_; // True when the SparseRowView is up-to-date. 
bool row_view_is_valid_; diff --git a/ortools/algorithms/set_cover_orlib_test.cc b/ortools/algorithms/set_cover_orlib_test.cc index 8dd153e44b..849e2b76ad 100644 --- a/ortools/algorithms/set_cover_orlib_test.cc +++ b/ortools/algorithms/set_cover_orlib_test.cc @@ -149,7 +149,7 @@ void ComputeLagrangianLowerBound(std::string name, SetCoverInvariant* inv) { const SetCoverModel* model = inv->model(); WallTimer timer; timer.Start(); - SetCoverLagrangian lagrangian(inv, /*num_threads=*/4); + SetCoverLagrangian lagrangian(inv, /*num_threads=*/8); const auto [lower_bound, reduced_costs, multipliers] = lagrangian.ComputeLowerBound(model->subset_costs(), inv->cost()); LogCostAndTiming(name, "LagrangianLowerBound", lower_bound, @@ -197,11 +197,11 @@ double RunSolver(std::string name, SetCoverModel* model) { global_timer.Start(); RunChvatalAndSteepest(name, model); // SetCoverInvariant inv = ComputeLPLowerBound(name, model); - RunMip(name, model); + // RunMip(name, model); RunChvatalAndGLS(name, model); SetCoverInvariant inv = RunElementDegreeGreedyAndSteepest(name, model); ComputeLagrangianLowerBound(name, &inv); - // IterateClearAndMip(name, inv); + // IterateClearAndMip(name, inv); IterateClearElementDegreeAndSteepest(name, &inv); return inv.cost(); } @@ -407,4 +407,22 @@ RAIL_TEST("rail4872.txt", 1527, 1861, MANYSECONDS); // [2] #undef SCP_TEST #undef RAIL_TEST +TEST(SetCoverHugeTest, GenerateProblem) { + SetCoverModel seed_model = + ReadRailSetCoverProblem(file::JoinPathRespectAbsolute( + ::testing::SrcDir(), data_dir, "rail4284.txt")); + seed_model.CreateSparseRowView(); + const BaseInt num_wanted_subsets(100'000'000); + const BaseInt num_wanted_elements(40'000); + const double row_scale = 1.1; + const double column_scale = 1.1; + const double cost_scale = 10.0; + SetCoverModel model = SetCoverModel::GenerateRandomModelFrom( + seed_model, num_wanted_elements, num_wanted_subsets, row_scale, + column_scale, cost_scale); + SetCoverInvariant inv = + 
RunElementDegreeGreedyAndSteepest("rail4284_huge.txt", &model); + LOG(INFO) << "Cost: " << inv.cost(); +} + } // namespace operations_research From 2664715126f3dc16f1e9fc74d0c363cf08c30db4 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 25 Sep 2024 07:59:17 +0200 Subject: [PATCH 021/105] constraint_solver: export from google3 --- .../constraint_solver/constraint_solver.cc | 1 + ortools/constraint_solver/constraint_solver.h | 15 +++++ .../constraint_solver/constraint_solveri.h | 64 ++++++++++++++++++- ortools/constraint_solver/local_search.cc | 53 +++++++++++++-- 4 files changed, 126 insertions(+), 7 deletions(-) diff --git a/ortools/constraint_solver/constraint_solver.cc b/ortools/constraint_solver/constraint_solver.cc index dbc5626cc9..09f5812b95 100644 --- a/ortools/constraint_solver/constraint_solver.cc +++ b/ortools/constraint_solver/constraint_solver.cc @@ -2673,6 +2673,7 @@ const char ModelVisitor::kFailuresLimitArgument[] = "failures_limit"; const char ModelVisitor::kFinalStatesArgument[] = "final_states"; const char ModelVisitor::kFixedChargeArgument[] = "fixed_charge"; const char ModelVisitor::kIndex2Argument[] = "index2"; +const char ModelVisitor::kIndex3Argument[] = "index3"; const char ModelVisitor::kIndexArgument[] = "index"; const char ModelVisitor::kInitialState[] = "initial_state"; const char ModelVisitor::kIntervalArgument[] = "interval"; diff --git a/ortools/constraint_solver/constraint_solver.h b/ortools/constraint_solver/constraint_solver.h index 8750b2242b..203cb42ba5 100644 --- a/ortools/constraint_solver/constraint_solver.h +++ b/ortools/constraint_solver/constraint_solver.h @@ -166,6 +166,8 @@ template class LightIntFunctionElementCt; template class LightIntIntFunctionElementCt; +template +class LightIntIntIntFunctionElementCt; inline int64_t CpRandomSeed() { return absl::GetFlag(FLAGS_cp_random_seed) == -1 @@ -1259,6 +1261,18 @@ class Solver { std::move(deep_serialize))); } + /// Light three-dimension function-based element 
constraint ensuring + /// var == values(index1, index2, index3). + /// The constraint does not perform bound reduction of the resulting variable + /// until the index variables are bound. + template + Constraint* MakeLightElement(F values, IntVar* const var, + IntVar* const index1, IntVar* const index2, + IntVar* const index3) { + return RevAlloc(new LightIntIntIntFunctionElementCt( + this, var, index1, index2, index3, std::move(values))); + } + /// Returns the expression expr such that vars[expr] == value. /// It assumes that vars are all different. IntExpr* MakeIndexExpression(const std::vector& vars, int64_t value); @@ -3710,6 +3724,7 @@ class ModelVisitor : public BaseObject { static const char kFinalStatesArgument[]; static const char kFixedChargeArgument[]; static const char kIndex2Argument[]; + static const char kIndex3Argument[]; static const char kIndexArgument[]; static const char kInitialState[]; static const char kIntervalArgument[]; diff --git a/ortools/constraint_solver/constraint_solveri.h b/ortools/constraint_solver/constraint_solveri.h index ff7911e5b7..4bf288af5c 100644 --- a/ortools/constraint_solver/constraint_solveri.h +++ b/ortools/constraint_solver/constraint_solveri.h @@ -891,10 +891,66 @@ class LightIntIntFunctionElementCt : public Constraint { IntVar* const var_; IntVar* const index1_; IntVar* const index2_; - Solver::IndexEvaluator2 values_; + F values_; std::function deep_serialize_; }; +// ----- LightIntIntIntFunctionElementCt ----- + +template +class LightIntIntIntFunctionElementCt : public Constraint { + public: + LightIntIntIntFunctionElementCt(Solver* const solver, IntVar* const var, + IntVar* const index1, IntVar* const index2, + IntVar* const index3, F values) + : Constraint(solver), + var_(var), + index1_(index1), + index2_(index2), + index3_(index3), + values_(std::move(values)) {} + ~LightIntIntIntFunctionElementCt() override {} + void Post() override { + Demon* demon = MakeConstraintDemon0( + solver(), this, 
&LightIntIntIntFunctionElementCt::IndexBound,
+        "IndexBound");
+    index1_->WhenBound(demon);
+    index2_->WhenBound(demon);
+    index3_->WhenBound(demon);
+  }
+  void InitialPropagate() override { IndexBound(); }
+
+  std::string DebugString() const override {
+    return "LightIntIntIntFunctionElementCt";
+  }
+
+  void Accept(ModelVisitor* const visitor) const override {
+    visitor->BeginVisitConstraint(ModelVisitor::kLightElementEqual, this);
+    visitor->VisitIntegerExpressionArgument(ModelVisitor::kTargetArgument,
+                                            var_);
+    visitor->VisitIntegerExpressionArgument(ModelVisitor::kIndexArgument,
+                                            index1_);
+    visitor->VisitIntegerExpressionArgument(ModelVisitor::kIndex2Argument,
+                                            index2_);
+    visitor->VisitIntegerExpressionArgument(ModelVisitor::kIndex3Argument,
+                                            index3_);
+    visitor->EndVisitConstraint(ModelVisitor::kLightElementEqual, this);
+  }
+
+ private:
+  void IndexBound() {
+    if (index1_->Bound() && index2_->Bound() && index3_->Bound()) {
+      var_->SetValue(values_(index1_->Min(), index2_->Min(), index3_->Min()));
+    }
+  }
+
+  IntVar* const var_;
+  IntVar* const index1_;
+  IntVar* const index2_;
+  IntVar* const index3_;
+  F values_;
+};
+
 /// The base class for all local search operators.
/// /// A local search operator is an object that defines the neighborhood of a @@ -1691,6 +1747,12 @@ LocalSearchOperator* MakeLocalSearchOperator( const std::vector& secondary_vars, std::function start_empty_path_class); +template +LocalSearchOperator* MakeLocalSearchOperatorWithArg( + Solver* solver, const std::vector& vars, + const std::vector& secondary_vars, + std::function start_empty_path_class, ArgType arg); + template LocalSearchOperator* MakeLocalSearchOperatorWithNeighbors( Solver* solver, const std::vector& vars, diff --git a/ortools/constraint_solver/local_search.cc b/ortools/constraint_solver/local_search.cc index 9830d80cbe..e15bce5b43 100644 --- a/ortools/constraint_solver/local_search.cc +++ b/ortools/constraint_solver/local_search.cc @@ -1473,11 +1473,16 @@ class SwapActiveChainOperator : public BaseInactiveNodeToPathOperator { public: SwapActiveChainOperator(const std::vector& vars, const std::vector& secondary_vars, - std::function start_empty_path_class) + std::function start_empty_path_class, + int max_chain_size) : BaseInactiveNodeToPathOperator(vars, secondary_vars, 2, std::move(start_empty_path_class)), last_before_chain_(-1), - last_chain_end_(-1) {} + last_chain_end_(-1), + current_chain_size_(0), + max_chain_size_(max_chain_size) { + DCHECK_GE(max_chain_size_, 1); + } ~SwapActiveChainOperator() override {} bool MakeNeighbor() override; bool IsIncremental() const override { return true; } @@ -1488,6 +1493,7 @@ class SwapActiveChainOperator : public BaseInactiveNodeToPathOperator { // potentially be out of sync with the last incremental moves. This requires // resetting incrementalism. 
last_chain_end_ = -1; + current_chain_size_ = 0; } protected: @@ -1507,10 +1513,15 @@ class SwapActiveChainOperator : public BaseInactiveNodeToPathOperator { std::string DebugString() const override { return "SwapActiveChainOperator"; } private: - void OnNodeInitialization() override { last_chain_end_ = -1; } + void OnNodeInitialization() override { + last_chain_end_ = -1; + current_chain_size_ = 0; + } int64_t last_before_chain_; int64_t last_chain_end_; + int current_chain_size_; + const int max_chain_size_; }; bool SwapActiveChainOperator::MakeNeighbor() { @@ -1523,17 +1534,27 @@ bool SwapActiveChainOperator::MakeNeighbor() { if (!IsPathEnd(chain_end) && before_chain != chain_end && MakeChainInactive(before_chain, chain_end) && MakeActive(GetInactiveNode(), before_chain)) { + ++current_chain_size_; return true; } else { last_chain_end_ = -1; + current_chain_size_ = 0; return false; } } + if (current_chain_size_ >= max_chain_size_) { + // Move to the next before_chain. + SetNextBaseToIncrement(0); + current_chain_size_ = 0; + return false; + } if (!IsPathEnd(last_chain_end_) && MakeChainInactive(last_chain_end_, Next(last_chain_end_))) { + ++current_chain_size_; return true; } last_chain_end_ = -1; + current_chain_size_ = 0; return false; } @@ -2529,6 +2550,15 @@ LocalSearchOperator* MakeLocalSearchOperator( new T(vars, secondary_vars, std::move(start_empty_path_class), nullptr)); } +template +LocalSearchOperator* MakeLocalSearchOperatorWithArg( + Solver* solver, const std::vector& vars, + const std::vector& secondary_vars, + std::function start_empty_path_class, ArgType arg) { + return solver->RevAlloc(new T( + vars, secondary_vars, std::move(start_empty_path_class), std::move(arg))); +} + template LocalSearchOperator* MakeLocalSearchOperatorWithNeighbors( Solver* solver, const std::vector& vars, @@ -2550,6 +2580,17 @@ LocalSearchOperator* MakeLocalSearchOperatorWithNeighbors( vars, secondary_vars, std::move(start_empty_path_class))); \ } +#define 
MAKE_LOCAL_SEARCH_OPERATOR_WITH_ARG(OperatorClass, ArgType) \ + template <> \ + LocalSearchOperator* MakeLocalSearchOperatorWithArg( \ + Solver * solver, const std::vector& vars, \ + const std::vector& secondary_vars, \ + std::function start_empty_path_class, ArgType arg) { \ + return solver->RevAlloc( \ + new OperatorClass(vars, secondary_vars, \ + std::move(start_empty_path_class), std::move(arg))); \ + } + #define MAKE_LOCAL_SEARCH_OPERATOR_WITH_NEIGHBORS(OperatorClass) \ template <> \ LocalSearchOperator* MakeLocalSearchOperatorWithNeighbors( \ @@ -2574,7 +2615,7 @@ MAKE_LOCAL_SEARCH_OPERATOR(MakeActiveOperator) MAKE_LOCAL_SEARCH_OPERATOR(MakeInactiveOperator) MAKE_LOCAL_SEARCH_OPERATOR(MakeChainInactiveOperator) MAKE_LOCAL_SEARCH_OPERATOR(SwapActiveOperator) -MAKE_LOCAL_SEARCH_OPERATOR(SwapActiveChainOperator) +MAKE_LOCAL_SEARCH_OPERATOR_WITH_ARG(SwapActiveChainOperator, int) MAKE_LOCAL_SEARCH_OPERATOR(ExtendedSwapActiveOperator) MAKE_LOCAL_SEARCH_OPERATOR(MakeActiveAndRelocate) MAKE_LOCAL_SEARCH_OPERATOR(RelocateAndMakeActiveOperator) @@ -2642,8 +2683,8 @@ LocalSearchOperator* Solver::MakeOperator( this, vars, secondary_vars, nullptr); } case Solver::SWAPACTIVECHAIN: { - return MakeLocalSearchOperator( - this, vars, secondary_vars, nullptr); + return MakeLocalSearchOperatorWithArg( + this, vars, secondary_vars, nullptr, kint32max); } case Solver::EXTENDEDSWAPACTIVE: { return MakeLocalSearchOperator( From 8e24fad4e3d37d2dde03216db0b376e8a0557867 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 25 Sep 2024 17:15:50 +0200 Subject: [PATCH 022/105] base: add memutil.h --- ortools/base/BUILD.bazel | 8 ++++++++ ortools/base/memutil.h | 28 ++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 ortools/base/memutil.h diff --git a/ortools/base/BUILD.bazel b/ortools/base/BUILD.bazel index 0a3979cd7a..3a93dede6f 100644 --- a/ortools/base/BUILD.bazel +++ b/ortools/base/BUILD.bazel @@ -367,6 +367,14 @@ cc_library( deps = [], ) 
+cc_library(
+    name = "memutil",
+    hdrs = ["memutil.h"],
+    deps = [
+        "@com_google_absl//absl/strings",
+    ],
+)
+
 cc_library(
     name = "murmur",
     hdrs = ["murmur.h"],
diff --git a/ortools/base/memutil.h b/ortools/base/memutil.h
new file mode 100644
index 0000000000..1d6fd87bae
--- /dev/null
+++ b/ortools/base/memutil.h
@@ -0,0 +1,28 @@
+// Copyright 2010-2024 Google LLC
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef OR_TOOLS_BASE_MEMUTIL_H_
+#define OR_TOOLS_BASE_MEMUTIL_H_
+
+#include "absl/strings/internal/memutil.h"
+
+namespace strings {
+inline char* memdup(const char* s, size_t slen) {
+  void* copy;
+  if ((copy = malloc(slen)) == nullptr) return nullptr;
+  memcpy(copy, s, slen);
+  return reinterpret_cast(copy);
+}
+}  // namespace strings
+
+#endif  // OR_TOOLS_BASE_MEMUTIL_H_

From 858d4a7e512b5b12e28379f1a48043c6143ec1d8 Mon Sep 17 00:00:00 2001
From: Corentin Le Molgat
Date: Wed, 25 Sep 2024 17:16:23 +0200
Subject: [PATCH 023/105] routing and constraint solver export from google3

---
 .../constraint_solver/constraint_solveri.h |  8 +-
 ortools/constraint_solver/local_search.cc | 4 +-
 ortools/routing/decision_builders.cc | 7 +-
 ortools/routing/filters.cc | 409 +++++++++---------
 ortools/routing/filters.h | 84 +++-
 ortools/routing/ils.cc | 31 +-
 ortools/routing/ils.h | 8 +-
 ortools/routing/ils.proto | 88 ++++
 ortools/routing/lp_scheduling.cc | 204 +++------
 ortools/routing/lp_scheduling.h | 70 +--
 ortools/routing/parameters.cc | 33 +-
ortools/routing/parameters.proto | 5 +- ortools/routing/routing.cc | 14 +- ortools/routing/routing.h | 40 +- ortools/util/piecewise_linear_function.cc | 67 ++- ortools/util/piecewise_linear_function.h | 87 ++++ 16 files changed, 694 insertions(+), 465 deletions(-) diff --git a/ortools/constraint_solver/constraint_solveri.h b/ortools/constraint_solver/constraint_solveri.h index 4bf288af5c..6243c1793f 100644 --- a/ortools/constraint_solver/constraint_solveri.h +++ b/ortools/constraint_solver/constraint_solveri.h @@ -1862,7 +1862,8 @@ class LocalSearchState { DEFINE_STRONG_INT_TYPE(ConstraintId, int); // Adds a variable domain to this state, returns a handler to the new domain. VariableDomainId AddVariableDomain(int64_t relaxed_min, int64_t relaxed_max); - void RelaxVariableDomain(VariableDomainId domain_id); + // Relaxes the domain, returns false iff the domain was already relaxed. + bool RelaxVariableDomain(VariableDomainId domain_id); bool TightenVariableDomainMin(VariableDomainId domain_id, int64_t value); bool TightenVariableDomainMax(VariableDomainId domain_id, int64_t value); int64_t VariableDomainMin(VariableDomainId domain_id) const; @@ -2105,8 +2106,9 @@ class LocalSearchState::Variable { } void Relax() const { if (state_ == nullptr) return; - state_->RelaxVariableDomain(domain_id_); - state_->PropagateRelax(domain_id_); + if (state_->RelaxVariableDomain(domain_id_)) { + state_->PropagateRelax(domain_id_); + } } bool Exists() const { return state_ != nullptr; } diff --git a/ortools/constraint_solver/local_search.cc b/ortools/constraint_solver/local_search.cc index e15bce5b43..ec33c28127 100644 --- a/ortools/constraint_solver/local_search.cc +++ b/ortools/constraint_solver/local_search.cc @@ -3282,7 +3282,7 @@ LocalSearchState::Variable LocalSearchState::DummyVariable() { return {nullptr, VariableDomainId(-1)}; } -void LocalSearchState::RelaxVariableDomain(VariableDomainId domain_id) { +bool LocalSearchState::RelaxVariableDomain(VariableDomainId domain_id) { 
DCHECK(state_domains_are_all_nonempty_); if (!state_has_relaxed_domains_) { trailed_num_committed_empty_domains_ = num_committed_empty_domains_; @@ -3297,7 +3297,9 @@ void LocalSearchState::RelaxVariableDomain(VariableDomainId domain_id) { --num_committed_empty_domains_; } current_domains_[domain_id] = relaxed_domains_[domain_id]; + return true; } + return false; } int64_t LocalSearchState::VariableDomainMin(VariableDomainId domain_id) const { diff --git a/ortools/routing/decision_builders.cc b/ortools/routing/decision_builders.cc index e6c3b01de7..a8ea4f2f6c 100644 --- a/ortools/routing/decision_builders.cc +++ b/ortools/routing/decision_builders.cc @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -356,10 +357,10 @@ class SetCumulsFromLocalDimensionCosts : public DecisionBuilder { std::vector* break_start_end_values) { cumul_values->clear(); break_start_end_values->clear(); - const RouteDimensionTravelInfo& dimension_travel_info = + const RouteDimensionTravelInfo* const dimension_travel_info = dimension_travel_info_per_route_.empty() - ? RouteDimensionTravelInfo() - : dimension_travel_info_per_route_[vehicle]; + ? 
nullptr + : &dimension_travel_info_per_route_[vehicle]; const Resource* resource = nullptr; if (rg_index_ >= 0 && model_.ResourceVar(vehicle, rg_index_)->Bound()) { const int resource_index = diff --git a/ortools/routing/filters.cc b/ortools/routing/filters.cc index 03f0fb1c56..9eedfea741 100644 --- a/ortools/routing/filters.cc +++ b/ortools/routing/filters.cc @@ -43,7 +43,6 @@ #include "ortools/base/map_util.h" #include "ortools/base/small_map.h" #include "ortools/base/strong_vector.h" -#include "ortools/base/types.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/constraint_solver/constraint_solveri.h" #include "ortools/routing/lp_scheduling.h" @@ -230,38 +229,43 @@ class ActiveNodeGroupFilter : public IntVarLocalSearchFilter { explicit ActiveNodeGroupFilter(const RoutingModel& routing_model) : IntVarLocalSearchFilter(routing_model.Nexts()), routing_model_(routing_model), - group_is_active_(routing_model.GetSameActivityGroupsCount(), false) {} + active_count_per_group_(routing_model.GetSameActivityGroupsCount(), + {.active = 0, .unknown = 0}), + node_is_active_(routing_model.Nexts().size(), false), + node_is_unknown_(routing_model.Nexts().size(), false) {} + bool Accept(const Assignment* delta, const Assignment* /*deltadelta*/, int64_t /*objective_min*/, int64_t /*objective_max*/) override { - absl::flat_hash_map active_count_per_group; - absl::flat_hash_map lns_count_per_group; + active_count_per_group_.Revert(); const Assignment::IntContainer& container = delta->IntVarContainer(); for (const IntVarElement& new_element : container.elements()) { IntVar* const var = new_element.Var(); int64_t index = -1; if (!FindIndex(var, &index)) continue; + const int group = routing_model_.GetSameActivityGroupOfIndex(index); + ActivityCounts counts = active_count_per_group_.Get(group); + // Change contribution to counts: remove old state, add new state. 
+ if (node_is_unknown_[index]) --counts.unknown; + if (node_is_active_[index]) --counts.active; if (new_element.Min() != new_element.Max()) { - // LNS detected. - lns_count_per_group[routing_model_.GetSameActivityGroupOfIndex( - index)]++; - continue; - } - int& active_count = gtl::LookupOrInsert( - &active_count_per_group, - routing_model_.GetSameActivityGroupOfIndex(index), 0); - if (new_element.Min() != index) { - if (active_count < 0) return false; - ++active_count; - } else { - if (active_count > 0) return false; - --active_count; + ++counts.unknown; + } else if (new_element.Min() != index) { + ++counts.active; } + active_count_per_group_.Set(group, counts); } - for (const auto& [group, count] : active_count_per_group) { + for (const int group : active_count_per_group_.ChangedIndices()) { + const ActivityCounts counts = active_count_per_group_.Get(group); const int group_size = routing_model_.GetSameActivityIndicesOfGroup(group).size(); - if (std::abs(count) + lns_count_per_group[group] == group_size) continue; - if ((count > 0) != group_is_active_[group]) return false; + // The group constraint is respected iff either 0 or group size is inside + // interval [num_active, num_active + num_unknown], + if (counts.active == 0) continue; + if (counts.active <= group_size && + group_size <= counts.active + counts.unknown) { + continue; + } + return false; } return true; } @@ -269,28 +273,38 @@ class ActiveNodeGroupFilter : public IntVarLocalSearchFilter { private: void OnSynchronize(const Assignment* /*delta*/) override { - for (int group = 0; group < routing_model_.GetSameActivityGroupsCount(); - ++group) { - bool is_group_active = false; + const int num_groups = routing_model_.GetSameActivityGroupsCount(); + for (int group = 0; group < num_groups; ++group) { + ActivityCounts counts = {.active = 0, .unknown = 0}; for (int node : routing_model_.GetSameActivityIndicesOfGroup(group)) { if (IsVarSynced(node)) { - is_group_active = (Value(node) != node); - break; + 
const bool is_active = (Value(node) != node); + node_is_active_[node] = is_active; + node_is_unknown_[node] = false; + counts.active += is_active ? 1 : 0; + } else { + ++counts.unknown; + node_is_unknown_[node] = true; + node_is_active_[node] = false; } } -#ifndef NDEBUG - for (int node : routing_model_.GetSameActivityIndicesOfGroup(group)) { - if (IsVarSynced(node)) { - DCHECK_EQ((Value(node) != node), is_group_active); - } - } -#endif // NDEBUG - group_is_active_[group] = is_group_active; + active_count_per_group_.Set(group, counts); } + active_count_per_group_.Commit(); } const RoutingModel& routing_model_; - std::vector group_is_active_; + struct ActivityCounts { + int active; + int unknown; + }; + CommittableVector active_count_per_group_; + // node_is_active_[node] is true iff node was synced and active at last + // Synchronize(). + std::vector node_is_active_; + // node_is_unknown_[node] is true iff node was not synced at last + // Synchronize(). + std::vector node_is_unknown_; }; } // namespace @@ -310,54 +324,53 @@ class NodeDisjunctionFilter : public IntVarLocalSearchFilter { bool filter_cost) : IntVarLocalSearchFilter(routing_model.Nexts()), routing_model_(routing_model), - active_per_disjunction_(routing_model.GetNumberOfDisjunctions(), 0), - inactive_per_disjunction_(routing_model.GetNumberOfDisjunctions(), 0), + count_per_disjunction_(routing_model.GetNumberOfDisjunctions(), + {.active = 0, .inactive = 0}), synchronized_objective_value_(std::numeric_limits::min()), accepted_objective_value_(std::numeric_limits::min()), filter_cost_(filter_cost), has_mandatory_disjunctions_(routing_model.HasMandatoryDisjunctions()) {} + using Disjunction = RoutingModel::DisjunctionIndex; + bool Accept(const Assignment* delta, const Assignment* /*deltadelta*/, int64_t /*objective_min*/, int64_t objective_max) override { - const int64_t kUnassigned = -1; - const Assignment::IntContainer& container = delta->IntVarContainer(); - gtl::small_map> - disjunction_active_deltas; 
- gtl::small_map> - disjunction_inactive_deltas; + count_per_disjunction_.Revert(); bool lns_detected = false; - // Update active/inactive count per disjunction for each element of delta. - for (const IntVarElement& new_element : container.elements()) { - IntVar* const var = new_element.Var(); - int64_t index = kUnassigned; - if (FindIndex(var, &index)) { - const bool is_inactive = - (new_element.Min() <= index && new_element.Max() >= index); - if (new_element.Min() != new_element.Max()) { - lns_detected = true; - } - for (const RoutingModel::DisjunctionIndex disjunction_index : - routing_model_.GetDisjunctionIndices(index)) { - const bool is_var_synced = IsVarSynced(index); - if (!is_var_synced || (Value(index) == index) != is_inactive) { - ++gtl::LookupOrInsert(is_inactive ? &disjunction_inactive_deltas - : &disjunction_active_deltas, - disjunction_index, 0); - if (is_var_synced) { - --gtl::LookupOrInsert(is_inactive ? &disjunction_active_deltas - : &disjunction_inactive_deltas, - disjunction_index, 0); - } - } - } + // Update the active/inactive counts of each modified disjunction. + for (const IntVarElement& element : delta->IntVarContainer().elements()) { + int64_t node = -1; + if (!FindIndex(element.Var(), &node)) continue; + lns_detected |= element.Min() != element.Max(); + // Compute difference in how this node contributes to activity counts. + const bool is_var_synced = IsVarSynced(node); + const bool was_active = is_var_synced && Value(node) != node; + const bool is_active = node < element.Min() || element.Max() < node; + ActivityCount contribution_delta = {.active = 0, .inactive = 0}; + if (is_var_synced) { + contribution_delta.active -= was_active; + contribution_delta.inactive -= !was_active; + } + contribution_delta.active += is_active; + contribution_delta.inactive += !is_active; + // Common shortcut: if the change is neutral, counts stay the same. 
+ if (contribution_delta.active == 0 && contribution_delta.inactive == 0) { + continue; + } + // Change counts of all disjunctions affected by this node. + for (const Disjunction disjunction : + routing_model_.GetDisjunctionIndices(node)) { + ActivityCount new_count = + count_per_disjunction_.Get(disjunction.value()); + new_count.active += contribution_delta.active; + new_count.inactive += contribution_delta.inactive; + count_per_disjunction_.Set(disjunction.value(), new_count); } } // Check if any disjunction has too many active nodes. - for (const auto [disjunction_index, active_nodes_delta] : - disjunction_active_deltas) { - // Too many active nodes. - if (active_per_disjunction_[disjunction_index] + active_nodes_delta > - routing_model_.GetDisjunctionMaxCardinality(disjunction_index)) { + for (const int index : count_per_disjunction_.ChangedIndices()) { + if (count_per_disjunction_.Get(index).active > + routing_model_.GetDisjunctionMaxCardinality(Disjunction(index))) { return false; } } @@ -367,70 +380,32 @@ class NodeDisjunctionFilter : public IntVarLocalSearchFilter { } // Update penalty costs for disjunctions. accepted_objective_value_ = synchronized_objective_value_; - for (const auto [disjunction_index, inactive_nodes_delta] : - disjunction_inactive_deltas) { - const int64_t penalty = - routing_model_.GetDisjunctionPenalty(disjunction_index); + for (const int index : count_per_disjunction_.ChangedIndices()) { + // If num inactives did not change, skip. Common shortcut. + const int old_inactives = + count_per_disjunction_.GetCommitted(index).inactive; + const int new_inactives = count_per_disjunction_.Get(index).inactive; + if (old_inactives == new_inactives) continue; + // If this disjunction has no penalty for inactive nodes, skip. 
+ const Disjunction disjunction(index); + const int64_t penalty = routing_model_.GetDisjunctionPenalty(disjunction); if (penalty == 0) continue; - const int current_inactive_nodes = - inactive_per_disjunction_[disjunction_index]; - const int max_inactive_cardinality = - routing_model_.GetDisjunctionNodeIndices(disjunction_index).size() - - routing_model_.GetDisjunctionMaxCardinality(disjunction_index); - // Too many inactive nodes. - const int inactive_nodes_above_limit = - (current_inactive_nodes + inactive_nodes_delta) - - max_inactive_cardinality; - if (inactive_nodes_above_limit > 0 && penalty < 0) { - // Nodes are mandatory, i.e. exactly max_cardinality nodes must be - // performed, so the move is not acceptable. - return false; - } - const RoutingModel::PenaltyCostBehavior penalty_cost_behavior = - routing_model_.GetDisjunctionPenaltyCostBehavior(disjunction_index); - switch (penalty_cost_behavior) { - case RoutingModel::PenaltyCostBehavior::PENALIZE_ONCE: - if (inactive_nodes_above_limit > 0) { // penalty cost to update - if (current_inactive_nodes <= max_inactive_cardinality) { - // Add penalty if there were not too many inactive nodes before - // the move. - CapAddTo(penalty, &accepted_objective_value_); - } - } else { // no more penalty cost - if (current_inactive_nodes > max_inactive_cardinality) { - // Remove penalty if there were too many inactive nodes before the - // move. - accepted_objective_value_ = - CapSub(accepted_objective_value_, penalty); - } - } - break; - case RoutingModel::PenaltyCostBehavior::PENALIZE_PER_INACTIVE: - if (inactive_nodes_above_limit > 0) { // penalty cost to update - if (current_inactive_nodes <= max_inactive_cardinality) { - // Add penalty if there were not too many inactive nodes before - // the move. - CapAddTo(penalty * inactive_nodes_above_limit, - &accepted_objective_value_); - } else if (inactive_nodes_delta != 0) { - // Update penalty cost if there are new or fewer inactive nodes. 
- CapAddTo(penalty * inactive_nodes_delta, - &accepted_objective_value_); - } - } else { // no more penalty cost - if (current_inactive_nodes > max_inactive_cardinality) { - // Remove penalty if there were too many inactive nodes before the - // move. - const int current_inactive_nodes_above_limit = - current_inactive_nodes - max_inactive_cardinality; - accepted_objective_value_ = - CapSub(accepted_objective_value_, - penalty * current_inactive_nodes_above_limit); - } - } - break; + // Compute the new cost of activity bound violations. + const int max_inactives = + routing_model_.GetDisjunctionNodeIndices(disjunction).size() - + routing_model_.GetDisjunctionMaxCardinality(disjunction); + int new_violation = std::max(0, new_inactives - max_inactives); + int old_violation = std::max(0, old_inactives - max_inactives); + // If nodes are mandatory, there can be no violation. + if (penalty < 0 && new_violation > 0) return false; + if (routing_model_.GetDisjunctionPenaltyCostBehavior(disjunction) == + RoutingModel::PenaltyCostBehavior::PENALIZE_ONCE) { + new_violation = std::min(1, new_violation); + old_violation = std::min(1, old_violation); } + CapAddTo(CapProd(penalty, (new_violation - old_violation)), + &accepted_objective_value_); } // Only compare to max as a cost lower bound is computed. 
return accepted_objective_value_ <= objective_max; @@ -446,51 +421,44 @@ class NodeDisjunctionFilter : public IntVarLocalSearchFilter { private: void OnSynchronize(const Assignment* /*delta*/) override { synchronized_objective_value_ = 0; - for (RoutingModel::DisjunctionIndex i(0); - i < active_per_disjunction_.size(); ++i) { - active_per_disjunction_[i] = 0; - inactive_per_disjunction_[i] = 0; - const std::vector& disjunction_indices = - routing_model_.GetDisjunctionNodeIndices(i); - for (const int64_t index : disjunction_indices) { - if (IsVarSynced(index)) { - if (Value(index) != index) { - ++active_per_disjunction_[i]; - } else { - ++inactive_per_disjunction_[i]; - } - } + count_per_disjunction_.Revert(); + const int num_disjunctions = routing_model_.GetNumberOfDisjunctions(); + for (Disjunction disjunction(0); disjunction < num_disjunctions; + ++disjunction) { + // Count number of active/inactive nodes of this disjunction. + ActivityCount count = {.active = 0, .inactive = 0}; + const auto& nodes = routing_model_.GetDisjunctionNodeIndices(disjunction); + for (const int64_t node : nodes) { + if (!IsVarSynced(node)) continue; + const int is_active = Value(node) != node; + count.active += is_active; + count.inactive += !is_active; } + count_per_disjunction_.Set(disjunction.value(), count); + // Add penalty of this disjunction to total cost. 
if (!filter_cost_) continue; - const int64_t penalty = routing_model_.GetDisjunctionPenalty(i); - const int max_cardinality = - routing_model_.GetDisjunctionMaxCardinality(i); - const RoutingModel::PenaltyCostBehavior penalty_cost_behavior = - routing_model_.GetDisjunctionPenaltyCostBehavior(i); - - const int inactive_nodes_above_limit = - inactive_per_disjunction_[i] - - (disjunction_indices.size() - max_cardinality); - if (inactive_nodes_above_limit > 0 && penalty > 0) { - switch (penalty_cost_behavior) { - case RoutingModel::PenaltyCostBehavior::PENALIZE_ONCE: - CapAddTo(penalty, &synchronized_objective_value_); - break; - case RoutingModel::PenaltyCostBehavior::PENALIZE_PER_INACTIVE: - CapAddTo(penalty * inactive_nodes_above_limit, - &synchronized_objective_value_); - break; + const int64_t penalty = routing_model_.GetDisjunctionPenalty(disjunction); + const int max_actives = + routing_model_.GetDisjunctionMaxCardinality(disjunction); + int violation = count.inactive - (nodes.size() - max_actives); + if (violation > 0 && penalty > 0) { + if (routing_model_.GetDisjunctionPenaltyCostBehavior(disjunction) == + RoutingModel::PenaltyCostBehavior::PENALIZE_ONCE) { + violation = std::min(1, violation); } + CapAddTo(CapProd(penalty, violation), &synchronized_objective_value_); } } + count_per_disjunction_.Commit(); + accepted_objective_value_ = synchronized_objective_value_; } const RoutingModel& routing_model_; - - util_intops::StrongVector - active_per_disjunction_; - util_intops::StrongVector - inactive_per_disjunction_; + struct ActivityCount { + int active = 0; + int inactive = 0; + }; + CommittableVector count_per_disjunction_; int64_t synchronized_objective_value_; int64_t accepted_objective_value_; const bool filter_cost_; @@ -1149,7 +1117,7 @@ class PathCumulFilter : public BasePathFilter { PathCumulFilter(const RoutingModel& routing_model, const RoutingDimension& dimension, bool propagate_own_objective_value, - bool filter_objective_cost, bool can_use_lp); + 
bool filter_objective_cost, bool may_use_optimizers); ~PathCumulFilter() override {} std::string DebugString() const override { return "PathCumulFilter(" + name_ + ")"; @@ -1163,7 +1131,7 @@ class PathCumulFilter : public BasePathFilter { : accepted_objective_value_; } bool UsesDimensionOptimizers() { - if (!can_use_lp_) return false; + if (!may_use_optimizers_) return false; for (int vehicle = 0; vehicle < routing_model_.vehicles(); ++vehicle) { if (FilterWithDimensionCumulOptimizerForVehicle(vehicle)) return true; } @@ -1271,7 +1239,7 @@ class PathCumulFilter : public BasePathFilter { } bool FilterWithDimensionCumulOptimizerForVehicle(int vehicle) const { - if (!can_use_lp_ || FilterCumulPiecewiseLinearCosts()) { + if (!may_use_optimizers_ || FilterCumulPiecewiseLinearCosts()) { return false; } @@ -1414,13 +1382,13 @@ class PathCumulFilter : public BasePathFilter { absl::btree_set delta_paths_; const std::string name_; - LocalDimensionCumulOptimizer* optimizer_; + LocalDimensionCumulOptimizer* lp_optimizer_; LocalDimensionCumulOptimizer* mp_optimizer_; const std::function path_accessor_; const bool filter_objective_cost_; // This boolean indicates if the LP optimizer can be used if necessary to // optimize the dimension cumuls. 
- const bool can_use_lp_; + const bool may_use_optimizers_; const bool propagate_own_objective_value_; std::vector min_path_cumuls_; @@ -1440,7 +1408,8 @@ std::vector SumOfVectors(const std::vector& v1, PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, const RoutingDimension& dimension, bool propagate_own_objective_value, - bool filter_objective_cost, bool can_use_lp) + bool filter_objective_cost, + bool may_use_optimizers) : BasePathFilter(routing_model.Nexts(), dimension.cumuls().size(), routing_model.GetPathsMetadata()), routing_model_(routing_model), @@ -1472,11 +1441,11 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, delta_max_end_cumul_(0), delta_nodes_with_precedences_and_changed_cumul_(routing_model.Size()), name_(dimension.name()), - optimizer_(routing_model.GetMutableLocalCumulLPOptimizer(dimension)), + lp_optimizer_(routing_model.GetMutableLocalCumulLPOptimizer(dimension)), mp_optimizer_(routing_model.GetMutableLocalCumulMPOptimizer(dimension)), path_accessor_([this](int64_t node) { return GetNext(node); }), filter_objective_cost_(filter_objective_cost), - can_use_lp_(can_use_lp), + may_use_optimizers_(may_use_optimizers), propagate_own_objective_value_(propagate_own_objective_value) { cumul_soft_bounds_.resize(cumuls_.size()); cumul_soft_lower_bounds_.resize(cumuls_.size()); @@ -1555,7 +1524,7 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, #ifndef NDEBUG for (int vehicle = 0; vehicle < routing_model.vehicles(); vehicle++) { if (FilterWithDimensionCumulOptimizerForVehicle(vehicle)) { - DCHECK_NE(optimizer_, nullptr); + DCHECK_NE(lp_optimizer_, nullptr); DCHECK_NE(mp_optimizer_, nullptr); } } @@ -1723,7 +1692,7 @@ void PathCumulFilter::OnBeforeSynchronizePaths() { LocalDimensionCumulOptimizer* const optimizer = (FilterSoftSpanQuadraticCost(vehicle) || FilterBreakCost(vehicle)) ? 
mp_optimizer_ - : optimizer_; + : lp_optimizer_; DCHECK(optimizer != nullptr); const DimensionSchedulingStatus status = optimizer->ComputeRouteCumulCostWithoutFixedTransits( @@ -2053,7 +2022,7 @@ bool PathCumulFilter::FinalizeAcceptPath(int64_t /*objective_min*/, CapAdd(cumul_cost_delta_, CapProd(global_span_cost_coefficient_, CapSub(new_max_end, new_min_start))); - if (can_use_lp_ && optimizer_ != nullptr && + if (may_use_optimizers_ && lp_optimizer_ != nullptr && accepted_objective_value_ <= objective_max) { const size_t num_touched_paths = GetTouchedPathStarts().size(); std::vector path_delta_cost_values(num_touched_paths, 0); @@ -2066,7 +2035,7 @@ bool PathCumulFilter::FinalizeAcceptPath(int64_t /*objective_min*/, } int64_t path_delta_cost_with_lp = 0; const DimensionSchedulingStatus status = - optimizer_->ComputeRouteCumulCostWithoutFixedTransits( + lp_optimizer_->ComputeRouteCumulCostWithoutFixedTransits( vehicle, path_accessor_, /*resource=*/nullptr, filter_objective_cost_ ? &path_delta_cost_with_lp : nullptr); if (status == DimensionSchedulingStatus::INFEASIBLE) { @@ -2242,11 +2211,11 @@ int64_t PathCumulFilter::ComputePathMaxStartFromEndCumul( IntVarLocalSearchFilter* MakePathCumulFilter(const RoutingDimension& dimension, bool propagate_own_objective_value, bool filter_objective_cost, - bool can_use_lp) { + bool may_use_optimizers) { RoutingModel& model = *dimension.model(); return model.solver()->RevAlloc( new PathCumulFilter(model, dimension, propagate_own_objective_value, - filter_objective_cost, can_use_lp)); + filter_objective_cost, may_use_optimizers)); } namespace { @@ -2812,11 +2781,11 @@ class LPCumulFilter : public IntVarLocalSearchFilter { void OnSynchronize(const Assignment* delta) override; int64_t GetSynchronizedObjectiveValue() const override; std::string DebugString() const override { - return "LPCumulFilter(" + optimizer_.dimension()->name() + ")"; + return "LPCumulFilter(" + lp_optimizer_.dimension()->name() + ")"; } private: - 
GlobalDimensionCumulOptimizer& optimizer_; + GlobalDimensionCumulOptimizer& lp_optimizer_; GlobalDimensionCumulOptimizer& mp_optimizer_; const bool filter_objective_cost_; int64_t synchronized_cost_without_transit_; @@ -2826,11 +2795,11 @@ class LPCumulFilter : public IntVarLocalSearchFilter { }; LPCumulFilter::LPCumulFilter(const std::vector& nexts, - GlobalDimensionCumulOptimizer* optimizer, + GlobalDimensionCumulOptimizer* lp_optimizer, GlobalDimensionCumulOptimizer* mp_optimizer, bool filter_objective_cost) : IntVarLocalSearchFilter(nexts), - optimizer_(*optimizer), + lp_optimizer_(*lp_optimizer), mp_optimizer_(*mp_optimizer), filter_objective_cost_(filter_objective_cost), synchronized_cost_without_transit_(-1), @@ -2861,8 +2830,8 @@ bool LPCumulFilter::Accept(const Assignment* delta, if (!filter_objective_cost_) { // No need to compute the cost of the LP, only verify its feasibility. delta_cost_without_transit_ = 0; - const DimensionSchedulingStatus status = - optimizer_.ComputeCumuls(next_accessor, {}, nullptr, nullptr, nullptr); + const DimensionSchedulingStatus status = lp_optimizer_.ComputeCumuls( + next_accessor, {}, nullptr, nullptr, nullptr); if (status == DimensionSchedulingStatus::OPTIMAL) return true; if (status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY && mp_optimizer_.ComputeCumuls(next_accessor, {}, nullptr, nullptr, @@ -2874,7 +2843,7 @@ bool LPCumulFilter::Accept(const Assignment* delta, } const DimensionSchedulingStatus status = - optimizer_.ComputeCumulCostWithoutFixedTransits( + lp_optimizer_.ComputeCumulCostWithoutFixedTransits( next_accessor, &delta_cost_without_transit_); if (status == DimensionSchedulingStatus::INFEASIBLE) { delta_cost_without_transit_ = std::numeric_limits::max(); @@ -2899,7 +2868,7 @@ int64_t LPCumulFilter::GetAcceptedObjectiveValue() const { void LPCumulFilter::OnSynchronize(const Assignment* /*delta*/) { // TODO(user): Try to optimize this so the LP is not called when the last // computed delta cost 
corresponds to the solution being synchronized. - const RoutingModel& model = *optimizer_.dimension()->model(); + const RoutingModel& model = *lp_optimizer_.dimension()->model(); const auto& next_accessor = [this, &model](int64_t index) { return IsVarSynced(index) ? Value(index) : model.IsStart(index) ? model.End(model.VehicleIndex(index)) @@ -2911,10 +2880,10 @@ void LPCumulFilter::OnSynchronize(const Assignment* /*delta*/) { } DimensionSchedulingStatus status = filter_objective_cost_ - ? optimizer_.ComputeCumulCostWithoutFixedTransits( + ? lp_optimizer_.ComputeCumulCostWithoutFixedTransits( next_accessor, &synchronized_cost_without_transit_) - : optimizer_.ComputeCumuls(next_accessor, {}, nullptr, nullptr, - nullptr); + : lp_optimizer_.ComputeCumuls(next_accessor, {}, nullptr, nullptr, + nullptr); if (status == DimensionSchedulingStatus::INFEASIBLE) { // TODO(user): This should only happen if the LP solver times out. // DCHECK the fail wasn't due to an infeasible model. @@ -2941,13 +2910,13 @@ int64_t LPCumulFilter::GetSynchronizedObjectiveValue() const { } // namespace IntVarLocalSearchFilter* MakeGlobalLPCumulFilter( - GlobalDimensionCumulOptimizer* optimizer, + GlobalDimensionCumulOptimizer* lp_optimizer, GlobalDimensionCumulOptimizer* mp_optimizer, bool filter_objective_cost) { - DCHECK_NE(optimizer, nullptr); + DCHECK_NE(lp_optimizer, nullptr); DCHECK_NE(mp_optimizer, nullptr); - const RoutingModel& model = *optimizer->dimension()->model(); + const RoutingModel& model = *lp_optimizer->dimension()->model(); return model.solver()->RevAlloc(new LPCumulFilter( - model.Nexts(), optimizer, mp_optimizer, filter_objective_cost)); + model.Nexts(), lp_optimizer, mp_optimizer, filter_objective_cost)); } namespace { @@ -3175,11 +3144,11 @@ void ResourceGroupAssignmentFilter::OnSynchronizePathFromStart(int64_t start) { vehicle_to_resource_class_assignment_costs_[v] = {route_cost}; return; } - // NOTE(user): Even if filter_objective_cost_ is false, we still need to - // 
call ComputeVehicleToResourceClassAssignmentCosts() for every vehicle - // requiring resource assignment to keep track of whether or not a given - // vehicle-to-resource-class assignment is possible by storing 0 or -1 in - // vehicle_to_resource_class_assignment_costs_. + // NOTE(user): Even if filter_objective_cost_ is false, we + // still need to call ComputeVehicleToResourceClassAssignmentCosts() for every + // vehicle requiring resource assignment to keep track of whether or not a + // given vehicle-to-resource-class assignment is possible by storing 0 or -1 + // in vehicle_to_resource_class_assignment_costs_. if (!ComputeVehicleToResourceClassAssignmentCosts( v, resource_group_, ignored_resources_per_class_, next_accessor, dimension_.transit_evaluator(v), filter_objective_cost_, @@ -3315,16 +3284,17 @@ class ResourceAssignmentFilter : public LocalSearchFilter { }; ResourceAssignmentFilter::ResourceAssignmentFilter( - const std::vector& nexts, LocalDimensionCumulOptimizer* optimizer, + const std::vector& nexts, + LocalDimensionCumulOptimizer* lp_optimizer, LocalDimensionCumulOptimizer* mp_optimizer, bool propagate_own_objective_value, bool filter_objective_cost) : propagate_own_objective_value_(propagate_own_objective_value), - dimension_name_(optimizer->dimension()->name()) { - const RoutingModel& model = *optimizer->dimension()->model(); + dimension_name_(lp_optimizer->dimension()->name()) { + const RoutingModel& model = *lp_optimizer->dimension()->model(); for (const auto& resource_group : model.GetResourceGroups()) { resource_group_assignment_filters_.push_back( model.solver()->RevAlloc(new ResourceGroupAssignmentFilter( - nexts, resource_group.get(), optimizer, mp_optimizer, + nexts, resource_group.get(), lp_optimizer, mp_optimizer, filter_objective_cost))); } } @@ -3361,14 +3331,14 @@ void ResourceAssignmentFilter::Synchronize(const Assignment* assignment, } // namespace LocalSearchFilter* MakeResourceAssignmentFilter( - LocalDimensionCumulOptimizer* 
optimizer, + LocalDimensionCumulOptimizer* lp_optimizer, LocalDimensionCumulOptimizer* mp_optimizer, bool propagate_own_objective_value, bool filter_objective_cost) { - const RoutingModel& model = *optimizer->dimension()->model(); - DCHECK_NE(optimizer, nullptr); + const RoutingModel& model = *lp_optimizer->dimension()->model(); + DCHECK_NE(lp_optimizer, nullptr); DCHECK_NE(mp_optimizer, nullptr); return model.solver()->RevAlloc(new ResourceAssignmentFilter( - model.Nexts(), optimizer, mp_optimizer, propagate_own_objective_value, + model.Nexts(), lp_optimizer, mp_optimizer, propagate_own_objective_value, filter_objective_cost)); } @@ -4243,16 +4213,23 @@ bool LightVehicleBreaksChecker::Check() const { if (!path_data_[path].span.Exists()) continue; const int64_t total_transit = path_data_[path].total_transit.Min(); // Compute lower bound of path span from break and path time windows. + const PathData& data = path_data_[path]; int64_t lb_span_tw = total_transit; - const int64_t start_max = path_data_[path].start_cumul.Max(); - const int64_t end_min = path_data_[path].end_cumul.Min(); - for (const auto& br : path_data_[path].vehicle_breaks) { + const int64_t start_max = data.start_cumul.Max(); + const int64_t end_min = data.end_cumul.Min(); + for (const auto& br : data.vehicle_breaks) { if (!br.is_performed_min) continue; if (br.start_max < end_min && start_max < br.end_min) { CapAddTo(br.duration_min, &lb_span_tw); } } - if (!path_data_[path].span.SetMin(lb_span_tw)) return false; + if (!data.span.SetMin(lb_span_tw)) return false; + if (!data.start_cumul.SetMax(CapSub(data.end_cumul.Max(), lb_span_tw))) { + return false; + } + if (!data.end_cumul.SetMin(CapAdd(data.start_cumul.Min(), lb_span_tw))) { + return false; + } } return true; } diff --git a/ortools/routing/filters.h b/ortools/routing/filters.h index c88578bef5..ee332c8f81 100644 --- a/ortools/routing/filters.h +++ b/ortools/routing/filters.h @@ -14,6 +14,7 @@ #ifndef OR_TOOLS_ROUTING_FILTERS_H_ #define 
OR_TOOLS_ROUTING_FILTERS_H_ +#include #include #include #include @@ -22,6 +23,7 @@ #include #include +#include "absl/log/check.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "ortools/base/types.h" @@ -36,6 +38,84 @@ namespace operations_research::routing { +// A vector that allows to revert back to a previously committed state, +// get the set of changed indices, and get current and committed values. +template +class CommittableVector { + public: + // Makes a vector with initial elements all committed to value. + CommittableVector(size_t num_elements, const T& value) + : elements_(num_elements, {value, value}), changed_(num_elements) {} + + // Return the size of the vector. + size_t Size() const { return elements_.size(); } + + // Returns a copy of the value stored at index in the current state. + // Does not return a reference, because the class needs to know when elements + // are modified. + T Get(size_t index) const { + DCHECK_LT(index, elements_.size()); + return elements_[index].current; + } + + // Set the value stored at index in the current state to given value. + void Set(size_t index, const T& value) { + DCHECK_GE(index, 0); + DCHECK_LT(index, elements_.size()); + changed_.Set(index); + elements_[index].current = value; + } + + // Changes the values of the vector to those in the last Commit(). + void Revert() { + for (const size_t index : changed_.PositionsSetAtLeastOnce()) { + elements_[index].current = elements_[index].committed; + } + changed_.ClearAll(); + } + + // Makes the current state committed, clearing all changes. + void Commit() { + for (const size_t index : changed_.PositionsSetAtLeastOnce()) { + elements_[index].committed = elements_[index].current; + } + changed_.ClearAll(); + } + + // Sets all elements of this vector to given value, and commits to this state. + // Supposes that there are no changes since the last Commit() or Revert(). 
+ void SetAllAndCommit(const T& value) { + DCHECK_EQ(0, changed_.NumberOfSetCallsWithDifferentArguments()); + elements_.assign(elements_.size(), {value, value}); + } + + // Returns a copy of the value stored at index in the last committed state. + T GetCommitted(size_t index) const { + DCHECK_LT(index, elements_.size()); + return elements_[index].committed; + } + + // Return true iff the value at index has been Set() since the last Commit() + // or Revert(), even if the current value is the same as the committed value. + bool HasChanged(size_t index) const { return changed_[index]; } + + // Returns the set of indices that have been Set() since the last Commit() or + // Revert(). + const std::vector& ChangedIndices() const { + return changed_.PositionsSetAtLeastOnce(); + } + + private: + struct VersionedElement { + T current; + T committed; + }; + // Holds current and committed versions of values of this vector. + std::vector elements_; + // Holds indices that were Set() since the last Commit() or Revert(). + SparseBitset changed_; +}; + /// Returns a filter tracking route constraints. IntVarLocalSearchFilter* MakeRouteConstraintFilter( const RoutingModel& routing_model); @@ -76,7 +156,7 @@ IntVarLocalSearchFilter* MakeVehicleVarFilter( IntVarLocalSearchFilter* MakePathCumulFilter(const RoutingDimension& dimension, bool propagate_own_objective_value, bool filter_objective_cost, - bool can_use_lp); + bool may_use_optimizers); /// Returns a filter handling dimension cumul bounds. IntVarLocalSearchFilter* MakeCumulBoundsPropagatorFilter( @@ -84,7 +164,7 @@ IntVarLocalSearchFilter* MakeCumulBoundsPropagatorFilter( /// Returns a filter checking global linear constraints and costs. 
IntVarLocalSearchFilter* MakeGlobalLPCumulFilter( - GlobalDimensionCumulOptimizer* optimizer, + GlobalDimensionCumulOptimizer* lp_optimizer, GlobalDimensionCumulOptimizer* mp_optimizer, bool filter_objective_cost); /// Returns a filter checking the feasibility and cost of the resource diff --git a/ortools/routing/ils.cc b/ortools/routing/ils.cc index d9463fa6d9..8af8cbb820 100644 --- a/ortools/routing/ils.cc +++ b/ortools/routing/ils.cc @@ -93,6 +93,11 @@ std::unique_ptr MakeRuinProcedure( return std::make_unique( model, rnd, ruin.random_walk().num_removed_visits(), num_neighbors_for_route_selection); + case RuinStrategy::kSisr: + return std::make_unique( + model, rnd, ruin.sisr().max_removed_sequence_size(), + ruin.sisr().avg_num_removed_visits(), ruin.sisr().bypass_factor(), + num_neighbors_for_route_selection); default: LOG(DFATAL) << "Unsupported ruin procedure."; return nullptr; @@ -895,9 +900,14 @@ int64_t RandomWalkRemovalRuinProcedure::GetNextNodeToRemove( } SISRRuinProcedure::SISRRuinProcedure(RoutingModel* model, std::mt19937* rnd, - int num_neighbors) + int max_removed_sequence_size, + int avg_num_removed_visits, + double bypass_factor, int num_neighbors) : model_(*model), rnd_(*rnd), + max_removed_sequence_size_(max_removed_sequence_size), + avg_num_removed_visits_(avg_num_removed_visits), + bypass_factor_(bypass_factor), neighbors_manager_(model->GetOrCreateNodeNeighborsByCostClass( {num_neighbors, /*add_vehicle_starts_to_neighbors=*/false, @@ -921,23 +931,17 @@ std::function SISRRuinProcedure::Ruin( routing_solution_.Reset(assignment); ruined_routes_.SparseClearAll(); - // TODO(user): add to proto. - const int max_cardinality_removed_sequences = 10; - - // TODO(user): add to proto. 
- const int avg_num_removed_visits = 10; - const double max_sequence_size = - std::min(max_cardinality_removed_sequences, + std::min(max_removed_sequence_size_, ComputeAverageNonEmptyRouteSize(model_, *assignment)); const double max_num_removed_sequences = - (4 * avg_num_removed_visits) / (1 + max_sequence_size) - 1; + (4 * avg_num_removed_visits_) / (1 + max_sequence_size) - 1; DCHECK_GE(max_num_removed_sequences, 1); const int num_sequences_to_remove = std::floor(std::uniform_real_distribution( - 1.0, max_num_removed_sequences)(rnd_)); + 1.0, max_num_removed_sequences + 1)(rnd_)); // We start by disrupting the route where the seed visit is served. const int seed_route = RuinRoute(*assignment, seed_node, max_sequence_size); @@ -979,7 +983,7 @@ int SISRRuinProcedure::RuinRoute(const Assignment& assignment, routing_solution_.GetRouteSize(route), global_max_sequence_size); int sequence_size = std::floor( - std::uniform_real_distribution(1.0, max_sequence_size)(rnd_)); + std::uniform_real_distribution(1.0, max_sequence_size + 1)(rnd_)); if (sequence_size == 1 || sequence_size == max_sequence_size || boolean_dist_(rnd_)) { @@ -1011,14 +1015,11 @@ void SISRRuinProcedure::RuinRouteWithSequenceProcedure(int64_t seed_visit, void SISRRuinProcedure::RuinRouteWithSplitSequenceProcedure(int64_t route, int64_t seed_visit, int sequence_size) { - // TODO(user): add to proto. 
- const double alpha = 0.01; - const int max_num_bypassed_visits = routing_solution_.GetRouteSize(route) - sequence_size; int num_bypassed_visits = 1; while (num_bypassed_visits < max_num_bypassed_visits && - probability_dist_(rnd_) >= alpha * probability_dist_(rnd_)) { + probability_dist_(rnd_) >= bypass_factor_ * probability_dist_(rnd_)) { ++num_bypassed_visits; } diff --git a/ortools/routing/ils.h b/ortools/routing/ils.h index 4b9b35568c..cd8dd82472 100644 --- a/ortools/routing/ils.h +++ b/ortools/routing/ils.h @@ -206,9 +206,12 @@ class CompositeRuinProcedure : public RuinProcedure { // combination of user-defined parameters and solution and instance properties. // Every selected route is then disrupted by removing a contiguous sequence of // visits, possibly bypassing a contiguous subsequence. +// See also SISRRuinStrategy in ils.proto. class SISRRuinProcedure : public RuinProcedure { public: - SISRRuinProcedure(RoutingModel* model, std::mt19937* rnd, int num_neighbors); + SISRRuinProcedure(RoutingModel* model, std::mt19937* rnd, + int max_removed_sequence_size, int avg_num_removed_visits, + double bypass_factor, int num_neighbors); std::function Ruin(const Assignment* assignment) override; @@ -226,6 +229,9 @@ class SISRRuinProcedure : public RuinProcedure { const RoutingModel& model_; std::mt19937& rnd_; + int max_removed_sequence_size_; + int avg_num_removed_visits_; + double bypass_factor_; const RoutingModel::NodeNeighborsByCostClass* const neighbors_manager_; std::uniform_int_distribution customer_dist_; std::bernoulli_distribution boolean_dist_; diff --git a/ortools/routing/ils.proto b/ortools/routing/ils.proto index 8df144bedb..77e0328f1b 100644 --- a/ortools/routing/ils.proto +++ b/ortools/routing/ils.proto @@ -50,11 +50,99 @@ message RandomWalkRuinStrategy { optional uint32 num_removed_visits = 7; } +// Ruin strategy based on the "Slack Induction by String Removals for Vehicle +// Routing Problems" by Jan Christiaens and Greet Vanden Berghe, 
Transportation +// Science 2020. +// Link to paper: +// https://kuleuven.limo.libis.be/discovery/fulldisplay?docid=lirias1988666&context=SearchWebhook&vid=32KUL_KUL:Lirias&lang=en&search_scope=lirias_profile&adaptor=SearchWebhook&tab=LIRIAS&query=any,contains,LIRIAS1988666&offset=0 +// +// Note that, in this implementation, the notion of "string" is replaced by +// "sequence". +// +// The main idea of this ruin is to remove a number of geographically close +// sequences of nodes. In particular, at every ruin application, a maximum +// number max_ruined_routes of routes are disrupted. The value for +// max_ruined_routes is defined as +// (4 * avg_num_removed_visits) / (1 + max_sequence_size) + 1 +// with +// - avg_num_removed_visits: user-defined parameter ruling the average number of +// visits that are removed in face of several ruin applications (see also the +// proto message below) +// - max_sequence_size is defined as +// min{max_removed_sequence_size, average_route_size} +// with +// - max_removed_sequence_size: user-defined parameter that specifies +// the maximum number of visits removed from a single route (see also the +// proto message below) +// - average_route_size: the average size of a non-empty route in the current +// solution +// +// The actual number of ruined routes is then obtained as +// floor(U(1, max_ruined_routes + 1)) +// where U is a continuous uniform distribution of real values in the given +// interval. +// +// The routes affected by the ruin procedure are selected as follows. +// First, a non start/end seed node is randomly selected. The route serving this +// node is the first ruined route. Then, until the required number of routes has +// been ruined, neighbor nodes of the initial seed node are scanned and the +// associated not yet ruined routes are disrupted. Nodes defining the selected +// routes are designated as seed nodes for the "sequence" and "split sequence" +// removal procedures described below. 
+//
+// For every selected route, a maximum number route_max_sequence_size of nodes
+// are removed. In particular, route_max_sequence_size is defined as
+// min{route_size, max_sequence_size}
+// with route_size being the size of the current route.
+//
+// Then, the actual number of removed nodes num_removed_nodes is defined as
+// floor(U(1, route_max_sequence_size + 1))
+// where U is a continuous uniform distribution of real values in the given
+// interval.
+//
+// As mentioned above, the selected num_removed_nodes number of nodes is removed
+// either via the "sequence" removal or "split sequence" removal procedures. The
+// two removal procedures are executed with equal probabilities.
+//
+// The "sequence" removal procedure removes a randomly selected sequence of size
+// num_removed_nodes that includes the seed node.
+//
+// The "split sequence" removal procedure also removes a randomly selected
+// sequence of size num_removed_nodes that includes the seed node, but it can
+// possibly preserve a subsequence of contiguous nodes.
+// In particular, the procedure first selects a sequence of size
+// num_removed_nodes + num_bypassed, then num_bypassed contiguous nodes in the
+// selected sequence are preserved while the others are removed.
+//
+// The definition of num_bypassed is as follows. First num_bypassed = 1. The
+// current value of num_bypassed is maintained if
+// U(0, 1) < bypass_factor * U(0, 1)
+// or the maximum value for num_bypassed, equal to
+// route_size - num_removed_nodes
+// is reached. The value is incremented by one otherwise,
+// and the process is repeated. The value assigned to bypass_factor affects the
+// number of preserved visits (see also the proto message below).
+message SISRRuinStrategy {
+  // Maximum number of removed visits per sequence. The parameter name in the
+  // paper is L^{max} and the suggested value is 10.
+  optional uint32 max_removed_sequence_size = 1;
+
+  // Number of visits that are removed on average.
The parameter name in the + // paper is \bar{c} and the suggested value is 10. + optional uint32 avg_num_removed_visits = 2; + + // Value in [0, 1] ruling the number of preserved customers in the split + // sequence removal. The parameter name in the paper is \alpha and the + // suggested value is 0.01. + optional double bypass_factor = 3; +} + // Ruin strategies, used in perturbation based on ruin and recreate approaches. message RuinStrategy { oneof strategy { SpatiallyCloseRoutesRuinStrategy spatially_close_routes = 1; RandomWalkRuinStrategy random_walk = 2; + SISRRuinStrategy sisr = 3; } } diff --git a/ortools/routing/lp_scheduling.cc b/ortools/routing/lp_scheduling.cc index 6f37bfebb0..4a0482dc53 100644 --- a/ortools/routing/lp_scheduling.cc +++ b/ortools/routing/lp_scheduling.cc @@ -17,11 +17,8 @@ #include #include #include -#include #include -#include #include -#include #include #include #include @@ -29,8 +26,6 @@ #include #include "absl/algorithm/container.h" -#include "absl/container/flat_hash_map.h" -#include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" @@ -40,7 +35,6 @@ #include "ortools/base/logging.h" #include "ortools/base/map_util.h" #include "ortools/base/mathutil.h" -#include "ortools/base/strong_vector.h" #include "ortools/base/types.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/glop/parameters.pb.h" @@ -52,6 +46,7 @@ #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/lp_utils.h" #include "ortools/util/flat_matrix.h" +#include "ortools/util/piecewise_linear_function.h" #include "ortools/util/saturated_arithmetic.h" #include "ortools/util/sorted_interval_list.h" @@ -219,7 +214,7 @@ DimensionSchedulingStatus LocalDimensionCumulOptimizer::ComputeRouteCumulCost( const DimensionSchedulingStatus status = optimizer_core_.OptimizeSingleRouteWithResource( vehicle, next_accessor, - /*dimension_travel_info=*/{}, + 
/*dimension_travel_info=*/nullptr, /*resource=*/nullptr, /*optimize_vehicle_costs=*/optimal_cost != nullptr, solver_[vehicle].get(), /*cumul_values=*/nullptr, @@ -238,7 +233,7 @@ LocalDimensionCumulOptimizer::ComputeRouteCumulCostWithoutFixedTransits( const RoutingModel::ResourceGroup::Resource* resource, int64_t* optimal_cost_without_transits) { return optimizer_core_.OptimizeSingleRouteWithResource( - vehicle, next_accessor, /*dimension_travel_info=*/{}, resource, + vehicle, next_accessor, /*dimension_travel_info=*/nullptr, resource, /*optimize_vehicle_costs=*/optimal_cost_without_transits != nullptr, solver_[vehicle].get(), /*cumul_values=*/nullptr, /*break_values=*/nullptr, optimal_cost_without_transits, nullptr); @@ -254,14 +249,14 @@ std::vector LocalDimensionCumulOptimizer:: std::vector>* optimal_cumuls, std::vector>* optimal_breaks) { return optimizer_core_.OptimizeSingleRouteWithResources( - vehicle, next_accessor, transit_accessor, {}, resources, resource_indices, - optimize_vehicle_costs, solver_[vehicle].get(), optimal_cumuls, - optimal_breaks, optimal_costs_without_transits, nullptr); + vehicle, next_accessor, transit_accessor, nullptr, resources, + resource_indices, optimize_vehicle_costs, solver_[vehicle].get(), + optimal_cumuls, optimal_breaks, optimal_costs_without_transits, nullptr); } DimensionSchedulingStatus LocalDimensionCumulOptimizer::ComputeRouteCumuls( int vehicle, const std::function& next_accessor, - const RoutingModel::RouteDimensionTravelInfo& dimension_travel_info, + const RoutingModel::RouteDimensionTravelInfo* dimension_travel_info, const RoutingModel::ResourceGroup::Resource* resource, std::vector* optimal_cumuls, std::vector* optimal_breaks) { @@ -275,7 +270,7 @@ DimensionSchedulingStatus LocalDimensionCumulOptimizer::ComputeRouteCumuls( DimensionSchedulingStatus LocalDimensionCumulOptimizer::ComputeRouteCumulsAndCostWithoutFixedTransits( int vehicle, const std::function& next_accessor, - const 
RoutingModel::RouteDimensionTravelInfo& dimension_travel_info, + const RoutingModel::RouteDimensionTravelInfo* dimension_travel_info, std::vector* optimal_cumuls, std::vector* optimal_breaks, int64_t* optimal_cost_without_transits) { return optimizer_core_.OptimizeSingleRouteWithResource( @@ -287,7 +282,7 @@ LocalDimensionCumulOptimizer::ComputeRouteCumulsAndCostWithoutFixedTransits( DimensionSchedulingStatus LocalDimensionCumulOptimizer::ComputeRouteSolutionCostWithoutFixedTransits( int vehicle, const std::function& next_accessor, - const RoutingModel::RouteDimensionTravelInfo& dimension_travel_info, + const RoutingModel::RouteDimensionTravelInfo* dimension_travel_info, absl::Span solution_cumul_values, absl::Span solution_break_values, int64_t* solution_cost, int64_t* cost_offset, bool reuse_previous_model_if_possible, bool clear_lp, @@ -303,7 +298,7 @@ LocalDimensionCumulOptimizer::ComputeRouteSolutionCostWithoutFixedTransits( DimensionSchedulingStatus LocalDimensionCumulOptimizer::ComputePackedRouteCumuls( int vehicle, const std::function& next_accessor, - const RoutingModel::RouteDimensionTravelInfo& dimension_travel_info, + const RoutingModel::RouteDimensionTravelInfo* dimension_travel_info, const RoutingModel::ResourceGroup::Resource* resource, std::vector* packed_cumuls, std::vector* packed_breaks) { return optimizer_core_.OptimizeAndPackSingleRoute( @@ -580,7 +575,7 @@ DimensionCumulOptimizerCore::DimensionCumulOptimizerCore( DimensionSchedulingStatus DimensionCumulOptimizerCore::ComputeSingleRouteSolutionCostWithoutFixedTransits( int vehicle, const std::function& next_accessor, - const RouteDimensionTravelInfo& dimension_travel_info, + const RouteDimensionTravelInfo* dimension_travel_info, RoutingLinearSolverWrapper* solver, absl::Span solution_cumul_values, absl::Span solution_break_values, @@ -691,7 +686,7 @@ void ClearIfNonNull(std::vector* v) { DimensionSchedulingStatus DimensionCumulOptimizerCore::OptimizeSingleRouteWithResource( int vehicle, const 
std::function& next_accessor, - const RouteDimensionTravelInfo& dimension_travel_info, + const RouteDimensionTravelInfo* dimension_travel_info, const RoutingModel::ResourceGroup::Resource* resource, bool optimize_vehicle_costs, RoutingLinearSolverWrapper* solver, std::vector* cumul_values, std::vector* break_values, @@ -830,7 +825,7 @@ std::vector DimensionCumulOptimizerCore::OptimizeSingleRouteWithResources( int vehicle, const std::function& next_accessor, const std::function& transit_accessor, - const RouteDimensionTravelInfo& dimension_travel_info, + const RouteDimensionTravelInfo* dimension_travel_info, absl::Span resources, absl::Span resource_indices, bool optimize_vehicle_costs, RoutingLinearSolverWrapper* solver, @@ -969,10 +964,10 @@ DimensionSchedulingStatus DimensionCumulOptimizerCore::Optimize( !model->IsEnd(next_accessor(model->Start(vehicle))) || model->IsVehicleUsedWhenEmpty(vehicle); const bool optimize_vehicle_costs = optimize_costs && vehicle_is_used; - const RouteDimensionTravelInfo& dimension_travel_info = + const RouteDimensionTravelInfo* const dimension_travel_info = dimension_travel_info_per_route.empty() - ? RouteDimensionTravelInfo() - : dimension_travel_info_per_route[vehicle]; + ? 
nullptr + : &dimension_travel_info_per_route[vehicle]; if (!SetRouteCumulConstraints( vehicle, next_accessor, dimension_->transit_evaluator(vehicle), dimension_travel_info, cumul_offset, optimize_vehicle_costs, solver, @@ -1078,7 +1073,7 @@ DimensionSchedulingStatus DimensionCumulOptimizerCore::OptimizeAndPack( DimensionSchedulingStatus DimensionCumulOptimizerCore::OptimizeAndPackSingleRoute( int vehicle, const std::function& next_accessor, - const RouteDimensionTravelInfo& dimension_travel_info, + const RouteDimensionTravelInfo* dimension_travel_info, const RoutingModel::ResourceGroup::Resource* resource, RoutingLinearSolverWrapper* solver, std::vector* cumul_values, std::vector* break_values) { @@ -1271,6 +1266,24 @@ bool DimensionCumulOptimizerCore::TightenRouteCumulBounds( return true; } +std::vector PiecewiseLinearFunctionToSlopeAndYIntercept( + const FloatSlopePiecewiseLinearFunction& pwl_function, int index_start, + int index_end) { + const auto& x_anchors = pwl_function.x_anchors(); + const auto& y_anchors = pwl_function.y_anchors(); + if (index_end < 0) index_end = x_anchors.size() - 1; + const int num_segments = index_end - index_start; + DCHECK_GE(num_segments, 1); + std::vector slope_and_y_intercept(num_segments); + for (int seg = index_start; seg < index_end; ++seg) { + auto& [slope, y_intercept] = slope_and_y_intercept[seg - index_start]; + slope = (y_anchors[seg + 1] - y_anchors[seg]) / + static_cast(x_anchors[seg + 1] - x_anchors[seg]); + y_intercept = y_anchors[seg] - slope * x_anchors[seg]; + } + return slope_and_y_intercept; +} + std::vector SlopeAndYInterceptToConvexityRegions( absl::Span slope_and_y_intercept) { CHECK(!slope_and_y_intercept.empty()); @@ -1286,27 +1299,7 @@ std::vector SlopeAndYInterceptToConvexityRegions( return convex; } -std::vector PiecewiseLinearFormulationToSlopeAndYIntercept( - const RoutingModel::RouteDimensionTravelInfo::TransitionInfo:: - PiecewiseLinearFormulation& pwl_function, - int index_start, int index_end) { - 
if (index_end < 0) index_end = pwl_function.x_anchors.size() - 1; - const int num_segments = index_end - index_start; - DCHECK_GE(num_segments, 1); - std::vector slope_and_y_intercept(num_segments); - for (int seg = index_start; seg < index_end; ++seg) { - auto& [slope, y_intercept] = slope_and_y_intercept[seg - index_start]; - slope = (pwl_function.y_anchors[seg + 1] - pwl_function.y_anchors[seg]) / - static_cast(pwl_function.x_anchors[seg + 1] - - pwl_function.x_anchors[seg]); - y_intercept = - pwl_function.y_anchors[seg] - slope * pwl_function.x_anchors[seg]; - } - return slope_and_y_intercept; -} - namespace { - // Find a "good" scaling factor for constraints with non-integers coefficients. // See sat::FindBestScalingAndComputeErrors() for more infos. double FindBestScaling(const std::vector& coefficients, @@ -1321,88 +1314,17 @@ double FindBestScaling(const std::vector& coefficients, wanted_absolute_activity_precision, &unused_relative_coeff_error, &unused_scaled_sum_error); } - -// Returns the value of pwl(x) with pwl a PiecewiseLinearFormulation, knowing -// that x ∈ [pwl.x[upper_segment_index-1], pwl.x[upper_segment_index]]. 
-int64_t PieceWiseLinearFormulationValueKnownSegment( - const RoutingModel::RouteDimensionTravelInfo::TransitionInfo:: - PiecewiseLinearFormulation& pwl, - int64_t x, int upper_segment_index, double delta = 0) { - DCHECK_GE(upper_segment_index, 1); - DCHECK_LE(upper_segment_index, pwl.x_anchors.size() - 1); - const double alpha = - static_cast(pwl.y_anchors[upper_segment_index] - - pwl.y_anchors[upper_segment_index - 1]) / - (pwl.x_anchors[upper_segment_index] - - pwl.x_anchors[upper_segment_index - 1]); - const double beta = pwl.y_anchors[upper_segment_index] - - pwl.x_anchors[upper_segment_index] * alpha; - return std::ceil(alpha * x + beta + delta); -} - } // namespace -PiecewiseEvaluationStatus ComputePiecewiseLinearFormulationValue( - const RoutingModel::RouteDimensionTravelInfo::TransitionInfo:: - PiecewiseLinearFormulation& pwl, - int64_t x, int64_t* value, double delta) { - // Search for first element xi such that xi < x. - const auto upper_segment = - std::upper_bound(pwl.x_anchors.begin(), pwl.x_anchors.end(), x); - const int upper_segment_index = - std::distance(pwl.x_anchors.begin(), upper_segment); - - // Checking bounds - if (upper_segment_index == 0) { - return PiecewiseEvaluationStatus::SMALLER_THAN_LOWER_BOUND; - } else if (upper_segment == pwl.x_anchors.end()) { - if (x == pwl.x_anchors.back()) { - *value = std::ceil(pwl.y_anchors.back() + delta); - return PiecewiseEvaluationStatus::WITHIN_BOUNDS; - } - return PiecewiseEvaluationStatus::LARGER_THAN_UPPER_BOUND; - } - - *value = PieceWiseLinearFormulationValueKnownSegment( - pwl, x, upper_segment_index, delta); - return PiecewiseEvaluationStatus::WITHIN_BOUNDS; -} - -int64_t ComputeConvexPiecewiseLinearFormulationValue( - const RoutingModel::RouteDimensionTravelInfo::TransitionInfo:: - PiecewiseLinearFormulation& pwl, - int64_t x, double delta) { - int64_t y_value; - const PiecewiseEvaluationStatus status = - ComputePiecewiseLinearFormulationValue(pwl, x, &y_value, delta); - switch (status) { - 
case PiecewiseEvaluationStatus::UNSPECIFIED: - // The status should be specified. - LOG(FATAL) << "Unspecified PiecewiseEvaluationStatus."; - break; - case PiecewiseEvaluationStatus::WITHIN_BOUNDS: - // x is in the bounds, therefore, simply return the computed value. - return y_value; - case PiecewiseEvaluationStatus::SMALLER_THAN_LOWER_BOUND: - // In the convex case, if x <= lower_bound, the most restrictive - // constraint will be the first one. - return PieceWiseLinearFormulationValueKnownSegment(pwl, x, 1, delta); - case PiecewiseEvaluationStatus::LARGER_THAN_UPPER_BOUND: - // In the convex case, if x >= upper_bound, the most restrictive - // constraint will be the last one. - return PieceWiseLinearFormulationValueKnownSegment( - pwl, x, pwl.x_anchors.size() - 1, delta); - } -} - bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( - const RouteDimensionTravelInfo& dimension_travel_info, + const RouteDimensionTravelInfo* dimension_travel_info, absl::Span lp_slacks, absl::Span fixed_transit, RoutingLinearSolverWrapper* solver) { const std::vector& lp_cumuls = current_route_cumul_variables_; const int path_size = lp_cumuls.size(); - if (dimension_travel_info.transition_info.empty()) { + if (dimension_travel_info == nullptr || + dimension_travel_info->transition_info.empty()) { // Travel is not travel-start dependent. 
// Add all path constraints to LP: // cumul[i] + fixed_transit[i] + slack[i] == cumul[i+1] @@ -1438,14 +1360,10 @@ bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( absl::StrFormat("relative_compression_cost(%ld)", pos)); const RoutingModel::RouteDimensionTravelInfo::TransitionInfo& - transition_info = dimension_travel_info.transition_info[pos]; - const RouteDimensionTravelInfo::TransitionInfo::PiecewiseLinearFormulation& - travel_function = transition_info.travel_start_dependent_travel; - const int num_pwl_anchors = travel_function.x_anchors.size(); - DCHECK_GE(num_pwl_anchors, 2) - << "Travel value PWL must have at least 2 points"; - DCHECK_EQ(num_pwl_anchors, travel_function.y_anchors.size()) - << "Travel value PWL must have as many x anchors than y."; + transition_info = dimension_travel_info->transition_info[pos]; + const FloatSlopePiecewiseLinearFunction& travel_function = + transition_info.travel_start_dependent_travel; + const auto& travel_x_anchors = travel_function.x_anchors(); // 1. Create the travel value variable and set its constraints. // 1.a. Create Variables for the start and value of a travel @@ -1455,7 +1373,7 @@ bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( const int64_t compressed_travel_value_lower_bound = transition_info.compressed_travel_value_lower_bound; const int64_t travel_value_upper_bound = - dimension_travel_info.transition_info[pos].travel_value_upper_bound; + transition_info.travel_value_upper_bound; // The lower bound of travel_value is already implemented by constraints as // travel_value >= compressed_travel_value (defined below) and // compressed_travel_value has compressed_travel_value_lower_bound as a @@ -1479,15 +1397,16 @@ bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( // Find segments that are in bounds // Only the segments in [index_anchor_start, index_anchor_end[ are in // bounds, the others can therefore be discarded. 
+ const int num_pwl_anchors = travel_x_anchors.size(); int index_anchor_start = 0; while (index_anchor_start < num_pwl_anchors - 1 && - travel_function.x_anchors[index_anchor_start + 1] <= + travel_x_anchors[index_anchor_start + 1] <= current_route_min_cumuls_[pos] + pre_travel_transit) { ++index_anchor_start; } int index_anchor_end = num_pwl_anchors - 1; while (index_anchor_end > 0 && - travel_function.x_anchors[index_anchor_end - 1] >= + travel_x_anchors[index_anchor_end - 1] >= current_route_max_cumuls_[pos] + pre_travel_transit) { --index_anchor_end; } @@ -1497,7 +1416,7 @@ bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( // Precompute the slopes and y-intercept as they will be used to detect // convexities and in the constraints. const std::vector slope_and_y_intercept = - PiecewiseLinearFormulationToSlopeAndYIntercept( + PiecewiseLinearFunctionToSlopeAndYIntercept( travel_function, index_anchor_start, index_anchor_end); // Optimize binary variables by detecting convexities @@ -1531,7 +1450,7 @@ bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( // If the travel_start value is outside the PWL, the closest segment // will be used. This is why some bounds are infinite. const int64_t lower_bound_interval = - seg > 0 ? travel_function.x_anchors[index_anchor_start + seg] + seg > 0 ? travel_x_anchors[index_anchor_start + seg] : current_route_min_cumuls_[pos] + pre_travel_transit; int64_t end_of_seg = seg + 1; while (end_of_seg < num_pwl_anchors - 1 && !convexities[end_of_seg]) { @@ -1539,7 +1458,7 @@ bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( } const int64_t higher_bound_interval = end_of_seg < num_pwl_anchors - 1 - ? travel_function.x_anchors[index_anchor_start + end_of_seg] + ? 
travel_x_anchors[index_anchor_start + end_of_seg] : current_route_max_cumuls_[pos] + pre_travel_transit; const int travel_start_in_segment_ct = solver->AddLinearConstraint( lower_bound_interval, higher_bound_interval, {{travel_start, 1}}); @@ -1640,25 +1559,25 @@ bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( // compressed_travel_value to not give the incentive to compress a little // bit in order to same some cost per travel. solver->SetObjectiveCoefficient( - travel_value, dimension_travel_info.travel_cost_coefficient); + travel_value, dimension_travel_info->travel_cost_coefficient); // 4. Adds a convex cost in epsilon // Here we DCHECK that the cost function is indeed convex - const RouteDimensionTravelInfo::TransitionInfo::PiecewiseLinearFormulation& - cost_function = - dimension_travel_info.transition_info[pos].travel_compression_cost; + const FloatSlopePiecewiseLinearFunction& cost_function = + transition_info.travel_compression_cost; + const auto& cost_x_anchors = cost_function.x_anchors(); + const std::vector cost_slope_and_y_intercept = - PiecewiseLinearFormulationToSlopeAndYIntercept(cost_function); - const double cost_max = ComputeConvexPiecewiseLinearFormulationValue( - cost_function, + PiecewiseLinearFunctionToSlopeAndYIntercept(cost_function); + const double cost_max = cost_function.ComputeConvexValue( travel_value_upper_bound - compressed_travel_value_lower_bound); double previous_slope = 0; - for (int seg = 0; seg < cost_function.x_anchors.size() - 1; ++seg) { + for (int seg = 0; seg < cost_x_anchors.size() - 1; ++seg) { const auto [slope, y_intercept] = cost_slope_and_y_intercept[seg]; // Check convexity DCHECK_GE(slope, previous_slope) << "Compression error is not convex. 
Segment " << (1 + seg) - << " out of " << (cost_function.x_anchors.size() - 1); + << " out of " << (cost_x_anchors.size() - 1); previous_slope = slope; const double factor = FindBestScaling( {1.0, -slope, y_intercept}, /*lower_bounds=*/ @@ -1723,7 +1642,7 @@ bool RouteIsValid(const RoutingModel& model, int vehicle, bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( int vehicle, const std::function& next_accessor, const std::function& transit_accessor, - const RouteDimensionTravelInfo& dimension_travel_info, int64_t cumul_offset, + const RouteDimensionTravelInfo* dimension_travel_info, int64_t cumul_offset, bool optimize_costs, RoutingLinearSolverWrapper* solver, int64_t* route_transit_cost, int64_t* route_cost_offset) { RoutingModel* const model = dimension_->model(); @@ -1753,7 +1672,8 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( if (!ExtractRouteCumulBounds(path, cumul_offset)) { return false; } - if (dimension_travel_info.transition_info.empty()) { + if (dimension_travel_info == nullptr || + dimension_travel_info->transition_info.empty()) { if (!TightenRouteCumulBounds(path, fixed_transit, cumul_offset)) { return false; } @@ -1762,7 +1682,7 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( std::vector min_transit(path_size - 1); for (int pos = 0; pos < path_size - 1; ++pos) { const RouteDimensionTravelInfo::TransitionInfo& transition = - dimension_travel_info.transition_info[pos]; + dimension_travel_info->transition_info[pos]; min_transit[pos] = transition.pre_travel_transit_value + transition.compressed_travel_value_lower_bound + transition.post_travel_transit_value; diff --git a/ortools/routing/lp_scheduling.h b/ortools/routing/lp_scheduling.h index d2706ff995..7176298e08 100644 --- a/ortools/routing/lp_scheduling.h +++ b/ortools/routing/lp_scheduling.h @@ -45,6 +45,7 @@ #include "ortools/sat/cp_model_solver.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_parameters.pb.h" +#include 
"ortools/util/piecewise_linear_function.h" #include "ortools/util/sorted_interval_list.h" namespace operations_research::routing { @@ -648,7 +649,7 @@ class DimensionCumulOptimizerCore { // resource. If the resource is null, it is simply ignored. DimensionSchedulingStatus OptimizeSingleRouteWithResource( int vehicle, const std::function& next_accessor, - const RouteDimensionTravelInfo& dimension_travel_info, + const RouteDimensionTravelInfo* dimension_travel_info, const Resource* resource, bool optimize_vehicle_costs, RoutingLinearSolverWrapper* solver, std::vector* cumul_values, std::vector* break_values, int64_t* cost_without_transit, @@ -659,7 +660,7 @@ class DimensionCumulOptimizerCore { // constraints for cumuls and breaks. DimensionSchedulingStatus ComputeSingleRouteSolutionCostWithoutFixedTransits( int vehicle, const std::function& next_accessor, - const RouteDimensionTravelInfo& dimension_travel_info, + const RouteDimensionTravelInfo* dimension_travel_info, RoutingLinearSolverWrapper* solver, absl::Span solution_cumul_values, absl::Span solution_break_values, @@ -675,7 +676,7 @@ class DimensionCumulOptimizerCore { std::vector OptimizeSingleRouteWithResources( int vehicle, const std::function& next_accessor, const std::function& transit_accessor, - const RouteDimensionTravelInfo& dimension_travel_info, + const RouteDimensionTravelInfo* dimension_travel_info, absl::Span resources, absl::Span resource_indices, bool optimize_vehicle_costs, RoutingLinearSolverWrapper* solver, @@ -708,7 +709,7 @@ class DimensionCumulOptimizerCore { DimensionSchedulingStatus OptimizeAndPackSingleRoute( int vehicle, const std::function& next_accessor, - const RouteDimensionTravelInfo& dimension_travel_info, + const RouteDimensionTravelInfo* dimension_travel_info, const Resource* resource, RoutingLinearSolverWrapper* solver, std::vector* cumul_values, std::vector* break_values); @@ -738,7 +739,7 @@ class DimensionCumulOptimizerCore { bool SetRouteCumulConstraints( int vehicle, const 
std::function& next_accessor, const std::function& transit_accessor, - const RouteDimensionTravelInfo& dimension_travel_info, + const RouteDimensionTravelInfo* dimension_travel_info, int64_t cumul_offset, bool optimize_costs, RoutingLinearSolverWrapper* solver, int64_t* route_transit_cost, int64_t* route_cost_offset); @@ -747,7 +748,7 @@ class DimensionCumulOptimizerCore { // static or time-dependent travel values. // Returns false if some infeasibility was detected, true otherwise. bool SetRouteTravelConstraints( - const RouteDimensionTravelInfo& dimension_travel_info, + const RouteDimensionTravelInfo* dimension_travel_info, absl::Span lp_slacks, absl::Span fixed_transit, RoutingLinearSolverWrapper* solver); @@ -865,7 +866,7 @@ class LocalDimensionCumulOptimizer { // Returns false if the route is not feasible. DimensionSchedulingStatus ComputeRouteCumuls( int vehicle, const std::function& next_accessor, - const RoutingModel::RouteDimensionTravelInfo& dimension_travel_info, + const RoutingModel::RouteDimensionTravelInfo* dimension_travel_info, const RoutingModel::ResourceGroup::Resource* resource, std::vector* optimal_cumuls, std::vector* optimal_breaks); @@ -874,7 +875,7 @@ class LocalDimensionCumulOptimizer { // ComputeRouteCumuls(). DimensionSchedulingStatus ComputeRouteCumulsAndCostWithoutFixedTransits( int vehicle, const std::function& next_accessor, - const RoutingModel::RouteDimensionTravelInfo& dimension_travel_info, + const RoutingModel::RouteDimensionTravelInfo* dimension_travel_info, std::vector* optimal_cumuls, std::vector* optimal_breaks, int64_t* optimal_cost_without_transits); @@ -883,7 +884,7 @@ class LocalDimensionCumulOptimizer { // defined by its cumuls and breaks. 
DimensionSchedulingStatus ComputeRouteSolutionCostWithoutFixedTransits( int vehicle, const std::function& next_accessor, - const RoutingModel::RouteDimensionTravelInfo& dimension_travel_info, + const RoutingModel::RouteDimensionTravelInfo* dimension_travel_info, absl::Span solution_cumul_values, absl::Span solution_break_values, int64_t* solution_cost, int64_t* cost_offset = nullptr, @@ -897,7 +898,7 @@ class LocalDimensionCumulOptimizer { // time window. DimensionSchedulingStatus ComputePackedRouteCumuls( int vehicle, const std::function& next_accessor, - const RoutingModel::RouteDimensionTravelInfo& dimension_travel_info, + const RoutingModel::RouteDimensionTravelInfo* dimension_travel_info, const RoutingModel::ResourceGroup::Resource* resource, std::vector* packed_cumuls, std::vector* packed_breaks); @@ -1009,35 +1010,8 @@ bool ComputeVehicleToResourceClassAssignmentCosts( std::vector>* cumul_values, std::vector>* break_values); -// Simple struct returned by ComputePiecewiseLinearFormulationValue() to -// indicate if the value could be computed and if not, on what side the value -// was from the definition interval. -enum class PiecewiseEvaluationStatus { - UNSPECIFIED = 0, - WITHIN_BOUNDS, - SMALLER_THAN_LOWER_BOUND, - LARGER_THAN_UPPER_BOUND -}; - -// Computes pwl(x) for pwl a PieceWiseLinearFormulation. -// Returns a PieceWiseEvaluationStatus to indicate if the value could be -// computed (filled in value) and if not, why. -PiecewiseEvaluationStatus ComputePiecewiseLinearFormulationValue( - const RoutingModel::RouteDimensionTravelInfo::TransitionInfo:: - PiecewiseLinearFormulation& pwl, - int64_t x, int64_t* value, double delta = 0); - -// Like ComputePiecewiseLinearFormulationValue(), computes pwl(x) for pwl a -// PiecewiseLinearFormulation. For convex PiecewiseLinearFormulations, if x is -// outside the bounds of the function, instead of returning an error like in -// PiecewiseLinearFormulation, the function will still be defined by its outer -// segments. 
-int64_t ComputeConvexPiecewiseLinearFormulationValue( - const RoutingModel::RouteDimensionTravelInfo::TransitionInfo:: - PiecewiseLinearFormulation& pwl, - int64_t x, double delta = 0); - -// Structure to store the slope and y_intercept of a segment. +// Structure to store the slope and y_intercept of a segment of a piecewise +// linear function. struct SlopeAndYIntercept { double slope; double y_intercept; @@ -1048,6 +1022,16 @@ struct SlopeAndYIntercept { } }; +// Given a FloatSlopePiecewiseLinearFunction, returns a vector of slope and +// y-intercept corresponding to each segment. Only the segments in +// [index_start, index_end[ will be considered. +// TODO(user): Consider making the following two functions methods of +// FloatSlopePiecewiseLinearFunction. They're only called in lp_scheduling.cc +// and ../tour_optimization/model_test.cc, but they might come in handy. +std::vector PiecewiseLinearFunctionToSlopeAndYIntercept( + const FloatSlopePiecewiseLinearFunction& pwl_function, int index_start = 0, + int index_end = -1); + // Converts a vector of SlopeAndYIntercept to a vector of convexity regions. // Convexity regions are defined such that, all segment in a convexity region // form a convex function. The boolean in the vector is set to true if the @@ -1057,14 +1041,6 @@ struct SlopeAndYIntercept { std::vector SlopeAndYInterceptToConvexityRegions( absl::Span slope_and_y_intercept); -// Given a PiecewiseLinearFormulation, returns a vector of slope and y-intercept -// corresponding to each segment. Only the segments in [index_start, index_end[ -// will be considered. 
-std::vector PiecewiseLinearFormulationToSlopeAndYIntercept( - const RoutingModel::RouteDimensionTravelInfo::TransitionInfo:: - PiecewiseLinearFormulation& pwl_function, - int index_start = 0, int index_end = -1); - } // namespace operations_research::routing #endif // OR_TOOLS_ROUTING_LP_SCHEDULING_H_ diff --git a/ortools/routing/parameters.cc b/ortools/routing/parameters.cc index a991cb07a9..57b93f3651 100644 --- a/ortools/routing/parameters.cc +++ b/ortools/routing/parameters.cc @@ -122,7 +122,7 @@ RoutingSearchParameters CreateDefaultRoutingSearchParameters() { o->set_use_make_inactive(BOOL_TRUE); o->set_use_make_chain_inactive(BOOL_TRUE); o->set_use_swap_active(BOOL_TRUE); - o->set_use_swap_active_chain(BOOL_FALSE); + o->set_use_swap_active_chain(BOOL_TRUE); o->set_use_extended_swap_active(BOOL_FALSE); o->set_use_shortest_path_swap_active(BOOL_TRUE); o->set_use_node_pair_swap_active(BOOL_FALSE); @@ -143,6 +143,7 @@ RoutingSearchParameters CreateDefaultRoutingSearchParameters() { p.set_use_multi_armed_bandit_concatenate_operators(false); p.set_multi_armed_bandit_compound_operator_memory_coefficient(0.04); p.set_multi_armed_bandit_compound_operator_exploration_coefficient(1e12); + p.set_max_swap_active_chain_size(10); p.set_relocate_expensive_chain_num_arcs_to_consider(4); p.set_heuristic_expensive_chain_lns_num_arcs_to_consider(4); p.set_heuristic_close_nodes_lns_num_nodes(5); @@ -317,6 +318,28 @@ void FindErrorsInIteratedLocalSearchParameters( "ruin_strategy is set to RandomWalkRuinStrategy" " but random_walk.num_removed_visits is 0 (should be " "strictly positive)")); + } else if (ruin.strategy_case() == RuinStrategy::kSisr) { + if (ruin.sisr().avg_num_removed_visits() == 0) { + errors.emplace_back( + "iterated_local_search_parameters.ruin_recreate_parameters." 
+ "ruin is set to SISRRuinStrategy" + " but sisr.avg_num_removed_visits is 0 (should be strictly " + "positive)"); + } + if (ruin.sisr().max_removed_sequence_size() == 0) { + errors.emplace_back( + "iterated_local_search_parameters.ruin_recreate_parameters.ruin " + "is set to SISRRuinStrategy but " + "sisr.max_removed_sequence_size is 0 (should be strictly " + "positive)"); + } + if (ruin.sisr().bypass_factor() < 0 || + ruin.sisr().bypass_factor() > 1) { + errors.emplace_back(StrCat( + "iterated_local_search_parameters.ruin_recreate_parameters." + "ruin is set to SISRRuinStrategy" + " but sisr.bypass_factor is not in [0, 1]")); + } } } @@ -687,6 +710,14 @@ std::vector FindErrorsInRoutingSearchParameters( " search"); } + if (search_parameters.max_swap_active_chain_size() < 1 && + search_parameters.local_search_operators().use_swap_active_chain() == + OptionalBoolean::BOOL_TRUE) { + errors.emplace_back( + "max_swap_active_chain_size must be greater than 1 if " + "local_search_operators.use_swap_active_chain is BOOL_TRUE"); + } + FindErrorsInIteratedLocalSearchParameters(search_parameters, errors); return errors; diff --git a/ortools/routing/parameters.proto b/ortools/routing/parameters.proto index 0863ccac4f..96a4fc9b17 100644 --- a/ortools/routing/parameters.proto +++ b/ortools/routing/parameters.proto @@ -36,7 +36,7 @@ package operations_research.routing; // then the routing library will pick its preferred value for that parameter // automatically: this should be the case for most parameters. // To see those "default" parameters, call GetDefaultRoutingSearchParameters(). -// Next ID: 66 +// Next ID: 67 message RoutingSearchParameters { reserved 19; @@ -459,6 +459,9 @@ message RoutingSearchParameters { // unsuccessful in the past operators double multi_armed_bandit_compound_operator_exploration_coefficient = 43; + // Maximum size of the chain to make inactive in SwapActiveChainOperator. 
+ int32 max_swap_active_chain_size = 66; + // Number of expensive arcs to consider cutting in the RelocateExpensiveChain // neighborhood operator (see // LocalSearchNeighborhoodOperators.use_relocate_expensive_chain()). diff --git a/ortools/routing/routing.cc b/ortools/routing/routing.cc index fd4098192a..bbe5d39088 100644 --- a/ortools/routing/routing.cc +++ b/ortools/routing/routing.cc @@ -50,7 +50,6 @@ #include "absl/time/time.h" #include "absl/types/span.h" #include "google/protobuf/util/message_differencer.h" -#include "ortools/base/dump_vars.h" #include "ortools/base/int_type.h" #include "ortools/base/logging.h" #include "ortools/base/map_util.h" @@ -138,16 +137,6 @@ std::string RoutingModel::RouteDimensionTravelInfo::TransitionInfo::DebugString( line_prefix, travel_compression_cost.DebugString(line_prefix + "\t")); } -std::string RoutingModel::RouteDimensionTravelInfo::TransitionInfo:: - PiecewiseLinearFormulation::DebugString(std::string line_prefix) const { - if (x_anchors.size() <= 10) { - return "{ " + DUMP_VARS(x_anchors, y_anchors).str() + "}"; - } - return absl::StrFormat("{\n%s%s\n%s%s\n}", line_prefix, - DUMP_VARS(x_anchors).str(), line_prefix, - DUMP_VARS(y_anchors).str()); -} - const Assignment* RoutingModel::PackCumulsOfOptimizerDimensionsFromAssignment( const Assignment* original_assignment, absl::Duration duration_limit, bool* time_limit_was_reached) { @@ -4707,7 +4696,8 @@ void RoutingModel::CreateNeighborhoodOperators( CreateCPOperator(); local_search_operators_[SWAP_ACTIVE] = CreateCPOperator(); local_search_operators_[SWAP_ACTIVE_CHAIN] = - CreateCPOperator(); + CreateCPOperatorWithArg( + parameters.max_swap_active_chain_size()); local_search_operators_[EXTENDED_SWAP_ACTIVE] = CreateCPOperator(); std::vector> alternative_sets(disjunctions_.size()); diff --git a/ortools/routing/routing.h b/ortools/routing/routing.h index 3ce1ff1519..d80d093b3a 100644 --- a/ortools/routing/routing.h +++ b/ortools/routing/routing.h @@ -172,7 +172,6 @@ #include 
"absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" -#include "absl/container/inlined_vector.h" #include "absl/hash/hash.h" #include "absl/log/check.h" #include "absl/strings/string_view.h" @@ -1521,36 +1520,21 @@ class RoutingModel { const Assignment* PackCumulsOfOptimizerDimensionsFromAssignment( const Assignment* original_assignment, absl::Duration duration_limit, bool* time_limit_was_reached = nullptr); + +#ifndef SWIG /// Contains the information needed by the solver to optimize a dimension's /// cumuls with travel-start dependent transit values. struct RouteDimensionTravelInfo { /// Contains the information for a single transition on the route. struct TransitionInfo { - /// The following struct defines a piecewise linear formulation, with - /// int64_t values for the "anchor" x and y values, and potential double - /// values for the slope of each linear function. - // TODO(user): Adjust the inlined vector sizes based on experiments. - struct PiecewiseLinearFormulation { - /// The set of *increasing* anchor cumul values for the interpolation. - absl::InlinedVector x_anchors; - /// The y values used for the interpolation: - /// For any x anchor value, let i be an index such that - /// x_anchors[i] ≤ x < x_anchors[i+1], then the y value for x is - /// y_anchors[i] * (1-λ) + y_anchors[i+1] * λ, with - /// λ = (x - x_anchors[i]) / (x_anchors[i+1] - x_anchors[i]). - absl::InlinedVector y_anchors; - - std::string DebugString(std::string line_prefix = "") const; - }; - /// Models the (real) travel value Tᵣ, for this transition based on the /// departure value of the travel. - PiecewiseLinearFormulation travel_start_dependent_travel; + FloatSlopePiecewiseLinearFunction travel_start_dependent_travel; /// travel_compression_cost models the cost of the difference between the /// (real) travel value Tᵣ given by travel_start_dependent_travel and the /// compressed travel value considered in the scheduling. 
- PiecewiseLinearFormulation travel_compression_cost; + FloatSlopePiecewiseLinearFunction travel_compression_cost; /// The parts of the transit which occur pre/post travel between the /// nodes. The total transit between the two nodes i and j is @@ -1580,6 +1564,8 @@ class RoutingModel { std::string DebugString(std::string line_prefix = "") const; }; +#endif // SWIG + #ifndef SWIG // TODO(user): Revisit if coordinates are added to the RoutingModel class. void SetSweepArranger(SweepArranger* sweep_arranger); @@ -2345,6 +2331,11 @@ class RoutingModel { LocalSearchOperator* CreateCPOperator() { return CreateCPOperator(MakeLocalSearchOperator); } + template + LocalSearchOperator* CreateCPOperatorWithArg(ArgType arg) { + return CreateCPOperatorWithArg(MakeLocalSearchOperatorWithArg, + std::move(arg)); + } using NeighborAccessor = std::function&(int, int)>; template LocalSearchOperator* CreateCPOperatorWithNeighbors( @@ -2368,6 +2359,15 @@ class RoutingModel { : vehicle_vars_, vehicle_start_class_callback_, std::move(get_neighbors)); } + template + LocalSearchOperator* CreateCPOperatorWithArg(const T& operator_factory, + ArgType arg) { + return operator_factory(solver_.get(), nexts_, + CostsAreHomogeneousAcrossVehicles() + ? 
std::vector() + : vehicle_vars_, + vehicle_start_class_callback_, std::move(arg)); + } template LocalSearchOperator* CreateOperator(const Arg& arg) { return solver_->RevAlloc(new T(nexts_, diff --git a/ortools/util/piecewise_linear_function.cc b/ortools/util/piecewise_linear_function.cc index e691f9ae4a..23bc4eda23 100644 --- a/ortools/util/piecewise_linear_function.cc +++ b/ortools/util/piecewise_linear_function.cc @@ -14,15 +14,22 @@ #include "ortools/util/piecewise_linear_function.h" #include +#include #include -#include #include #include #include +#include "absl/algorithm/container.h" #include "absl/container/btree_set.h" +#include "absl/container/inlined_vector.h" +#include "absl/log/check.h" #include "absl/strings/str_format.h" +#include "absl/strings/string_view.h" +#include "ortools/base/dump_vars.h" #include "ortools/base/logging.h" +#include "ortools/base/mathutil.h" +#include "ortools/base/types.h" #include "ortools/util/saturated_arithmetic.h" namespace operations_research { @@ -78,6 +85,7 @@ uint64_t UnsignedCapProd(uint64_t left, uint64_t right) { } } // namespace +// PiecewiseSegment PiecewiseSegment::PiecewiseSegment(int64_t point_x, int64_t point_y, int64_t slope, int64_t other_point_x) : slope_(slope), reference_x_(point_x), reference_y_(point_y) { @@ -269,6 +277,7 @@ std::string PiecewiseSegment::DebugString() const { return result; } +// PiecewiseLinearFunction const int PiecewiseLinearFunction::kNotFound = -1; PiecewiseLinearFunction::PiecewiseLinearFunction( @@ -800,4 +809,60 @@ bool PiecewiseLinearFunction::IsNonIncreasingInternal() const { return true; } +// FloatSlopePiecewiseLinearFunction +const int FloatSlopePiecewiseLinearFunction::kNoValue = -1; + +FloatSlopePiecewiseLinearFunction::FloatSlopePiecewiseLinearFunction( + absl::InlinedVector x_anchors, + absl::InlinedVector y_anchors) + : x_anchors_(std::move(x_anchors)), y_anchors_(std::move(y_anchors)) { + DCHECK(absl::c_is_sorted(x_anchors_)); + DCHECK_EQ(x_anchors_.size(), 
y_anchors_.size()); + DCHECK_NE(x_anchors_.size(), 1); +} + +std::string FloatSlopePiecewiseLinearFunction::DebugString( + absl::string_view line_prefix) const { + if (x_anchors_.size() <= 10) { + return "{ " + DUMP_VARS(x_anchors_, y_anchors_).str() + "}"; + } + return absl::StrFormat("{\n%s%s\n%s%s\n}", line_prefix, + DUMP_VARS(x_anchors_).str(), line_prefix, + DUMP_VARS(y_anchors_).str()); +} + +int64_t FloatSlopePiecewiseLinearFunction::ComputeInBoundsValue( + int64_t x) const { + const int segment_index = GetSegmentIndex(x); + if (segment_index == kNoValue) return kNoValue; + return GetValueOnSegment(x, segment_index); +} + +int64_t FloatSlopePiecewiseLinearFunction::ComputeConvexValue(int64_t x) const { + if (x_anchors_.empty()) return kNoValue; + + int segment_index = kNoValue; + if (x <= x_anchors_[0]) { + segment_index = 0; + } else if (x >= x_anchors_.back()) { + segment_index = x_anchors_.size() - 2; + } else { + segment_index = GetSegmentIndex(x); + } + + return GetValueOnSegment(x, segment_index); +} + +int64_t FloatSlopePiecewiseLinearFunction::GetValueOnSegment( + int64_t x, int segment_index) const { + DCHECK_GE(segment_index, 0); + DCHECK_LE(segment_index, x_anchors_.size() - 2); + const double slope = + static_cast(y_anchors_[segment_index + 1] - + y_anchors_[segment_index]) / + (x_anchors_[segment_index + 1] - x_anchors_[segment_index]); + return MathUtil::Round(slope * (x - x_anchors_[segment_index]) + + y_anchors_[segment_index]); +} + } // namespace operations_research diff --git a/ortools/util/piecewise_linear_function.h b/ortools/util/piecewise_linear_function.h index 26af5778bf..b076baddd8 100644 --- a/ortools/util/piecewise_linear_function.h +++ b/ortools/util/piecewise_linear_function.h @@ -22,10 +22,16 @@ #include #include +#include #include #include #include +#include "absl/algorithm/container.h" +#include "absl/container/inlined_vector.h" +#include "absl/log/check.h" +#include "absl/strings/string_view.h" + namespace 
operations_research { // This structure stores one straight line. It contains the start point, the // end point and the slope. @@ -268,5 +274,86 @@ class PiecewiseLinearFunction { bool is_non_decreasing_; bool is_non_increasing_; }; + +// The following class defines a piecewise linear formulation with potential +// double values for the slope of each linear function. +// This formulation is meant to be used with a small number of segments (see +// InlinedVector sizes below). +// These segments are determined by int64_t values for the "anchor" x and y +// values, such that (x_anchors_[i], y_anchors_[i]) and +// (x_anchors_[i+1], y_anchors_[i+1]) are respectively the start and end point +// of the i-th segment. +// TODO(user): Adjust the inlined vector sizes based on experiments. +class FloatSlopePiecewiseLinearFunction { + public: + static const int kNoValue; + + FloatSlopePiecewiseLinearFunction() = default; + FloatSlopePiecewiseLinearFunction(absl::InlinedVector x_anchors, + absl::InlinedVector y_anchors); + FloatSlopePiecewiseLinearFunction( + FloatSlopePiecewiseLinearFunction&& other) noexcept { + *this = std::move(other); + } + + FloatSlopePiecewiseLinearFunction& operator=( + FloatSlopePiecewiseLinearFunction&& other) noexcept { + x_anchors_ = std::move(other.x_anchors_); + y_anchors_ = std::move(other.y_anchors_); + return *this; + } + + std::string DebugString(absl::string_view line_prefix = {}) const; + + const absl::InlinedVector& x_anchors() const { + return x_anchors_; + } + const absl::InlinedVector& y_anchors() const { + return y_anchors_; + } + + // Computes the y value associated to 'x'. Returns kNoValue if 'x' is out of + // bounds, i.e. lower than the first x_anchor and largest than the last. + int64_t ComputeInBoundsValue(int64_t x) const; + + // Computes the y value associated to 'x'. Unlike ComputeInBoundsValue(), if + // 'x' is outside the bounds of the function, the function will still be + // defined by its outer segments. 
+ int64_t ComputeConvexValue(int64_t x) const; + + private: + // Returns the index of the segment x belongs to, i.e. the index i such that + // x_anchors_[i] ≤ x < x_anchors_[i+1]. For x = x_anchors_.back(), also + // returns the last segment (i.e. x_anchors_.size() - 2). + // Returns kNoValue if x is out of bounds for the function. + int GetSegmentIndex(int64_t x) const { + if (x_anchors_.empty() || x < x_anchors_[0] || x > x_anchors_.back()) { + return kNoValue; + } + if (x == x_anchors_.back()) return x_anchors_.size() - 2; + + // Search for first element xi such that xi > x. + const auto upper_segment = absl::c_upper_bound(x_anchors_, x); + const int segment_index = + std::distance(x_anchors_.begin(), upper_segment) - 1; + DCHECK_GE(segment_index, 0); + DCHECK_LE(segment_index, x_anchors_.size() - 2); + return segment_index; + } + + // Returns the value of 'x' on the linear segment determined by + // x_anchors_[segment_index] and x_anchors_[segment_index + 1]. + int64_t GetValueOnSegment(int64_t x, int segment_index) const; + + // The set of *increasing* anchor cumul values for the interpolation. + absl::InlinedVector x_anchors_; + // The y values used for the interpolation: + // For any x anchor value, let i be an index such that + // x_anchors[i] ≤ x < x_anchors[i+1], then the y value for x is + // y_anchors[i] * (1-λ) + y_anchors[i+1] * λ, with + // λ = (x - x_anchors[i]) / (x_anchors[i+1] - x_anchors[i]). 
+ absl::InlinedVector y_anchors_; +}; + } // namespace operations_research #endif // OR_TOOLS_UTIL_PIECEWISE_LINEAR_FUNCTION_H_ From 100b6bc5c39b6df2744653dfb208a571e17cc6a1 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 25 Sep 2024 17:16:55 +0200 Subject: [PATCH 024/105] sat: update go/cpmodel --- ortools/sat/cp_model.proto | 1 + ortools/sat/docs/README.md | 6 +- ortools/sat/docs/boolean_logic.md | 12 +- ortools/sat/docs/channeling.md | 16 +- ortools/sat/docs/integer_arithmetic.md | 26 +- ortools/sat/docs/model.md | 6 +- ortools/sat/docs/scheduling.md | 22 +- ortools/sat/docs/solver.md | 26 +- ortools/sat/go/cpmodel/BUILD.bazel | 10 +- ortools/sat/go/cpmodel/cp_model.go | 133 ++-- ortools/sat/go/cpmodel/cp_model_test.go | 607 ++++++++---------- ortools/sat/go/cpmodel/cp_solver.go | 4 +- ortools/sat/go/cpmodel/cp_solver_c.cc | 16 +- ortools/sat/samples/assumptions_sample_sat.go | 6 +- ortools/sat/samples/binpacking_problem_sat.go | 6 +- .../sat/samples/boolean_product_sample_sat.go | 8 +- ortools/sat/samples/channeling_sample_sat.go | 10 +- .../earliness_tardiness_cost_sample_sat.go | 10 +- ortools/sat/samples/interval_sample_sat.go | 4 +- ortools/sat/samples/literal_sample_sat.go | 4 +- ortools/sat/samples/no_overlap_sample_sat.go | 6 +- ortools/sat/samples/nqueens_sat.go | 12 +- ortools/sat/samples/nurses_sat.go | 4 +- .../samples/optional_interval_sample_sat.go | 4 +- .../sat/samples/rabbits_and_pheasants_sat.go | 6 +- ortools/sat/samples/ranking_sample_sat.go | 8 +- .../search_for_all_solutions_sample_sat.go | 8 +- ortools/sat/samples/simple_sat_program.go | 6 +- .../samples/solution_hinting_sample_sat.go | 6 +- ...print_intermediate_solutions_sample_sat.go | 8 +- .../solve_with_time_limit_sample_sat.go | 10 +- .../sat/samples/step_function_sample_sat.go | 10 +- ortools/sat/sat_parameters.proto | 6 +- 33 files changed, 465 insertions(+), 562 deletions(-) diff --git a/ortools/sat/cp_model.proto b/ortools/sat/cp_model.proto index 
b76dc95aa1..b57d2056ba 100644 --- a/ortools/sat/cp_model.proto +++ b/ortools/sat/cp_model.proto @@ -18,6 +18,7 @@ syntax = "proto3"; package operations_research.sat; option csharp_namespace = "Google.OrTools.Sat"; +option go_package = "github.com/google/or-tools/ortools/sat/proto/cpmodel"; option java_package = "com.google.ortools.sat"; option java_multiple_files = true; option java_outer_classname = "CpModelProtobuf"; diff --git a/ortools/sat/docs/README.md b/ortools/sat/docs/README.md index c00f1d65a5..82df9dec24 100644 --- a/ortools/sat/docs/README.md +++ b/ortools/sat/docs/README.md @@ -226,8 +226,8 @@ package main import ( "fmt" - "github.com/golang/glog" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" "ortools/sat/go/cpmodel" ) @@ -264,7 +264,7 @@ func simpleSatProgram() error { func main() { if err := simpleSatProgram(); err != nil { - glog.Exitf("simpleSatProgram returned with error: %v", err) + log.Exitf("simpleSatProgram returned with error: %v", err) } } ``` diff --git a/ortools/sat/docs/boolean_logic.md b/ortools/sat/docs/boolean_logic.md index 9f0d10c8fb..db47873d84 100644 --- a/ortools/sat/docs/boolean_logic.md +++ b/ortools/sat/docs/boolean_logic.md @@ -113,7 +113,7 @@ public class LiteralSampleSat package main import ( - "github.com/golang/glog" + log "github.com/golang/glog" "ortools/sat/go/cpmodel" ) @@ -123,7 +123,7 @@ func literalSampleSat() { x := model.NewBoolVar().WithName("x") notX := x.Not() - glog.Infof("x = %d, x.Not() = %d", x.Index(), notX.Index()) + log.Infof("x = %d, x.Not() = %d", x.Index(), notX.Index()) } func main() { @@ -525,10 +525,10 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" + log "github.com/golang/glog" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb "ortools/sat/sat_parameters_go_proto" ) func 
booleanProductSample() error { @@ -576,7 +576,7 @@ func booleanProductSample() error { func main() { err := booleanProductSample() if err != nil { - glog.Exitf("booleanProductSample returned with error: %v", err) + log.Exitf("booleanProductSample returned with error: %v", err) } } ``` diff --git a/ortools/sat/docs/channeling.md b/ortools/sat/docs/channeling.md index cf16b8960d..f4811905ff 100644 --- a/ortools/sat/docs/channeling.md +++ b/ortools/sat/docs/channeling.md @@ -308,11 +308,11 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb "ortools/sat/sat_parameters_go_proto" ) func channelingSampleSat() error { @@ -367,7 +367,7 @@ func channelingSampleSat() error { func main() { if err := channelingSampleSat(); err != nil { - glog.Exitf("channelingSampleSat returned with error: %v", err) + log.Exitf("channelingSampleSat returned with error: %v", err) } } ``` @@ -895,7 +895,7 @@ package main import ( "fmt" - "github.com/golang/glog" + log "github.com/golang/glog" "ortools/sat/go/cpmodel" ) @@ -907,7 +907,7 @@ const ( ) type item struct { - Cost, Copies int64_t + Cost, Copies int64 } func binpackingProblemSat() error { @@ -993,7 +993,7 @@ func binpackingProblemSat() error { func main() { if err := binpackingProblemSat(); err != nil { - glog.Exitf("binpackingProblemSat returned with error: %v", err) + log.Exitf("binpackingProblemSat returned with error: %v", err) } } ``` diff --git a/ortools/sat/docs/integer_arithmetic.md b/ortools/sat/docs/integer_arithmetic.md index 2fcd3c5629..1a7e0e3aac 100644 --- a/ortools/sat/docs/integer_arithmetic.md +++ b/ortools/sat/docs/integer_arithmetic.md @@ -275,8 +275,8 @@ package main import ( "fmt" - "github.com/golang/glog" - 
cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" "ortools/sat/go/cpmodel" ) @@ -315,7 +315,7 @@ func rabbitsAndPheasants() error { func main() { if err := rabbitsAndPheasants(); err != nil { - glog.Exitf("rabbitsAndPheasants returned with error: %v", err) + log.Exitf("rabbitsAndPheasants returned with error: %v", err) } } ``` @@ -675,11 +675,11 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb "ortools/sat/sat_parameters_go_proto" ) const ( @@ -742,7 +742,7 @@ func earlinessTardinessCostSampleSat() error { func main() { if err := earlinessTardinessCostSampleSat(); err != nil { - glog.Exitf("earlinessTardinessCostSampleSat returned with error: %v", err) + log.Exitf("earlinessTardinessCostSampleSat returned with error: %v", err) } } ``` @@ -1131,11 +1131,11 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb "ortools/sat/sat_parameters_go_proto" ) func stepFunctionSampleSat() error { @@ -1208,7 +1208,7 @@ func stepFunctionSampleSat() error { func main() { if err := stepFunctionSampleSat(); err != nil { - glog.Exitf("stepFunctionSampleSat returned with error: %v", err) + log.Exitf("stepFunctionSampleSat returned with error: %v", err) } } ``` diff --git a/ortools/sat/docs/model.md b/ortools/sat/docs/model.md index 3949f8ce2b..2218611998 100644 --- a/ortools/sat/docs/model.md +++ 
b/ortools/sat/docs/model.md @@ -310,8 +310,8 @@ package main import ( "fmt" - "github.com/golang/glog" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" "ortools/sat/go/cpmodel" ) @@ -353,7 +353,7 @@ func solutionHintingSampleSat() error { func main() { if err := solutionHintingSampleSat(); err != nil { - glog.Exitf("solutionHintingSampleSat returned with error: %v", err) + log.Exitf("solutionHintingSampleSat returned with error: %v", err) } } ``` diff --git a/ortools/sat/docs/scheduling.md b/ortools/sat/docs/scheduling.md index 19a8b5264c..00aacda186 100644 --- a/ortools/sat/docs/scheduling.md +++ b/ortools/sat/docs/scheduling.md @@ -195,7 +195,7 @@ package main import ( "fmt" - "github.com/golang/glog" + log "github.com/golang/glog" "ortools/sat/go/cpmodel" ) @@ -231,7 +231,7 @@ func intervalSampleSat() error { func main() { if err := intervalSampleSat(); err != nil { - glog.Exitf("intervalSampleSat returned with error: %v", err) + log.Exitf("intervalSampleSat returned with error: %v", err) } } ``` @@ -421,7 +421,7 @@ package main import ( "fmt" - "github.com/golang/glog" + log "github.com/golang/glog" "ortools/sat/go/cpmodel" ) @@ -454,7 +454,7 @@ func optionalIntervalSampleSat() error { func main() { if err := optionalIntervalSampleSat(); err != nil { - glog.Exitf("optionalIntervalSampleSat returned with error: %v", err) + log.Exitf("optionalIntervalSampleSat returned with error: %v", err) } } ``` @@ -841,8 +841,8 @@ package main import ( "fmt" - "github.com/golang/glog" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" "ortools/sat/go/cpmodel" ) @@ -909,7 +909,7 @@ func noOverlapSampleSat() error { func main() { if err := noOverlapSampleSat(); err != nil { - glog.Exitf("noOverlapSampleSat returned with error: %v", err) + log.Exitf("noOverlapSampleSat returned with error: %v", err) } } ``` @@ -1865,8 
+1865,8 @@ package main import ( "fmt" - "github.com/golang/glog" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" "ortools/sat/go/cpmodel" ) @@ -1937,7 +1937,7 @@ func rankingSampleSat() error { for t := 0; t < numTasks; t++ { start := model.NewIntVarFromDomain(horizon) - duration := cpmodel.NewConstant(int64_t(t + 1)) + duration := cpmodel.NewConstant(int64(t + 1)) end := model.NewIntVarFromDomain(horizon) var presence cpmodel.BoolVar if t < numTasks/2 { @@ -2008,7 +2008,7 @@ func rankingSampleSat() error { func main() { if err := rankingSampleSat(); err != nil { - glog.Exitf("rankingSampleSat returned with error: %v", err) + log.Exitf("rankingSampleSat returned with error: %v", err) } } ``` diff --git a/ortools/sat/docs/solver.md b/ortools/sat/docs/solver.md index 5881234d43..6feee57dde 100644 --- a/ortools/sat/docs/solver.md +++ b/ortools/sat/docs/solver.md @@ -194,11 +194,11 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb "ortools/sat/sat_parameters_go_proto" ) func solveWithTimeLimitSampleSat() error { @@ -240,7 +240,7 @@ func solveWithTimeLimitSampleSat() error { func main() { if err := solveWithTimeLimitSampleSat(); err != nil { - glog.Exitf("solveWithTimeLimitSampleSat returned with error: %v", err) + log.Exitf("solveWithTimeLimitSampleSat returned with error: %v", err) } } ``` @@ -535,10 +535,10 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" + log "github.com/golang/glog" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb 
"ortools/sat/sat_parameters_go_proto" ) func solveAndPrintIntermediateSolutionsSampleSat() error { @@ -583,7 +583,7 @@ func solveAndPrintIntermediateSolutionsSampleSat() error { func main() { if err := solveAndPrintIntermediateSolutionsSampleSat(); err != nil { - glog.Exitf("solveAndPrintIntermediateSolutionsSampleSat returned with error: %v", err) + log.Exitf("solveAndPrintIntermediateSolutionsSampleSat returned with error: %v", err) } } ``` @@ -872,10 +872,10 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" + log "github.com/golang/glog" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb "ortools/sat/sat_parameters_go_proto" ) func searchForAllSolutionsSampleSat() error { @@ -917,7 +917,7 @@ func searchForAllSolutionsSampleSat() error { func main() { if err := searchForAllSolutionsSampleSat(); err != nil { - glog.Exitf("searchForAllSolutionsSampleSat returned with error: %v", err) + log.Exitf("searchForAllSolutionsSampleSat returned with error: %v", err) } } ``` diff --git a/ortools/sat/go/cpmodel/BUILD.bazel b/ortools/sat/go/cpmodel/BUILD.bazel index 707ce53b2f..25b32f8db7 100644 --- a/ortools/sat/go/cpmodel/BUILD.bazel +++ b/ortools/sat/go/cpmodel/BUILD.bazel @@ -18,8 +18,8 @@ go_library( srcs = [ "cp_model.go", "cp_solver.go", - "cp_solver_c.cc", - "cp_solver_c.h", + #"cp_solver_c.cc", + #"cp_solver_c.h", "domain.go", ], cdeps = [":cp_solver_c"], @@ -57,12 +57,12 @@ cc_library( srcs = ["cp_solver_c.cc"], hdrs = ["cp_solver_c.h"], deps = [ + "//ortools/base:memutil", "//ortools/sat:cp_model_cc_proto", "//ortools/sat:cp_model_solver", + "//ortools/sat:model", "//ortools/sat:sat_parameters_cc_proto", "//ortools/util:time_limit", - "@com_google_absl//absl/log", - "@com_google_absl//absl/status", - "@com_google_absl//absl/strings", + "@com_google_absl//absl/log:check", ], ) diff --git a/ortools/sat/go/cpmodel/cp_model.go 
b/ortools/sat/go/cpmodel/cp_model.go index 1822f6c55f..561233aece 100644 --- a/ortools/sat/go/cpmodel/cp_model.go +++ b/ortools/sat/go/cpmodel/cp_model.go @@ -578,8 +578,8 @@ func (cp *Builder) NewOptionalIntervalVar(start, size, end LinearArgument, prese Start: start.asLinearExpressionProto(), Size: size.asLinearExpressionProto(), End: end.asLinearExpressionProto(), - }, - }}) + }}, + }) return IntervalVar{cpb: cp, ind: ind} } @@ -803,11 +803,10 @@ func (cp *Builder) AddMinEquality(target LinearArgument, exprs ...LinearArgument } return cp.appendConstraint(&cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_LinMax{ - &cmpb.LinearArgumentProto{ - Target: asNegatedLinearExpressionProto(target), - Exprs: protos, - }}, + Constraint: &cmpb.ConstraintProto_LinMax{&cmpb.LinearArgumentProto{ + Target: asNegatedLinearExpressionProto(target), + Exprs: protos, + }}, }) } @@ -819,11 +818,10 @@ func (cp *Builder) AddMaxEquality(target LinearArgument, exprs ...LinearArgument } return cp.appendConstraint(&cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_LinMax{ - &cmpb.LinearArgumentProto{ - Target: target.asLinearExpressionProto(), - Exprs: protos, - }}, + Constraint: &cmpb.ConstraintProto_LinMax{&cmpb.LinearArgumentProto{ + Target: target.asLinearExpressionProto(), + Exprs: protos, + }}, }) } @@ -835,53 +833,49 @@ func (cp *Builder) AddMultiplicationEquality(target LinearArgument, exprs ...Lin } return cp.appendConstraint(&cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_IntProd{ - &cmpb.LinearArgumentProto{ - Target: target.asLinearExpressionProto(), - Exprs: protos, - }}, + Constraint: &cmpb.ConstraintProto_IntProd{&cmpb.LinearArgumentProto{ + Target: target.asLinearExpressionProto(), + Exprs: protos, + }}, }) } // AddDivisionEquality adds the constraint: target == num / denom. 
func (cp *Builder) AddDivisionEquality(target, num, denom LinearArgument) Constraint { return cp.appendConstraint(&cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_IntDiv{ - &cmpb.LinearArgumentProto{ - Target: target.asLinearExpressionProto(), - Exprs: []*cmpb.LinearExpressionProto{ - num.asLinearExpressionProto(), - denom.asLinearExpressionProto(), - }, - }}, + Constraint: &cmpb.ConstraintProto_IntDiv{&cmpb.LinearArgumentProto{ + Target: target.asLinearExpressionProto(), + Exprs: []*cmpb.LinearExpressionProto{ + num.asLinearExpressionProto(), + denom.asLinearExpressionProto(), + }, + }}, }) } // AddAbsEquality adds the constraint: target == Abs(expr). func (cp *Builder) AddAbsEquality(target, expr LinearArgument) Constraint { return cp.appendConstraint(&cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_LinMax{ - &cmpb.LinearArgumentProto{ - Target: target.asLinearExpressionProto(), - Exprs: []*cmpb.LinearExpressionProto{ - expr.asLinearExpressionProto(), - asNegatedLinearExpressionProto(expr), - }, - }}, + Constraint: &cmpb.ConstraintProto_LinMax{&cmpb.LinearArgumentProto{ + Target: target.asLinearExpressionProto(), + Exprs: []*cmpb.LinearExpressionProto{ + expr.asLinearExpressionProto(), + asNegatedLinearExpressionProto(expr), + }, + }}, }) } // AddModuloEquality adds the constraint: target == v % mod. 
func (cp *Builder) AddModuloEquality(target, v, mod LinearArgument) Constraint { return cp.appendConstraint(&cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_IntMod{ - &cmpb.LinearArgumentProto{ - Target: target.asLinearExpressionProto(), - Exprs: []*cmpb.LinearExpressionProto{ - v.asLinearExpressionProto(), - mod.asLinearExpressionProto(), - }, - }}, + Constraint: &cmpb.ConstraintProto_IntMod{&cmpb.LinearArgumentProto{ + Target: target.asLinearExpressionProto(), + Exprs: []*cmpb.LinearExpressionProto{ + v.asLinearExpressionProto(), + mod.asLinearExpressionProto(), + }, + }}, }) } @@ -894,37 +888,33 @@ func (cp *Builder) AddNoOverlap(vars ...IntervalVar) Constraint { } return cp.appendConstraint(&cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_NoOverlap{ - &cmpb.NoOverlapConstraintProto{ - Intervals: intervals, - }}, + Constraint: &cmpb.ConstraintProto_NoOverlap{&cmpb.NoOverlapConstraintProto{ + Intervals: intervals, + }}, }) } // AddNoOverlap2D adds a no_overlap2D constraint that prevents a set of boxes from overlapping. func (cp *Builder) AddNoOverlap2D() NoOverlap2DConstraint { return NoOverlap2DConstraint{cp.appendConstraint(&cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_NoOverlap_2D{ - &cmpb.NoOverlap2DConstraintProto{}, - }})} + Constraint: &cmpb.ConstraintProto_NoOverlap_2D{&cmpb.NoOverlap2DConstraintProto{}}, + })} } // AddCircuitConstraint adds a circuit constraint to the model. The circuit constraint is // defined on a graph where the arcs are present if the corresponding literals are set to true. 
func (cp *Builder) AddCircuitConstraint() CircuitConstraint { return CircuitConstraint{cp.appendConstraint(&cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Circuit{ - &cmpb.CircuitConstraintProto{}, - }})} + Constraint: &cmpb.ConstraintProto_Circuit{&cmpb.CircuitConstraintProto{}}, + })} } // AddMultipleCircuitConstraint adds a multiple circuit constraint to the model, aka the "VRP" // (Vehicle Routing Problem) constraint. func (cp *Builder) AddMultipleCircuitConstraint() MultipleCircuitConstraint { return MultipleCircuitConstraint{cp.appendConstraint(&cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Routes{ - &cmpb.RoutesConstraintProto{}, - }})} + Constraint: &cmpb.ConstraintProto_Routes{&cmpb.RoutesConstraintProto{}}, + })} } // AddAllowedAssignments adds an allowed assignments constraint to the model. When all variables @@ -937,9 +927,8 @@ func (cp *Builder) AddAllowedAssignments(vars ...IntVar) TableConstraint { } return TableConstraint{cp.appendConstraint(&cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Table{ - &cmpb.TableConstraintProto{Vars: varsInd}, - }})} + Constraint: &cmpb.ConstraintProto_Table{&cmpb.TableConstraintProto{Vars: varsInd}}, + })} } // AddReservoirConstraint adds a reservoir constraint with optional refill/emptying events. @@ -951,8 +940,7 @@ func (cp *Builder) AddAllowedAssignments(vars ...IntVar) TableConstraint { // is assigned a value t, then the level of the reservoir changes by // level_change (which is constant) at time t. Therefore, at any time t: // -// sum(level_changes[i] * actives[i] if times[i] <= t) -// in [min_level, max_level] +// sum(level_changes[i] * actives[i] if times[i] <= t) in [min_level, max_level] // // Note that min level must be <= 0, and the max level must be >= 0. // Please use fixed level_changes to simulate an initial state. 
@@ -963,10 +951,9 @@ func (cp *Builder) AddReservoirConstraint(min, max int64) ReservoirConstraint { return ReservoirConstraint{ cp.appendConstraint( &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Reservoir{ - &cmpb.ReservoirConstraintProto{ - MinLevel: min, MaxLevel: max, - }}}, + Constraint: &cmpb.ConstraintProto_Reservoir{&cmpb.ReservoirConstraintProto{ + MinLevel: min, MaxLevel: max, + }}}, ), cp.NewConstant(1).Index()} } @@ -1001,12 +988,11 @@ func (cp *Builder) AddAutomaton(transitionVars []IntVar, startState int64, final transitions = append(transitions, int32(v.Index())) } return AutomatonConstraint{cp.appendConstraint(&cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Automaton{ - &cmpb.AutomatonConstraintProto{ - Vars: transitions, - StartingState: startState, - FinalStates: finalStates, - }}, + Constraint: &cmpb.ConstraintProto_Automaton{&cmpb.AutomatonConstraintProto{ + Vars: transitions, + StartingState: startState, + FinalStates: finalStates, + }}, })} } @@ -1015,11 +1001,10 @@ func (cp *Builder) AddAutomaton(transitionVars []IntVar, startState int64, final // capacity. func (cp *Builder) AddCumulative(capacity LinearArgument) CumulativeConstraint { return CumulativeConstraint{cp.appendConstraint(&cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Cumulative{ - &cmpb.CumulativeConstraintProto{ - Capacity: capacity.asLinearExpressionProto(), - }, - }})} + Constraint: &cmpb.ConstraintProto_Cumulative{&cmpb.CumulativeConstraintProto{ + Capacity: capacity.asLinearExpressionProto(), + }}, + })} } // Minimize adds a linear minimization objective. 
diff --git a/ortools/sat/go/cpmodel/cp_model_test.go b/ortools/sat/go/cpmodel/cp_model_test.go index ad91fefff0..8fa8c53bc0 100644 --- a/ortools/sat/go/cpmodel/cp_model_test.go +++ b/ortools/sat/go/cpmodel/cp_model_test.go @@ -20,11 +20,10 @@ import ( "sort" "testing" - "github.com/google/go-cmp/cmp" - "google.golang.org/protobuf/testing/protocmp" - log "github.com/golang/glog" + "github.com/google/go-cmp/cmp" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" + "google.golang.org/protobuf/testing/protocmp" ) func Example() { @@ -735,19 +734,17 @@ func TestIntervalVar(t *testing.T) { }, want: &cmpb.ConstraintProto{ EnforcementLiteral: []int32{int32(trueVar.Index())}, - Constraint: &cmpb.ConstraintProto_Interval{ - &cmpb.IntervalConstraintProto{ - Start: &cmpb.LinearExpressionProto{Offset: 1}, - Size: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{1}, - }, - End: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv2.Index())}, - Coeffs: []int64{1}, - }, + Constraint: &cmpb.ConstraintProto_Interval{&cmpb.IntervalConstraintProto{ + Start: &cmpb.LinearExpressionProto{Offset: 1}, + Size: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, }, - }, + End: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, + Coeffs: []int64{1}, + }, + }}, }, }, { @@ -759,20 +756,18 @@ func TestIntervalVar(t *testing.T) { }, want: &cmpb.ConstraintProto{ EnforcementLiteral: []int32{int32(trueVar.Index())}, - Constraint: &cmpb.ConstraintProto_Interval{ - &cmpb.IntervalConstraintProto{ - Start: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{1}, - }, - Size: &cmpb.LinearExpressionProto{Offset: 5}, - End: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{1}, - Offset: 5, - }, + Constraint: &cmpb.ConstraintProto_Interval{&cmpb.IntervalConstraintProto{ + Start: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + 
Coeffs: []int64{1}, }, - }, + Size: &cmpb.LinearExpressionProto{Offset: 5}, + End: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Offset: 5, + }, + }}, }, }, { @@ -784,19 +779,17 @@ func TestIntervalVar(t *testing.T) { }, want: &cmpb.ConstraintProto{ EnforcementLiteral: []int32{int32(bv1.Index())}, - Constraint: &cmpb.ConstraintProto_Interval{ - &cmpb.IntervalConstraintProto{ - Start: &cmpb.LinearExpressionProto{Offset: 1}, - Size: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{1}, - }, - End: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv2.Index())}, - Coeffs: []int64{1}, - }, + Constraint: &cmpb.ConstraintProto_Interval{&cmpb.IntervalConstraintProto{ + Start: &cmpb.LinearExpressionProto{Offset: 1}, + Size: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, }, - }, + End: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, + Coeffs: []int64{1}, + }, + }}, }, }, { @@ -808,20 +801,18 @@ func TestIntervalVar(t *testing.T) { }, want: &cmpb.ConstraintProto{ EnforcementLiteral: []int32{int32(bv1.Index())}, - Constraint: &cmpb.ConstraintProto_Interval{ - &cmpb.IntervalConstraintProto{ - Start: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{1}, - }, - Size: &cmpb.LinearExpressionProto{Offset: 5}, - End: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{1}, - Offset: 5, - }, + Constraint: &cmpb.ConstraintProto_Interval{&cmpb.IntervalConstraintProto{ + Start: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, }, - }, + Size: &cmpb.LinearExpressionProto{Offset: 5}, + End: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Offset: 5, + }, + }}, }, }, } @@ -880,11 +871,9 @@ func TestCpModelBuilder_Constraints(t *testing.T) { }, want: &cmpb.ConstraintProto{ EnforcementLiteral: 
[]int32{int32(bv3.Index())}, - Constraint: &cmpb.ConstraintProto_BoolOr{ - &cmpb.BoolArgumentProto{ - Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, - }, - }, + Constraint: &cmpb.ConstraintProto_BoolOr{&cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, + }}, }, }, { @@ -896,11 +885,9 @@ func TestCpModelBuilder_Constraints(t *testing.T) { }, want: &cmpb.ConstraintProto{ EnforcementLiteral: []int32{int32(bv3.Index())}, - Constraint: &cmpb.ConstraintProto_BoolAnd{ - &cmpb.BoolArgumentProto{ - Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, - }, - }, + Constraint: &cmpb.ConstraintProto_BoolAnd{&cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, + }}, }, }, { @@ -912,11 +899,9 @@ func TestCpModelBuilder_Constraints(t *testing.T) { }, want: &cmpb.ConstraintProto{ EnforcementLiteral: []int32{int32(bv3.Index())}, - Constraint: &cmpb.ConstraintProto_BoolXor{ - &cmpb.BoolArgumentProto{ - Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, - }, - }, + Constraint: &cmpb.ConstraintProto_BoolXor{&cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, + }}, }, }, { @@ -928,11 +913,9 @@ func TestCpModelBuilder_Constraints(t *testing.T) { }, want: &cmpb.ConstraintProto{ EnforcementLiteral: []int32{int32(bv3.Index())}, - Constraint: &cmpb.ConstraintProto_BoolOr{ - &cmpb.BoolArgumentProto{ - Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, - }, - }, + Constraint: &cmpb.ConstraintProto_BoolOr{&cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, + }}, }, }, { @@ -944,11 +927,9 @@ func TestCpModelBuilder_Constraints(t *testing.T) { }, want: &cmpb.ConstraintProto{ EnforcementLiteral: []int32{int32(bv3.Index())}, - Constraint: &cmpb.ConstraintProto_AtMostOne{ - &cmpb.BoolArgumentProto{ - Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, - }, - }, + 
Constraint: &cmpb.ConstraintProto_AtMostOne{&cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, + }}, }, }, { @@ -960,11 +941,9 @@ func TestCpModelBuilder_Constraints(t *testing.T) { }, want: &cmpb.ConstraintProto{ EnforcementLiteral: []int32{int32(bv3.Index())}, - Constraint: &cmpb.ConstraintProto_ExactlyOne{ - &cmpb.BoolArgumentProto{ - Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, - }, - }, + Constraint: &cmpb.ConstraintProto_ExactlyOne{&cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, + }}, }, }, { @@ -975,11 +954,9 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_BoolOr{ - &cmpb.BoolArgumentProto{ - Literals: []int32{int32(bv1.Not().Index()), int32(bv2.Not().Index())}, - }, - }, + Constraint: &cmpb.ConstraintProto_BoolOr{&cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Not().Index()), int32(bv2.Not().Index())}, + }}, }, }, { @@ -991,13 +968,11 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Linear{ - &cmpb.LinearConstraintProto{ - Vars: []int32{int32(iv1.Index()), int32(bv1.Index())}, - Coeffs: []int64{1, 1}, - Domain: []int64{-5, -4, -2, -1, 6, 15}, - }, - }, + Constraint: &cmpb.ConstraintProto_Linear{&cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index()), int32(bv1.Index())}, + Coeffs: []int64{1, 1}, + Domain: []int64{-5, -4, -2, -1, 6, 15}, + }}, }, }, { @@ -1008,13 +983,11 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Linear{ - &cmpb.LinearConstraintProto{ - Vars: []int32{int32(iv1.Index()), int32(bv1.Index())}, - Coeffs: []int64{1, 1}, - Domain: []int64{2, 6}, - }, - }, + Constraint: 
&cmpb.ConstraintProto_Linear{&cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index()), int32(bv1.Index())}, + Coeffs: []int64{1, 1}, + Domain: []int64{2, 6}, + }}, }, }, { @@ -1025,13 +998,11 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Linear{ - &cmpb.LinearConstraintProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{1}, - Domain: []int64{10, 10}, - }, - }, + Constraint: &cmpb.ConstraintProto_Linear{&cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Domain: []int64{10, 10}, + }}, }, }, { @@ -1042,13 +1013,11 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Linear{ - &cmpb.LinearConstraintProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{1}, - Domain: []int64{math.MinInt64, 10}, - }, - }, + Constraint: &cmpb.ConstraintProto_Linear{&cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Domain: []int64{math.MinInt64, 10}, + }}, }, }, { @@ -1059,13 +1028,11 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Linear{ - &cmpb.LinearConstraintProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{1}, - Domain: []int64{math.MinInt64, 9}, - }, - }, + Constraint: &cmpb.ConstraintProto_Linear{&cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Domain: []int64{math.MinInt64, 9}, + }}, }, }, { @@ -1076,13 +1043,11 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Linear{ - &cmpb.LinearConstraintProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{1}, - Domain: []int64{10, 
math.MaxInt64}, - }, - }, + Constraint: &cmpb.ConstraintProto_Linear{&cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Domain: []int64{10, math.MaxInt64}, + }}, }, }, { @@ -1093,13 +1058,11 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Linear{ - &cmpb.LinearConstraintProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{1}, - Domain: []int64{11, math.MaxInt64}, - }, - }, + Constraint: &cmpb.ConstraintProto_Linear{&cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Domain: []int64{11, math.MaxInt64}, + }}, }, }, { @@ -1110,13 +1073,11 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Linear{ - &cmpb.LinearConstraintProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{1}, - Domain: []int64{math.MinInt64, 9, 11, math.MaxInt64}, - }, - }, + Constraint: &cmpb.ConstraintProto_Linear{&cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Domain: []int64{math.MinInt64, 9, 11, math.MaxInt64}, + }}, }, }, { @@ -1127,30 +1088,28 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_AllDiff{ - &cmpb.AllDifferentConstraintProto{ - Exprs: []*cmpb.LinearExpressionProto{ - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{1}, - }, - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(bv1.Index())}, - Coeffs: []int64{1}, - }, - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(bv2.Index())}, - Coeffs: []int64{-1}, - Offset: 1, - }, - &cmpb.LinearExpressionProto{ - Vars: []int32{}, - Coeffs: []int64{}, - Offset: 10, - }, + Constraint: &cmpb.ConstraintProto_AllDiff{&cmpb.AllDifferentConstraintProto{ 
+ Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(bv1.Index())}, + Coeffs: []int64{1}, + }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(bv2.Index())}, + Coeffs: []int64{-1}, + Offset: 1, + }, + &cmpb.LinearExpressionProto{ + Vars: []int32{}, + Coeffs: []int64{}, + Offset: 10, }, }, - }, + }}, }, }, { @@ -1161,13 +1120,11 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Element{ - &cmpb.ElementConstraintProto{ - Index: int32(iv1.Index()), - Target: int32(iv4.Index()), - Vars: []int32{int32(iv2.Index()), int32(iv3.Index())}, - }, - }, + Constraint: &cmpb.ConstraintProto_Element{&cmpb.ElementConstraintProto{ + Index: int32(iv1.Index()), + Target: int32(iv4.Index()), + Vars: []int32{int32(iv2.Index()), int32(iv3.Index())}, + }}, }, }, { @@ -1178,16 +1135,14 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Element{ - &cmpb.ElementConstraintProto{ - Index: int32(iv1.Index()), - Target: int32(iv4.Index()), - Vars: []int32{ - int32(model.NewConstant(10).Index()), - int32(model.NewConstant(20).Index()), - }, + Constraint: &cmpb.ConstraintProto_Element{&cmpb.ElementConstraintProto{ + Index: int32(iv1.Index()), + Target: int32(iv4.Index()), + Vars: []int32{ + int32(model.NewConstant(10).Index()), + int32(model.NewConstant(20).Index()), }, - }, + }}, }, }, { @@ -1198,12 +1153,10 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Inverse{ - &cmpb.InverseConstraintProto{ - FDirect: []int32{int32(iv1.Index()), int32(iv2.Index())}, - FInverse: []int32{int32(iv3.Index()), int32(iv4.Index())}, - }, - }, + 
Constraint: &cmpb.ConstraintProto_Inverse{&cmpb.InverseConstraintProto{ + FDirect: []int32{int32(iv1.Index()), int32(iv2.Index())}, + FInverse: []int32{int32(iv3.Index()), int32(iv4.Index())}, + }}, }, }, { @@ -1214,24 +1167,22 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_LinMax{ - &cmpb.LinearArgumentProto{ - Target: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, + Constraint: &cmpb.ConstraintProto_LinMax{&cmpb.LinearArgumentProto{ + Target: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{-1}, + }, + Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, Coeffs: []int64{-1}, }, - Exprs: []*cmpb.LinearExpressionProto{ - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv2.Index())}, - Coeffs: []int64{-1}, - }, - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv3.Index())}, - Coeffs: []int64{-1}, - }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv3.Index())}, + Coeffs: []int64{-1}, }, }, - }, + }}, }, }, { @@ -1242,24 +1193,22 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_LinMax{ - &cmpb.LinearArgumentProto{ - Target: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, + Constraint: &cmpb.ConstraintProto_LinMax{&cmpb.LinearArgumentProto{ + Target: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + }, + Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, Coeffs: []int64{1}, }, - Exprs: []*cmpb.LinearExpressionProto{ - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv2.Index())}, - Coeffs: []int64{1}, - }, - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv3.Index())}, - Coeffs: []int64{1}, - }, + 
&cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv3.Index())}, + Coeffs: []int64{1}, }, }, - }, + }}, }, }, { @@ -1270,24 +1219,22 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_IntProd{ - &cmpb.LinearArgumentProto{ - Target: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, + Constraint: &cmpb.ConstraintProto_IntProd{&cmpb.LinearArgumentProto{ + Target: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + }, + Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, Coeffs: []int64{1}, }, - Exprs: []*cmpb.LinearExpressionProto{ - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv2.Index())}, - Coeffs: []int64{1}, - }, - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv3.Index())}, - Coeffs: []int64{1}, - }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv3.Index())}, + Coeffs: []int64{1}, }, }, - }, + }}, }, }, { @@ -1298,24 +1245,22 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_IntDiv{ - &cmpb.LinearArgumentProto{ - Target: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, + Constraint: &cmpb.ConstraintProto_IntDiv{&cmpb.LinearArgumentProto{ + Target: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + }, + Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, Coeffs: []int64{1}, }, - Exprs: []*cmpb.LinearExpressionProto{ - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv2.Index())}, - Coeffs: []int64{1}, - }, - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv3.Index())}, - Coeffs: []int64{1}, - }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv3.Index())}, + Coeffs: []int64{1}, }, }, - }, + }}, }, }, 
{ @@ -1326,24 +1271,22 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_LinMax{ - &cmpb.LinearArgumentProto{ - Target: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, + Constraint: &cmpb.ConstraintProto_LinMax{&cmpb.LinearArgumentProto{ + Target: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + }, + Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, Coeffs: []int64{1}, }, - Exprs: []*cmpb.LinearExpressionProto{ - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv2.Index())}, - Coeffs: []int64{1}, - }, - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv2.Index())}, - Coeffs: []int64{-1}, - }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, + Coeffs: []int64{-1}, }, }, - }, + }}, }, }, { @@ -1354,24 +1297,22 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_IntMod{ - &cmpb.LinearArgumentProto{ - Target: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, + Constraint: &cmpb.ConstraintProto_IntMod{&cmpb.LinearArgumentProto{ + Target: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + }, + Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, Coeffs: []int64{1}, }, - Exprs: []*cmpb.LinearExpressionProto{ - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv2.Index())}, - Coeffs: []int64{1}, - }, - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv3.Index())}, - Coeffs: []int64{1}, - }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv3.Index())}, + Coeffs: []int64{1}, }, }, - }, + }}, }, }, { @@ -1382,11 +1323,9 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return 
m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_NoOverlap{ - &cmpb.NoOverlapConstraintProto{ - Intervals: []int32{int32(interval1.Index()), int32(interval2.Index())}, - }, - }, + Constraint: &cmpb.ConstraintProto_NoOverlap{&cmpb.NoOverlapConstraintProto{ + Intervals: []int32{int32(interval1.Index()), int32(interval2.Index())}, + }}, }, }, { @@ -1399,12 +1338,10 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_NoOverlap_2D{ - &cmpb.NoOverlap2DConstraintProto{ - XIntervals: []int32{int32(interval1.Index()), int32(interval3.Index())}, - YIntervals: []int32{int32(interval2.Index()), int32(interval4.Index())}, - }, - }, + Constraint: &cmpb.ConstraintProto_NoOverlap_2D{&cmpb.NoOverlap2DConstraintProto{ + XIntervals: []int32{int32(interval1.Index()), int32(interval3.Index())}, + YIntervals: []int32{int32(interval2.Index()), int32(interval4.Index())}, + }}, }, }, { @@ -1416,13 +1353,11 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Circuit{ - &cmpb.CircuitConstraintProto{ - Tails: []int32{0}, - Heads: []int32{1}, - Literals: []int32{int32(bv1.Index())}, - }, - }, + Constraint: &cmpb.ConstraintProto_Circuit{&cmpb.CircuitConstraintProto{ + Tails: []int32{0}, + Heads: []int32{1}, + Literals: []int32{int32(bv1.Index())}, + }}, }, }, { @@ -1434,13 +1369,11 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Routes{ - &cmpb.RoutesConstraintProto{ - Tails: []int32{0}, - Heads: []int32{1}, - Literals: []int32{int32(bv1.Index())}, - }, - }, + Constraint: &cmpb.ConstraintProto_Routes{&cmpb.RoutesConstraintProto{ + Tails: []int32{0}, + Heads: []int32{1}, + Literals: []int32{int32(bv1.Index())}, + }}, }, }, { 
@@ -1453,12 +1386,10 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Table{ - &cmpb.TableConstraintProto{ - Vars: []int32{int32(iv1.Index()), int32(iv2.Index())}, - Values: []int64{0, 2, 1, 3}, - }, - }, + Constraint: &cmpb.ConstraintProto_Table{&cmpb.TableConstraintProto{ + Vars: []int32{int32(iv1.Index()), int32(iv2.Index())}, + Values: []int64{0, 2, 1, 3}, + }}, }, }, { @@ -1470,24 +1401,22 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Reservoir{ - &cmpb.ReservoirConstraintProto{ - MinLevel: 10, - MaxLevel: 20, - TimeExprs: []*cmpb.LinearExpressionProto{ - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, - Coeffs: []int64{2}, - }, + Constraint: &cmpb.ConstraintProto_Reservoir{&cmpb.ReservoirConstraintProto{ + MinLevel: 10, + MaxLevel: 20, + TimeExprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{2}, }, - LevelChanges: []*cmpb.LinearExpressionProto{ - &cmpb.LinearExpressionProto{ - Offset: 15, - }, - }, - ActiveLiterals: []int32{int32(one.Index())}, }, - }, + LevelChanges: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Offset: 15, + }, + }, + ActiveLiterals: []int32{int32(one.Index())}, + }}, }, }, { @@ -1500,16 +1429,14 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Automaton{ - &cmpb.AutomatonConstraintProto{ - Vars: []int32{int32(iv1.Index()), int32(iv2.Index())}, - StartingState: 0, - FinalStates: []int64{5, 10}, - TransitionTail: []int64{0, 2}, - TransitionHead: []int64{1, 3}, - TransitionLabel: []int64{10, 15}, - }, - }, + Constraint: &cmpb.ConstraintProto_Automaton{&cmpb.AutomatonConstraintProto{ + Vars: 
[]int32{int32(iv1.Index()), int32(iv2.Index())}, + StartingState: 0, + FinalStates: []int64{5, 10}, + TransitionTail: []int64{0, 2}, + TransitionHead: []int64{1, 3}, + TransitionLabel: []int64{10, 15}, + }}, }, }, { @@ -1521,21 +1448,19 @@ func TestCpModelBuilder_Constraints(t *testing.T) { return m.GetConstraints()[c.Index()] }, want: &cmpb.ConstraintProto{ - Constraint: &cmpb.ConstraintProto_Cumulative{ - &cmpb.CumulativeConstraintProto{ - Capacity: &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv1.Index())}, + Constraint: &cmpb.ConstraintProto_Cumulative{&cmpb.CumulativeConstraintProto{ + Capacity: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + }, + Intervals: []int32{int32(interval1.Index())}, + Demands: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, Coeffs: []int64{1}, }, - Intervals: []int32{int32(interval1.Index())}, - Demands: []*cmpb.LinearExpressionProto{ - &cmpb.LinearExpressionProto{ - Vars: []int32{int32(iv2.Index())}, - Coeffs: []int64{1}, - }, - }, }, - }, + }}, }, }, } diff --git a/ortools/sat/go/cpmodel/cp_solver.go b/ortools/sat/go/cpmodel/cp_solver.go index 752b5d0562..e50a1f2f5b 100644 --- a/ortools/sat/go/cpmodel/cp_solver.go +++ b/ortools/sat/go/cpmodel/cp_solver.go @@ -18,10 +18,10 @@ import ( "sync" "unsafe" + "google.golang.org/protobuf/proto" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" - - "google.golang.org/protobuf/proto" ) /* diff --git a/ortools/sat/go/cpmodel/cp_solver_c.cc b/ortools/sat/go/cpmodel/cp_solver_c.cc index 1f5b36808f..e8bba37b13 100644 --- a/ortools/sat/go/cpmodel/cp_solver_c.cc +++ b/ortools/sat/go/cpmodel/cp_solver_c.cc @@ -14,13 +14,12 @@ #include "ortools/sat/go/cpmodel/cp_solver_c.h" #include -#include -#include "absl/status/status.h" -#include "absl/strings/internal/memutil.h" -#include "ortools/base/logging.h" +#include 
"absl/log/check.h" +#include "ortools/base/memutil.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_solver.h" +#include "ortools/sat/model.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/util/time_limit.h" @@ -28,13 +27,6 @@ namespace operations_research::sat { namespace { -char* memdup(const char* s, size_t slen) { - void* copy; - if ((copy = malloc(slen)) == nullptr) return nullptr; - memcpy(copy, s, slen); - return reinterpret_cast(copy); -} - CpSolverResponse solveWithParameters(std::atomic* const limit_reached, const CpModelProto& proto, const SatParameters& params) { @@ -81,7 +73,7 @@ void SolveCpInterruptible(void* const limit_reached, const void* creq, CHECK(res.SerializeToString(&res_str)); *cres_len = static_cast(res_str.size()); - *cres = memdup(res_str.data(), *cres_len); + *cres = strings::memdup(res_str.data(), *cres_len); CHECK(*cres != nullptr); } diff --git a/ortools/sat/samples/assumptions_sample_sat.go b/ortools/sat/samples/assumptions_sample_sat.go index 56f700c5d9..c46d994a91 100644 --- a/ortools/sat/samples/assumptions_sample_sat.go +++ b/ortools/sat/samples/assumptions_sample_sat.go @@ -17,8 +17,8 @@ package main import ( "fmt" - "github.com/golang/glog" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" "ortools/sat/go/cpmodel" ) @@ -62,6 +62,6 @@ func assumptionsSampleSat() error { func main() { if err := assumptionsSampleSat(); err != nil { - glog.Exitf("assumptionsSampleSat returned with error: %v", err) + log.Exitf("assumptionsSampleSat returned with error: %v", err) } } diff --git a/ortools/sat/samples/binpacking_problem_sat.go b/ortools/sat/samples/binpacking_problem_sat.go index 5ba4215479..96c1251cd4 100644 --- a/ortools/sat/samples/binpacking_problem_sat.go +++ b/ortools/sat/samples/binpacking_problem_sat.go @@ -18,7 +18,7 @@ package main import ( "fmt" - "github.com/golang/glog" + log "github.com/golang/glog" 
"ortools/sat/go/cpmodel" ) @@ -30,7 +30,7 @@ const ( ) type item struct { - Cost, Copies int64_t + Cost, Copies int64 } func binpackingProblemSat() error { @@ -116,6 +116,6 @@ func binpackingProblemSat() error { func main() { if err := binpackingProblemSat(); err != nil { - glog.Exitf("binpackingProblemSat returned with error: %v", err) + log.Exitf("binpackingProblemSat returned with error: %v", err) } } diff --git a/ortools/sat/samples/boolean_product_sample_sat.go b/ortools/sat/samples/boolean_product_sample_sat.go index 874294ebd8..8d724195c7 100644 --- a/ortools/sat/samples/boolean_product_sample_sat.go +++ b/ortools/sat/samples/boolean_product_sample_sat.go @@ -17,10 +17,10 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" + log "github.com/golang/glog" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb "ortools/sat/sat_parameters_go_proto" ) func booleanProductSample() error { @@ -68,6 +68,6 @@ func booleanProductSample() error { func main() { err := booleanProductSample() if err != nil { - glog.Exitf("booleanProductSample returned with error: %v", err) + log.Exitf("booleanProductSample returned with error: %v", err) } } diff --git a/ortools/sat/samples/channeling_sample_sat.go b/ortools/sat/samples/channeling_sample_sat.go index 88db3279f9..dab99823e6 100644 --- a/ortools/sat/samples/channeling_sample_sat.go +++ b/ortools/sat/samples/channeling_sample_sat.go @@ -17,11 +17,11 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb "ortools/sat/sat_parameters_go_proto" ) func channelingSampleSat() error { @@ -76,6 +76,6 @@ func 
channelingSampleSat() error { func main() { if err := channelingSampleSat(); err != nil { - glog.Exitf("channelingSampleSat returned with error: %v", err) + log.Exitf("channelingSampleSat returned with error: %v", err) } } diff --git a/ortools/sat/samples/earliness_tardiness_cost_sample_sat.go b/ortools/sat/samples/earliness_tardiness_cost_sample_sat.go index 43f7c2f181..ce4135506c 100644 --- a/ortools/sat/samples/earliness_tardiness_cost_sample_sat.go +++ b/ortools/sat/samples/earliness_tardiness_cost_sample_sat.go @@ -18,11 +18,11 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb "ortools/sat/sat_parameters_go_proto" ) const ( @@ -85,6 +85,6 @@ func earlinessTardinessCostSampleSat() error { func main() { if err := earlinessTardinessCostSampleSat(); err != nil { - glog.Exitf("earlinessTardinessCostSampleSat returned with error: %v", err) + log.Exitf("earlinessTardinessCostSampleSat returned with error: %v", err) } } diff --git a/ortools/sat/samples/interval_sample_sat.go b/ortools/sat/samples/interval_sample_sat.go index e0e2776631..5a3a254591 100644 --- a/ortools/sat/samples/interval_sample_sat.go +++ b/ortools/sat/samples/interval_sample_sat.go @@ -17,7 +17,7 @@ package main import ( "fmt" - "github.com/golang/glog" + log "github.com/golang/glog" "ortools/sat/go/cpmodel" ) @@ -53,6 +53,6 @@ func intervalSampleSat() error { func main() { if err := intervalSampleSat(); err != nil { - glog.Exitf("intervalSampleSat returned with error: %v", err) + log.Exitf("intervalSampleSat returned with error: %v", err) } } diff --git a/ortools/sat/samples/literal_sample_sat.go b/ortools/sat/samples/literal_sample_sat.go index be171a9161..7cfec06ccb 100644 --- 
a/ortools/sat/samples/literal_sample_sat.go +++ b/ortools/sat/samples/literal_sample_sat.go @@ -15,7 +15,7 @@ package main import ( - "github.com/golang/glog" + log "github.com/golang/glog" "ortools/sat/go/cpmodel" ) @@ -25,7 +25,7 @@ func literalSampleSat() { x := model.NewBoolVar().WithName("x") notX := x.Not() - glog.Infof("x = %d, x.Not() = %d", x.Index(), notX.Index()) + log.Infof("x = %d, x.Not() = %d", x.Index(), notX.Index()) } func main() { diff --git a/ortools/sat/samples/no_overlap_sample_sat.go b/ortools/sat/samples/no_overlap_sample_sat.go index ce5fccab1e..e69fc2a0cc 100644 --- a/ortools/sat/samples/no_overlap_sample_sat.go +++ b/ortools/sat/samples/no_overlap_sample_sat.go @@ -17,8 +17,8 @@ package main import ( "fmt" - "github.com/golang/glog" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" "ortools/sat/go/cpmodel" ) @@ -85,6 +85,6 @@ func noOverlapSampleSat() error { func main() { if err := noOverlapSampleSat(); err != nil { - glog.Exitf("noOverlapSampleSat returned with error: %v", err) + log.Exitf("noOverlapSampleSat returned with error: %v", err) } } diff --git a/ortools/sat/samples/nqueens_sat.go b/ortools/sat/samples/nqueens_sat.go index b2a7b8323c..198237f6bd 100644 --- a/ortools/sat/samples/nqueens_sat.go +++ b/ortools/sat/samples/nqueens_sat.go @@ -17,7 +17,7 @@ package main import ( "fmt" - "github.com/golang/glog" + log "github.com/golang/glog" "ortools/sat/go/cpmodel" ) @@ -30,7 +30,7 @@ func nQueensSat() error { // of the board. The value of each variable is the row that the queen is in. var queenRows []cpmodel.LinearArgument for i := 0; i < boardSize; i++ { - queenRows = append(queenRows, model.NewIntVar(0, int64_t(boardSize-1))) + queenRows = append(queenRows, model.NewIntVar(0, int64(boardSize-1))) } // The following sets the constraint that all queens are in different rows. 
@@ -40,8 +40,8 @@ func nQueensSat() error { var diag1 []cpmodel.LinearArgument var diag2 []cpmodel.LinearArgument for i := 0; i < boardSize; i++ { - diag1 = append(diag1, cpmodel.NewConstant(int64_t(i)).Add(queenRows[i])) - diag2 = append(diag2, cpmodel.NewConstant(int64_t(-i)).Add(queenRows[i])) + diag1 = append(diag1, cpmodel.NewConstant(int64(i)).Add(queenRows[i])) + diag2 = append(diag2, cpmodel.NewConstant(int64(-i)).Add(queenRows[i])) } model.AddAllDifferent(diag1...) model.AddAllDifferent(diag2...) @@ -59,7 +59,7 @@ func nQueensSat() error { fmt.Printf("Objective: %v\n", response.GetObjectiveValue()) fmt.Printf("Solution:\n") - for i := int64_t(0); i < boardSize; i++ { + for i := int64(0); i < boardSize; i++ { for j := 0; j < boardSize; j++ { if cpmodel.SolutionIntegerValue(response, queenRows[j]) == i { fmt.Print("Q") @@ -76,6 +76,6 @@ func nQueensSat() error { func main() { err := nQueensSat() if err != nil { - glog.Exitf("nQueensSat returned with error: %v", err) + log.Exitf("nQueensSat returned with error: %v", err) } } diff --git a/ortools/sat/samples/nurses_sat.go b/ortools/sat/samples/nurses_sat.go index 59ffa83e8a..c8fbb49284 100644 --- a/ortools/sat/samples/nurses_sat.go +++ b/ortools/sat/samples/nurses_sat.go @@ -17,7 +17,7 @@ package main import ( "fmt" - "github.com/golang/glog" + log "github.com/golang/glog" "ortools/sat/go/cpmodel" ) @@ -121,6 +121,6 @@ func nursesSat() error { func main() { if err := nursesSat(); err != nil { - glog.Exitf("nursesSat returned with error: %v", err) + log.Exitf("nursesSat returned with error: %v", err) } } diff --git a/ortools/sat/samples/optional_interval_sample_sat.go b/ortools/sat/samples/optional_interval_sample_sat.go index b586dce62d..2f50f8a411 100644 --- a/ortools/sat/samples/optional_interval_sample_sat.go +++ b/ortools/sat/samples/optional_interval_sample_sat.go @@ -18,7 +18,7 @@ package main import ( "fmt" - "github.com/golang/glog" + log "github.com/golang/glog" "ortools/sat/go/cpmodel" ) @@ -51,6 
+51,6 @@ func optionalIntervalSampleSat() error { func main() { if err := optionalIntervalSampleSat(); err != nil { - glog.Exitf("optionalIntervalSampleSat returned with error: %v", err) + log.Exitf("optionalIntervalSampleSat returned with error: %v", err) } } diff --git a/ortools/sat/samples/rabbits_and_pheasants_sat.go b/ortools/sat/samples/rabbits_and_pheasants_sat.go index f00a76adb6..c828874a16 100644 --- a/ortools/sat/samples/rabbits_and_pheasants_sat.go +++ b/ortools/sat/samples/rabbits_and_pheasants_sat.go @@ -18,8 +18,8 @@ package main import ( "fmt" - "github.com/golang/glog" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" "ortools/sat/go/cpmodel" ) @@ -58,6 +58,6 @@ func rabbitsAndPheasants() error { func main() { if err := rabbitsAndPheasants(); err != nil { - glog.Exitf("rabbitsAndPheasants returned with error: %v", err) + log.Exitf("rabbitsAndPheasants returned with error: %v", err) } } diff --git a/ortools/sat/samples/ranking_sample_sat.go b/ortools/sat/samples/ranking_sample_sat.go index cb1a398984..d81c01e1fd 100644 --- a/ortools/sat/samples/ranking_sample_sat.go +++ b/ortools/sat/samples/ranking_sample_sat.go @@ -17,8 +17,8 @@ package main import ( "fmt" - "github.com/golang/glog" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" "ortools/sat/go/cpmodel" ) @@ -89,7 +89,7 @@ func rankingSampleSat() error { for t := 0; t < numTasks; t++ { start := model.NewIntVarFromDomain(horizon) - duration := cpmodel.NewConstant(int64_t(t + 1)) + duration := cpmodel.NewConstant(int64(t + 1)) end := model.NewIntVarFromDomain(horizon) var presence cpmodel.BoolVar if t < numTasks/2 { @@ -160,6 +160,6 @@ func rankingSampleSat() error { func main() { if err := rankingSampleSat(); err != nil { - glog.Exitf("rankingSampleSat returned with error: %v", err) + log.Exitf("rankingSampleSat returned with error: 
%v", err) } } diff --git a/ortools/sat/samples/search_for_all_solutions_sample_sat.go b/ortools/sat/samples/search_for_all_solutions_sample_sat.go index 17c4e3f84d..2324f031de 100644 --- a/ortools/sat/samples/search_for_all_solutions_sample_sat.go +++ b/ortools/sat/samples/search_for_all_solutions_sample_sat.go @@ -18,10 +18,10 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" + log "github.com/golang/glog" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb "ortools/sat/sat_parameters_go_proto" ) func searchForAllSolutionsSampleSat() error { @@ -63,6 +63,6 @@ func searchForAllSolutionsSampleSat() error { func main() { if err := searchForAllSolutionsSampleSat(); err != nil { - glog.Exitf("searchForAllSolutionsSampleSat returned with error: %v", err) + log.Exitf("searchForAllSolutionsSampleSat returned with error: %v", err) } } diff --git a/ortools/sat/samples/simple_sat_program.go b/ortools/sat/samples/simple_sat_program.go index 0430d34fb4..cf48a94284 100644 --- a/ortools/sat/samples/simple_sat_program.go +++ b/ortools/sat/samples/simple_sat_program.go @@ -17,8 +17,8 @@ package main import ( "fmt" - "github.com/golang/glog" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" "ortools/sat/go/cpmodel" ) @@ -55,6 +55,6 @@ func simpleSatProgram() error { func main() { if err := simpleSatProgram(); err != nil { - glog.Exitf("simpleSatProgram returned with error: %v", err) + log.Exitf("simpleSatProgram returned with error: %v", err) } } diff --git a/ortools/sat/samples/solution_hinting_sample_sat.go b/ortools/sat/samples/solution_hinting_sample_sat.go index 59b2766c2b..70f15710c9 100644 --- a/ortools/sat/samples/solution_hinting_sample_sat.go +++ b/ortools/sat/samples/solution_hinting_sample_sat.go @@ -17,8 +17,8 @@ package main import ( "fmt" - 
"github.com/golang/glog" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" "ortools/sat/go/cpmodel" ) @@ -60,6 +60,6 @@ func solutionHintingSampleSat() error { func main() { if err := solutionHintingSampleSat(); err != nil { - glog.Exitf("solutionHintingSampleSat returned with error: %v", err) + log.Exitf("solutionHintingSampleSat returned with error: %v", err) } } diff --git a/ortools/sat/samples/solve_and_print_intermediate_solutions_sample_sat.go b/ortools/sat/samples/solve_and_print_intermediate_solutions_sample_sat.go index f1a0d86507..7885073bac 100644 --- a/ortools/sat/samples/solve_and_print_intermediate_solutions_sample_sat.go +++ b/ortools/sat/samples/solve_and_print_intermediate_solutions_sample_sat.go @@ -17,10 +17,10 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" + log "github.com/golang/glog" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb "ortools/sat/sat_parameters_go_proto" ) func solveAndPrintIntermediateSolutionsSampleSat() error { @@ -65,6 +65,6 @@ func solveAndPrintIntermediateSolutionsSampleSat() error { func main() { if err := solveAndPrintIntermediateSolutionsSampleSat(); err != nil { - glog.Exitf("solveAndPrintIntermediateSolutionsSampleSat returned with error: %v", err) + log.Exitf("solveAndPrintIntermediateSolutionsSampleSat returned with error: %v", err) } } diff --git a/ortools/sat/samples/solve_with_time_limit_sample_sat.go b/ortools/sat/samples/solve_with_time_limit_sample_sat.go index 4fe04c95a8..b7f968b336 100644 --- a/ortools/sat/samples/solve_with_time_limit_sample_sat.go +++ b/ortools/sat/samples/solve_with_time_limit_sample_sat.go @@ -17,11 +17,11 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb 
"github.com/google/or-tools/ortools/sat/proto/cpmodel" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb "ortools/sat/sat_parameters_go_proto" ) func solveWithTimeLimitSampleSat() error { @@ -63,6 +63,6 @@ func solveWithTimeLimitSampleSat() error { func main() { if err := solveWithTimeLimitSampleSat(); err != nil { - glog.Exitf("solveWithTimeLimitSampleSat returned with error: %v", err) + log.Exitf("solveWithTimeLimitSampleSat returned with error: %v", err) } } diff --git a/ortools/sat/samples/step_function_sample_sat.go b/ortools/sat/samples/step_function_sample_sat.go index 5fb4e66f9f..b04a04d9ac 100644 --- a/ortools/sat/samples/step_function_sample_sat.go +++ b/ortools/sat/samples/step_function_sample_sat.go @@ -17,11 +17,11 @@ package main import ( "fmt" - "github.com/golang/glog" - "golang/protobuf/v2/proto/proto" - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" + "google.golang.org/protobuf/proto" "ortools/sat/go/cpmodel" - sppb "ortools/sat/sat_parameters_go_proto" ) func stepFunctionSampleSat() error { @@ -94,6 +94,6 @@ func stepFunctionSampleSat() error { func main() { if err := stepFunctionSampleSat(); err != nil { - glog.Exitf("stepFunctionSampleSat returned with error: %v", err) + log.Exitf("stepFunctionSampleSat returned with error: %v", err) } } diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 2cc412d4aa..870cf2cf8b 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -15,15 +15,15 @@ syntax = "proto2"; package operations_research.sat; +option csharp_namespace = "Google.OrTools.Sat"; +option go_package = "github.com/google/or-tools/ortools/sat/proto/satparameters"; option java_package = "com.google.ortools.sat"; option java_multiple_files = true; 
-option csharp_namespace = "Google.OrTools.Sat"; - // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 299 +// NEXT TAG: 300 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. From 9a3c8a2de6d56b9c4b81770d347981605874ad1a Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 27 Sep 2024 11:10:42 +0200 Subject: [PATCH 025/105] sat: export from google3 --- ortools/graph/BUILD.bazel | 2 + ortools/graph/cliques.cc | 127 ++++++++ ortools/graph/cliques.h | 82 +++++ ortools/graph/cliques_test.cc | 115 +++++++ ortools/sat/2d_orthogonal_packing_testing.cc | 2 + ortools/sat/2d_try_edge_propagator.cc | 307 +++++++++++++++++++ ortools/sat/2d_try_edge_propagator.h | 96 ++++++ ortools/sat/2d_try_edge_propagator_test.cc | 152 +++++++++ ortools/sat/BUILD.bazel | 32 ++ ortools/sat/clause.cc | 174 ++++++++--- ortools/sat/clause.h | 9 +- ortools/sat/cuts.cc | 8 +- ortools/sat/diffn.cc | 5 + ortools/sat/diffn_util.h | 12 + ortools/sat/linear_constraint_manager.h | 11 + ortools/sat/linear_programming_constraint.cc | 33 +- ortools/sat/linear_programming_constraint.h | 1 + ortools/sat/sat_parameters.proto | 2 + ortools/util/bitset.h | 26 +- 19 files changed, 1135 insertions(+), 61 deletions(-) create mode 100644 ortools/sat/2d_try_edge_propagator.cc create mode 100644 ortools/sat/2d_try_edge_propagator.h create mode 100644 ortools/sat/2d_try_edge_propagator_test.cc diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel index d00f8154c3..98e6c8a510 100644 --- a/ortools/graph/BUILD.bazel +++ b/ortools/graph/BUILD.bazel @@ -144,8 +144,10 @@ cc_library( "//ortools/base", "//ortools/base:int_type", "//ortools/base:strong_vector", + "//ortools/util:bitset", "//ortools/util:time_limit", "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/log:check", "@com_google_absl//absl/strings", ], ) diff --git 
a/ortools/graph/cliques.cc b/ortools/graph/cliques.cc index ed3ccc1b93..e9fcff0dd1 100644 --- a/ortools/graph/cliques.cc +++ b/ortools/graph/cliques.cc @@ -20,6 +20,8 @@ #include #include "absl/container/flat_hash_set.h" +#include "absl/log/check.h" +#include "ortools/util/bitset.h" namespace operations_research { namespace { @@ -262,4 +264,129 @@ void CoverArcsByCliques(std::function graph, int node_count, initial_candidates.get(), 0, node_count, &actual, &stop); } +void WeightedBronKerboschBitsetAlgorithm::Initialize(int num_nodes) { + work_ = 0; + weights_.assign(num_nodes, 0.0); + + // We need +1 in case the graph is complete and form a clique. + clique_.resize(num_nodes + 1); + clique_weight_.resize(num_nodes + 1); + left_to_process_.resize(num_nodes + 1); + x_.resize(num_nodes + 1); + + // Initialize to empty graph. + graph_.resize(num_nodes); + for (Bitset64& bitset : graph_) { + bitset.ClearAndResize(num_nodes); + } +} + +void WeightedBronKerboschBitsetAlgorithm:: + TakeTransitiveClosureOfImplicationGraph() { + // We use Floyd-Warshall algorithm. + const int num_nodes = weights_.size(); + for (int k = 0; k < num_nodes; ++k) { + // Loop over all the i => k, we can do that by looking at the not(k) => + // not(i). + for (const int i : graph_[k ^ 1]) { + // Now i also implies all the literals implied by k. + graph_[i].Union(graph_[k]); + } + } +} + +std::vector> WeightedBronKerboschBitsetAlgorithm::Run() { + clique_index_and_weight_.clear(); + std::vector> cliques; + + const int num_nodes = weights_.size(); + in_clique_.ClearAndResize(num_nodes); + + queue_.clear(); + + int depth = 0; + left_to_process_[0].ClearAndResize(num_nodes); + x_[0].ClearAndResize(num_nodes); + for (int i = 0; i < num_nodes; ++i) { + left_to_process_[0].Set(i); + queue_.push_back(i); + } + + // We run an iterative DFS where we push all possible next node to + // queue_. We just abort brutally if we hit the work limit. 
+ while (!queue_.empty() && work_ <= work_limit_) { + const int node = queue_.back(); + if (!in_clique_[node]) { + // We add this node to the clique. + in_clique_.Set(node); + clique_[depth] = node; + left_to_process_[depth].Clear(node); + x_[depth].Set(node); + + // Note that it might seems we don't need to keep both set since we + // only process nodes in order, but because of the pivot optim, while + // both set are sorted, they can be "interleaved". + ++depth; + work_ += num_nodes; + const double current_weight = weights_[node] + clique_weight_[depth - 1]; + clique_weight_[depth] = current_weight; + left_to_process_[depth].SetToIntersectionOf(left_to_process_[depth - 1], + graph_[node]); + x_[depth].SetToIntersectionOf(x_[depth - 1], graph_[node]); + + // Choose a pivot. We use the vertex with highest weight according to: + // Samuel Souza Britoa, Haroldo Gambini Santosa, "Preprocessing and + // Cutting Planes with Conflict Graphs", + // https://arxiv.org/pdf/1909.07780 + // but maybe random is more robust? + int pivot = -1; + double pivot_weight = -1.0; + for (const int candidate : x_[depth]) { + const double candidate_weight = weights_[candidate]; + if (candidate_weight > pivot_weight) { + pivot = candidate; + pivot_weight = candidate_weight; + } + } + double total_weight_left = 0.0; + for (const int candidate : left_to_process_[depth]) { + const double candidate_weight = weights_[candidate]; + if (candidate_weight > pivot_weight) { + pivot = candidate; + pivot_weight = candidate_weight; + } + total_weight_left += candidate_weight; + } + + // Heuristic: We can abort early if there is no way to reach the + // threshold here. + if (current_weight + total_weight_left < weight_threshold_) { + continue; + } + + if (pivot == -1 && current_weight >= weight_threshold_) { + // This clique is maximal. 
+ clique_index_and_weight_.push_back({cliques.size(), current_weight}); + cliques.emplace_back(clique_.begin(), clique_.begin() + depth); + continue; + } + + for (const int next : left_to_process_[depth]) { + if (graph_[pivot][next]) continue; // skip. + queue_.push_back(next); + } + } else { + // We finished exploring node. + // backtrack. + --depth; + DCHECK_GE(depth, 0); + DCHECK_EQ(clique_[depth], node); + in_clique_.Clear(node); + queue_.pop_back(); + } + } + + return cliques; +} + } // namespace operations_research diff --git a/ortools/graph/cliques.h b/ortools/graph/cliques.h index 7901566bf2..702a0a0909 100644 --- a/ortools/graph/cliques.h +++ b/ortools/graph/cliques.h @@ -36,6 +36,7 @@ #include "ortools/base/int_type.h" #include "ortools/base/logging.h" #include "ortools/base/strong_vector.h" +#include "ortools/util/bitset.h" #include "ortools/util/time_limit.h" namespace operations_research { @@ -358,6 +359,87 @@ class BronKerboschAlgorithm { TimeLimit* time_limit_; }; +// More specialized version used to separate clique-cuts in MIP solver. +// This finds all maximal clique with a weight greater than a given threshold. +// It also has computation limit. +// +// This implementation assumes small graph since we use a dense bitmask +// representation to encode the graph adjacency. So it shouldn't really be used +// with more than a few thousands nodes. +class WeightedBronKerboschBitsetAlgorithm { + public: + // Resets the class to an empty graph will all weights of zero. + // This also reset the work done. + void Initialize(int num_nodes); + + // Set the weight of a given node, must be in [0, num_nodes). + // Weights are assumed to be non-negative. + void SetWeight(int i, double weight) { weights_[i] = weight; } + + // Add an edge in the graph. + void AddEdge(int a, int b) { + graph_[a].Set(b); + graph_[b].Set(a); + } + + // We count the number of basic operations, and stop when we reach this limit. 
+ void SetWorkLimit(int64_t limit) { work_limit_ = limit; } + + // Set the minimum weight of the maximal cliques we are looking for. + void SetMinimumWeight(double min_weight) { weight_threshold_ = min_weight; } + + // This function is quite specific. It interprets node i as the negated + // literal of node i ^ 1. And all j in graph[i] as literal that are in at most + // two relation. So i implies all not(j) for all j in graph[i]. + // + // The transitive close runs in O(num_nodes ^ 3) in the worst case, but since + // we process 64 bits at the time, it is okay to run it for graph up to 1k + // nodes. + void TakeTransitiveClosureOfImplicationGraph(); + + // Runs the algo and returns all maximal clique with a weight above the + // configured thrheshold via SetMinimumWeight(). It is possible we reach the + // work limit before that. + std::vector> Run(); + + // Specific API where the index refer in the last result of Run(). + // This allows to select cliques when they are many. + std::vector>& GetMutableIndexAndWeight() { + return clique_index_and_weight_; + } + + int64_t WorkDone() const { return work_; } + + bool HasEdge(int i, int j) const { return graph_[i][j]; } + + private: + int64_t work_ = 0; + int64_t work_limit_ = std::numeric_limits::max(); + double weight_threshold_ = 0.0; + + std::vector weights_; + std::vector> graph_; + + // Iterative DFS queue. + std::vector queue_; + + // Current clique we are constructing. + // Note this is always of size num_nodes, the clique is in [0, depth) + Bitset64 in_clique_; + std::vector clique_; + + // We maintain the weight of the clique. We use a stack to avoid floating + // point issue with +/- weights many times. So clique_weight_[i] is the sum of + // weight from [0, i) of element of the cliques. + std::vector clique_weight_; + + // Correspond to P and X in BronKerbosch description. 
+ std::vector> left_to_process_; + std::vector> x_; + + std::vector> clique_index_and_weight_; +}; + template void BronKerboschAlgorithm::InitializeState(State* state) { DCHECK(state != nullptr); diff --git a/ortools/graph/cliques_test.cc b/ortools/graph/cliques_test.cc index d846be1d65..623d104ba3 100644 --- a/ortools/graph/cliques_test.cc +++ b/ortools/graph/cliques_test.cc @@ -14,6 +14,7 @@ #include "ortools/graph/cliques.h" #include +#include #include #include #include @@ -24,6 +25,7 @@ #include "absl/container/flat_hash_set.h" #include "absl/functional/bind_front.h" +#include "absl/log/check.h" #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" @@ -412,6 +414,44 @@ TEST(BronKerbosch, CompleteGraphCover) { EXPECT_EQ(10, all_cliques[0].size()); } +TEST(WeightedBronKerboschBitsetAlgorithmTest, CompleteGraph) { + const int num_nodes = 1000; + WeightedBronKerboschBitsetAlgorithm algo; + algo.Initialize(num_nodes); + for (int i = 0; i < num_nodes; ++i) { + for (int j = i + 1; j < num_nodes; ++j) { + algo.AddEdge(i, j); + } + } + std::vector> cliques = algo.Run(); + EXPECT_EQ(cliques.size(), 1); + for (const std::vector& clique : cliques) { + EXPECT_EQ(num_nodes, clique.size()); + } +} + +TEST(WeightedBronKerboschBitsetAlgorithmTest, ImplicationGraphClosure) { + const int num_nodes = 10; + WeightedBronKerboschBitsetAlgorithm algo; + algo.Initialize(num_nodes); + for (int i = 0; i + 2 < num_nodes; i += 2) { + const int j = i + 2; + algo.AddEdge(i, j ^ 1); // i => j + } + algo.TakeTransitiveClosureOfImplicationGraph(); + for (int i = 0; i < num_nodes; ++i) { + for (int j = 0; j < num_nodes; ++j) { + if (i % 2 == 0 && j % 2 == 0) { + if (j > i) { + EXPECT_TRUE(algo.HasEdge(i, j ^ 1)); + } else { + EXPECT_FALSE(algo.HasEdge(i, j ^ 1)); + } + } + } + } +} + TEST(BronKerbosch, EmptyGraphCover) { auto graph = EmptyGraph; CliqueReporter reporter; @@ -477,6 +517,50 @@ TEST(BronKerboschAlgorithmTest, FullKPartiteGraph) { } } 
+TEST(WeightedBronKerboschBitsetAlgorithmTest, FullKPartiteGraph) { + const int kNumPartitions[] = {2, 3, 4, 5, 6, 7}; + for (const int num_partitions : kNumPartitions) { + SCOPED_TRACE(absl::StrCat("num_partitions = ", num_partitions)); + WeightedBronKerboschBitsetAlgorithm algo; + + const int num_nodes = num_partitions * num_partitions; + algo.Initialize(num_nodes); + + for (int i = 0; i < num_nodes; ++i) { + for (int j = i + 1; j < num_nodes; ++j) { + if (FullKPartiteGraph(num_partitions, i, j)) algo.AddEdge(i, j); + } + } + + std::vector> cliques = algo.Run(); + EXPECT_EQ(cliques.size(), pow(num_partitions, num_partitions)); + for (const std::vector& clique : cliques) { + EXPECT_EQ(num_partitions, clique.size()); + } + } +} + +TEST(WeightedBronKerboschBitsetAlgorithmTest, ModuloGraph) { + int num_partitions = 50; + int partition_size = 100; + WeightedBronKerboschBitsetAlgorithm algo; + + const int num_nodes = num_partitions * partition_size; + algo.Initialize(num_nodes); + + for (int i = 0; i < num_nodes; ++i) { + for (int j = i + 1; j < num_nodes; ++j) { + if (ModuloGraph(num_partitions, i, j)) algo.AddEdge(i, j); + } + } + + std::vector> cliques = algo.Run(); + EXPECT_EQ(cliques.size(), num_partitions); + for (const std::vector& clique : cliques) { + EXPECT_EQ(partition_size, clique.size()); + } +} + // The following two tests run the Bron-Kerbosch algorithm with wall time // limit and deterministic time limit. They use a full 15-partite graph with // a one second time limit. 
@@ -590,6 +674,37 @@ BENCHMARK(BM_FindCliquesInModuloGraphWithBronKerboschAlgorithm) ->ArgPair(500, 10) ->ArgPair(1000, 5); +void BM_FindCliquesInModuloGraphWithBitsetBK(benchmark::State& state) { + int num_partitions = state.range(0); + int partition_size = state.range(1); + const int kExpectedNumCliques = num_partitions; + const int kExpectedCliqueSize = partition_size; + const int num_nodes = num_partitions * partition_size; + for (auto _ : state) { + WeightedBronKerboschBitsetAlgorithm algo; + algo.Initialize(num_nodes); + for (int i = 0; i < num_nodes; ++i) { + for (int j = i + 1; j < num_nodes; ++j) { + if (ModuloGraph(num_partitions, i, j)) algo.AddEdge(i, j); + } + } + + std::vector> cliques = algo.Run(); + EXPECT_EQ(cliques.size(), kExpectedNumCliques); + for (const std::vector& clique : cliques) { + EXPECT_EQ(kExpectedCliqueSize, clique.size()); + } + } +} + +BENCHMARK(BM_FindCliquesInModuloGraphWithBitsetBK) + ->ArgPair(5, 1000) + ->ArgPair(10, 500) + ->ArgPair(50, 100) + ->ArgPair(100, 50) + ->ArgPair(500, 10) + ->ArgPair(1000, 5); + // A benchmark that finds all maximal cliques in a 7-partite graph (a graph // where the nodes are divided into 7 groups of size 7; each node is connected // to all nodes in other groups but to no node in the same group). 
This graph diff --git a/ortools/sat/2d_orthogonal_packing_testing.cc b/ortools/sat/2d_orthogonal_packing_testing.cc index 597e718db3..4fc9789b1c 100644 --- a/ortools/sat/2d_orthogonal_packing_testing.cc +++ b/ortools/sat/2d_orthogonal_packing_testing.cc @@ -158,10 +158,12 @@ std::vector MakeItemsFromRectangles( ranges.reserve(rectangles.size()); const int max_slack_x = slack_factor * size_max_x.value(); const int max_slack_y = slack_factor * size_max_y.value(); + int count = 0; for (const Rectangle& rec : rectangles) { RectangleInRange range; range.x_size = rec.x_max - rec.x_min; range.y_size = rec.y_max - rec.y_min; + range.box_index = count++; range.bounding_area = { .x_min = rec.x_min - IntegerValue(absl::Uniform(random, 0, max_slack_x)), diff --git a/ortools/sat/2d_try_edge_propagator.cc b/ortools/sat/2d_try_edge_propagator.cc new file mode 100644 index 0000000000..0d847090b8 --- /dev/null +++ b/ortools/sat/2d_try_edge_propagator.cc @@ -0,0 +1,307 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/2d_try_edge_propagator.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "ortools/base/logging.h" +#include "ortools/sat/diffn_util.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/intervals.h" +#include "ortools/sat/model.h" +#include "ortools/sat/synchronization.h" + +namespace operations_research { +namespace sat { + +int TryEdgeRectanglePropagator::RegisterWith(GenericLiteralWatcher* watcher) { + const int id = watcher->Register(this); + x_.WatchAllTasks(id); + y_.WatchAllTasks(id); + watcher->NotifyThatPropagatorMayNotReachFixedPointInOnePass(id); + return id; +} + +TryEdgeRectanglePropagator::~TryEdgeRectanglePropagator() { + if (!VLOG_IS_ON(1)) return; + std::vector> stats; + stats.push_back({"TryEdgeRectanglePropagator/called", num_calls_}); + stats.push_back({"TryEdgeRectanglePropagator/conflicts", num_conflicts_}); + stats.push_back( + {"TryEdgeRectanglePropagator/propagations", num_propagations_}); + stats.push_back({"TryEdgeRectanglePropagator/cache_hits", num_cache_hits_}); + stats.push_back( + {"TryEdgeRectanglePropagator/cache_misses", num_cache_misses_}); + + shared_stats_->AddStats(stats); +} + +void TryEdgeRectanglePropagator::PopulateActiveBoxRanges() { + const int num_boxes = x_.NumTasks(); + active_box_ranges_.clear(); + active_box_ranges_.reserve(num_boxes); + for (int box = 0; box < num_boxes; ++box) { + if (x_.SizeMin(box) == 0 || y_.SizeMin(box) == 0) continue; + if (!x_.IsPresent(box) || !y_.IsPresent(box)) continue; + + active_box_ranges_.push_back(RectangleInRange{ + .box_index = box, + .bounding_area = {.x_min = x_.StartMin(box), + .x_max = x_.StartMax(box) + x_.SizeMin(box), + .y_min = y_.StartMin(box), + .y_max = y_.StartMax(box) + y_.SizeMin(box)}, + .x_size = x_.SizeMin(box), + .y_size = y_.SizeMin(box)}); + } + max_box_index_ = num_boxes - 1; +} + +bool TryEdgeRectanglePropagator::CanPlace( + int box_index, + const 
std::pair& position) const { + const Rectangle placed_box = { + .x_min = position.first, + .x_max = position.first + active_box_ranges_[box_index].x_size, + .y_min = position.second, + .y_max = position.second + active_box_ranges_[box_index].y_size}; + for (int i = 0; i < active_box_ranges_.size(); ++i) { + if (i == box_index) continue; + const RectangleInRange& box_reason = active_box_ranges_[i]; + const Rectangle mandatory_region = box_reason.GetMandatoryRegion(); + if (mandatory_region != Rectangle::GetEmpty() && + !mandatory_region.IsDisjoint(placed_box)) { + return false; + } + } + return true; +} + +bool TryEdgeRectanglePropagator::Propagate() { + if (!x_.SynchronizeAndSetTimeDirection(x_is_forward_)) return false; + if (!y_.SynchronizeAndSetTimeDirection(y_is_forward_)) return false; + + num_calls_++; + + PopulateActiveBoxRanges(); + + if (cached_y_hint_.size() <= max_box_index_) { + cached_y_hint_.resize(max_box_index_ + 1, + std::numeric_limits::max()); + } + + if (active_box_ranges_.size() < 2) { + return true; + } + + // Our algo is quadratic, so we don't want to run it on really large problems. + if (active_box_ranges_.size() > 1000) { + return true; + } + + potential_x_positions_.clear(); + potential_y_positions_.clear(); + std::vector>> found_propagations; + for (const RectangleInRange& box : active_box_ranges_) { + const Rectangle mandatory_region = box.GetMandatoryRegion(); + if (mandatory_region == Rectangle::GetEmpty()) { + continue; + } + potential_x_positions_.push_back(mandatory_region.x_max); + potential_y_positions_.push_back(mandatory_region.y_max); + } + std::sort(potential_x_positions_.begin(), potential_x_positions_.end()); + std::sort(potential_y_positions_.begin(), potential_y_positions_.end()); + + for (int i = 0; i < active_box_ranges_.size(); ++i) { + const RectangleInRange& box = active_box_ranges_[i]; + + // For each box, we need to answer whether there exist some y for which + // (x_min, y) is not in conflict with any other box. 
If there is no such y, + // we can propagate a larger lower bound on x. Now, for the vast majority of + // cases there is nothing to propagate, so we want to find the y that makes + // (x_min, y) a valid placement as fast as possible. Now, since things don't + // change that often we try the last y value that was a valid placement for + // this box. This is just a hint: if it is not a valid placement, we will + // try all "interesting" y values before concluding that no such y exists. + const IntegerValue cached_y_hint = cached_y_hint_[box.box_index]; + if (cached_y_hint >= box.bounding_area.y_min && + cached_y_hint <= box.bounding_area.y_max - box.y_size) { + if (CanPlace(i, {box.bounding_area.x_min, cached_y_hint})) { + num_cache_hits_++; + continue; + } + } + num_cache_misses_++; + if (CanPlace(i, {box.bounding_area.x_min, box.bounding_area.y_min})) { + cached_y_hint_[box.box_index] = box.bounding_area.y_min; + continue; + } + + bool placed_at_x_min = false; + const int y_start = + absl::c_lower_bound(potential_y_positions_, box.bounding_area.y_min) - + potential_y_positions_.begin(); + for (int j = y_start; j < potential_y_positions_.size(); ++j) { + if (potential_y_positions_[j] > box.bounding_area.y_max - box.y_size) { + // potential_y_positions is sorted, so we can stop here. + break; + } + if (CanPlace(i, {box.bounding_area.x_min, potential_y_positions_[j]})) { + placed_at_x_min = true; + cached_y_hint_[box.box_index] = potential_y_positions_[j]; + break; + } + } + if (placed_at_x_min) continue; + + // We could not find any placement of the box at its current lower bound! + // Thus, we are sure we have something to propagate. Let's find the new + // lower bound (or a conflict). Note that the code below is much less + // performance critical than the code above, since it only triggers on + // propagations.
+ std::optional new_x_min = std::nullopt; + for (int j = 0; j < potential_x_positions_.size(); ++j) { + if (potential_x_positions_[j] < box.bounding_area.x_min) { + continue; + } + if (potential_x_positions_[j] > box.bounding_area.x_max - box.x_size) { + continue; + } + if (CanPlace(i, {potential_x_positions_[j], box.bounding_area.y_min})) { + new_x_min = potential_x_positions_[j]; + break; + } + for (int k = y_start; k < potential_y_positions_.size(); ++k) { + const IntegerValue potential_y_position = potential_y_positions_[k]; + if (potential_y_position > box.bounding_area.y_max - box.y_size) { + break; + } + if (CanPlace(i, {potential_x_positions_[j], potential_y_position})) { + // potential_x_positions is sorted, so the first we found is the + // lowest one. + new_x_min = potential_x_positions_[j]; + break; + } + } + if (new_x_min.has_value()) { + break; + } + } + found_propagations.push_back({i, new_x_min}); + } + return ExplainAndPropagate(found_propagations); +} + +bool TryEdgeRectanglePropagator::ExplainAndPropagate( + const std::vector>>& + found_propagations) { + for (const auto& [box_index, new_x_min] : found_propagations) { + const RectangleInRange& box = active_box_ranges_[box_index]; + x_.ClearReason(); + y_.ClearReason(); + for (int j = 0; j < active_box_ranges_.size(); ++j) { + // Important: we also add to the reason the actual box we are changing the + // x_min. This is important, since we don't check if there are any + // feasible placement before its current x_min, so it needs to be part of + // the reason. + const RectangleInRange& box_reason = active_box_ranges_[j]; + if (j != box_index) { + const Rectangle mandatory_region = box_reason.GetMandatoryRegion(); + if (mandatory_region == Rectangle::GetEmpty()) { + continue; + } + // Don't add to the reason any box that was not participating in the + // placement decision. Ie., anything before the old x_min or after the + // new x_max. 
+ if (new_x_min.has_value() && + mandatory_region.x_min > *new_x_min + box_reason.x_size) { + continue; + } + if (new_x_min.has_value() && + mandatory_region.x_max < box.bounding_area.x_min) { + continue; + } + if (mandatory_region.y_min > box.bounding_area.y_max || + mandatory_region.y_max < box.bounding_area.y_min) { + continue; + } + } + + const int b = box_reason.box_index; + + x_.AddStartMinReason(b, box_reason.bounding_area.x_min); + y_.AddStartMinReason(b, box_reason.bounding_area.y_min); + + x_.AddStartMaxReason(b, + box_reason.bounding_area.x_max - box_reason.x_size); + y_.AddStartMaxReason(b, + box_reason.bounding_area.y_max - box_reason.y_size); + + x_.AddSizeMinReason(b); + y_.AddSizeMinReason(b); + + x_.AddPresenceReason(b); + y_.AddPresenceReason(b); + } + x_.ImportOtherReasons(y_); + if (new_x_min.has_value()) { + num_propagations_++; + if (!x_.IncreaseStartMin(box.box_index, *new_x_min)) { + return false; + } + } else { + num_conflicts_++; + return x_.ReportConflict(); + } + } + return true; +} + +void CreateAndRegisterTryEdgePropagator(SchedulingConstraintHelper* x, + SchedulingConstraintHelper* y, + Model* model, + GenericLiteralWatcher* watcher) { + TryEdgeRectanglePropagator* try_edge_propagator = + new TryEdgeRectanglePropagator(true, true, x, y, model); + watcher->SetPropagatorPriority(try_edge_propagator->RegisterWith(watcher), 5); + model->TakeOwnership(try_edge_propagator); + + TryEdgeRectanglePropagator* try_edge_propagator_mirrored = + new TryEdgeRectanglePropagator(false, true, x, y, model); + watcher->SetPropagatorPriority( + try_edge_propagator_mirrored->RegisterWith(watcher), 5); + model->TakeOwnership(try_edge_propagator_mirrored); + + TryEdgeRectanglePropagator* try_edge_propagator_swap = + new TryEdgeRectanglePropagator(true, true, y, x, model); + watcher->SetPropagatorPriority( + try_edge_propagator_swap->RegisterWith(watcher), 5); + model->TakeOwnership(try_edge_propagator_swap); + + TryEdgeRectanglePropagator* 
try_edge_propagator_swap_mirrored = + new TryEdgeRectanglePropagator(false, true, y, x, model); + watcher->SetPropagatorPriority( + try_edge_propagator_swap_mirrored->RegisterWith(watcher), 5); + model->TakeOwnership(try_edge_propagator_swap_mirrored); +} + +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/2d_try_edge_propagator.h b/ortools/sat/2d_try_edge_propagator.h new file mode 100644 index 0000000000..526ab040b5 --- /dev/null +++ b/ortools/sat/2d_try_edge_propagator.h @@ -0,0 +1,96 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_SAT_2D_TRY_EDGE_PROPAGATOR_H_ +#define OR_TOOLS_SAT_2D_TRY_EDGE_PROPAGATOR_H_ + +#include +#include +#include +#include + +#include "ortools/sat/diffn_util.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/intervals.h" +#include "ortools/sat/model.h" +#include "ortools/sat/synchronization.h" + +namespace operations_research { +namespace sat { + +// Propagator that for each box participating in a no_overlap_2d constraint +// tries to find the leftmost valid position that is compatible with all the +// other boxes. If none is found, it will propagate a conflict. Otherwise, if +// it is different from the current x_min, it will propagate the new x_min. +void CreateAndRegisterTryEdgePropagator(SchedulingConstraintHelper* x, + SchedulingConstraintHelper* y, + Model* model, + GenericLiteralWatcher* watcher); + +// Exposed for testing.
+class TryEdgeRectanglePropagator : public PropagatorInterface { + public: + TryEdgeRectanglePropagator(bool x_is_forward, bool y_is_forward, + SchedulingConstraintHelper* x, + SchedulingConstraintHelper* y, Model* model) + : x_(*x), + y_(*y), + shared_stats_(model->GetOrCreate()), + x_is_forward_(x_is_forward), + y_is_forward_(y_is_forward) {} + + ~TryEdgeRectanglePropagator() override; + + bool Propagate() final; + int RegisterWith(GenericLiteralWatcher* watcher); + + protected: + std::vector active_box_ranges_; + int max_box_index_ = 0; + + // Must also populate max_box_index_. + virtual void PopulateActiveBoxRanges(); + + virtual bool ExplainAndPropagate( + const std::vector>>& + found_propagations); + + private: + SchedulingConstraintHelper& x_; + SchedulingConstraintHelper& y_; + SharedStatistics* shared_stats_; + bool x_is_forward_; + bool y_is_forward_; + std::vector cached_y_hint_; + + std::vector potential_x_positions_; + std::vector potential_y_positions_; + + int64_t num_conflicts_ = 0; + int64_t num_propagations_ = 0; + int64_t num_calls_ = 0; + int64_t num_cache_hits_ = 0; + int64_t num_cache_misses_ = 0; + + bool CanPlace(int box_index, + const std::pair& position) const; + + TryEdgeRectanglePropagator(const TryEdgeRectanglePropagator&) = delete; + TryEdgeRectanglePropagator& operator=(const TryEdgeRectanglePropagator&) = + delete; +}; + +} // namespace sat +} // namespace operations_research + +#endif // OR_TOOLS_SAT_2D_TRY_EDGE_PROPAGATOR_H_ diff --git a/ortools/sat/2d_try_edge_propagator_test.cc b/ortools/sat/2d_try_edge_propagator_test.cc new file mode 100644 index 0000000000..f200431553 --- /dev/null +++ b/ortools/sat/2d_try_edge_propagator_test.cc @@ -0,0 +1,152 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/2d_try_edge_propagator.h" + +#include +#include +#include +#include + +#include "absl/random/random.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/sat/2d_orthogonal_packing_testing.h" +#include "ortools/sat/diffn_util.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/intervals.h" +#include "ortools/sat/model.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::_; +using ::testing::Each; +using ::testing::Eq; +using ::testing::Not; +using ::testing::Pair; +using ::testing::UnorderedElementsAre; + +class TryEdgeRectanglePropagatorForTest : public TryEdgeRectanglePropagator { + public: + explicit TryEdgeRectanglePropagatorForTest( + Model* model, std::vector active_box_ranges) + : TryEdgeRectanglePropagator(true, true, GetHelperFromModel(model), + GetHelperFromModel(model), model) { + active_box_ranges_ = std::move(active_box_ranges); + } + + void PopulateActiveBoxRanges() override { + max_box_index_ = 0; + for (const RectangleInRange& range : active_box_ranges_) { + if (range.box_index > max_box_index_) { + max_box_index_ = range.box_index; + } + } + } + + bool ExplainAndPropagate( + const std::vector>>& + found_propagations) override { + propagations_ = found_propagations; + return false; + } + + const std::vector>>& propagations() + const { + return propagations_; + } + + private: + static SchedulingConstraintHelper* GetHelperFromModel(Model* model) { + return model->GetOrCreate()->GetOrCreateHelper({}); + } + + Model model_; + 
IntervalsRepository* repository_ = model_.GetOrCreate(); + + std::vector>> propagations_; +}; + +TEST(TryEdgeRectanglePropagatorTest, Simple) { + // ********** + // ********** To place: + // ********** ++++++++ + // ********** ++++++++ + // ++++++++ + // ++++++++++ ++++++++ + // ++++++++++ + // ++++++++++ + // ++++++++++ + // + // The object to place can only be on the right of the two placed ones. + std::vector active_box_ranges = { + {.box_index = 0, + .bounding_area = {.x_min = 0, .x_max = 5, .y_min = 0, .y_max = 5}, + .x_size = 5, + .y_size = 5}, + {.box_index = 1, + .bounding_area = {.x_min = 0, .x_max = 5, .y_min = 6, .y_max = 11}, + .x_size = 5, + .y_size = 5}, + {.box_index = 2, + .bounding_area = {.x_min = 0, .x_max = 10, .y_min = 0, .y_max = 10}, + .x_size = 5, + .y_size = 5}, + }; + Model model; + TryEdgeRectanglePropagatorForTest propagator(&model, active_box_ranges); + propagator.Propagate(); + EXPECT_THAT(propagator.propagations(), + UnorderedElementsAre(Pair(2, IntegerValue(5)))); + + // Now the same thing, but makes it a conflict + active_box_ranges[2].bounding_area.x_min = 0; + active_box_ranges[2].bounding_area.x_max = 5; + TryEdgeRectanglePropagatorForTest propagator2(&model, active_box_ranges); + propagator2.Propagate(); + EXPECT_THAT(propagator2.propagations(), + UnorderedElementsAre(Pair(2, std::nullopt))); +} + +TEST(TryEdgeRectanglePropagatorTest, NoConflictForFeasible) { + constexpr int kNumRuns = 100; + absl::BitGen bit_gen; + Model model; + + for (int run = 0; run < kNumRuns; ++run) { + // Start by generating a feasible problem that we know the solution with + // some items fixed. 
+ std::vector rectangles = + GenerateNonConflictingRectanglesWithPacking({100, 100}, 60, bit_gen); + std::shuffle(rectangles.begin(), rectangles.end(), bit_gen); + const std::vector input_in_range = + MakeItemsFromRectangles(rectangles, 0.6, bit_gen); + + TryEdgeRectanglePropagatorForTest propagator(&model, input_in_range); + propagator.Propagate(); + EXPECT_THAT(propagator.propagations(), + Each(Pair(_, Not(Eq(std::nullopt))))); + + // Now check that the propagations are not in conflict with the initial + // solution. + for (const auto& [box_index, new_x_min] : propagator.propagations()) { + EXPECT_LE(*new_x_min, rectangles[box_index].x_min); + } + } +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 5077db303c..5b2214ff06 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -1039,6 +1039,7 @@ cc_library( "//ortools/base:stl_util", "//ortools/base:strong_vector", "//ortools/base:timer", + "//ortools/graph:cliques", "//ortools/graph:strongly_connected_components", "//ortools/util:bitset", "//ortools/util:stats", @@ -2582,6 +2583,36 @@ cc_library( ], ) +cc_library( + name = "2d_try_edge_propagator", + srcs = ["2d_try_edge_propagator.cc"], + hdrs = ["2d_try_edge_propagator.h"], + deps = [ + ":diffn_util", + ":integer", + ":intervals", + ":model", + ":synchronization", + "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/log", + ], +) + +cc_test( + name = "2d_try_edge_propagator_test", + srcs = ["2d_try_edge_propagator_test.cc"], + deps = [ + ":2d_orthogonal_packing_testing", + ":2d_try_edge_propagator", + ":diffn_util", + ":integer", + ":intervals", + ":model", + "//ortools/base:gmock_main", + "@com_google_absl//absl/random", + ], +) + cc_test( name = "diffn_util_test", size = "small", @@ -2611,6 +2642,7 @@ cc_library( hdrs = ["diffn.h"], deps = [ ":2d_orthogonal_packing", + ":2d_try_edge_propagator", ":cumulative_energy", 
":diffn_util", ":disjunctive", diff --git a/ortools/sat/clause.cc b/ortools/sat/clause.cc index fca9d35be0..8241b15329 100644 --- a/ortools/sat/clause.cc +++ b/ortools/sat/clause.cc @@ -1535,11 +1535,12 @@ bool BinaryImplicationGraph::ComputeTransitiveReduction(bool log_info) { // Also mark all the ones reachable through the root AMOs. if (root < at_most_ones_.size()) { + auto is_marked = is_marked_.BitsetView(); for (const int start : at_most_ones_[root]) { for (const Literal l : AtMostOne(start)) { if (l.Index() == root) continue; - if (!is_marked_[l.Negated()] && !is_redundant_[l.Negated()]) { - is_marked_.SetUnsafe(l.Negated()); + if (!is_marked[l.Negated()] && !is_redundant_[l.Negated()]) { + is_marked_.SetUnsafe(is_marked, l.Negated()); MarkDescendants(l.Negated()); } } @@ -1806,13 +1807,7 @@ std::vector BinaryImplicationGraph::ExpandAtMostOneWithWeight( const util_intops::StrongVector& expanded_lp_values) { std::vector clique(at_most_one.begin(), at_most_one.end()); std::vector intersection; - double clique_weight = 0.0; const int64_t old_work = work_done_in_mark_descendants_; - if (use_weight) { - for (const Literal l : clique) { - clique_weight += expanded_lp_values[l]; - } - } for (int i = 0; i < clique.size(); ++i) { // Do not spend too much time here. if (work_done_in_mark_descendants_ - old_work > 1e8) break; @@ -1829,26 +1824,15 @@ std::vector BinaryImplicationGraph::ExpandAtMostOneWithWeight( } int new_size = 0; - double intersection_weight = 0.0; is_marked_.Clear(clique[i]); is_marked_.Clear(clique[i].NegatedIndex()); for (const LiteralIndex index : intersection) { if (!is_marked_[index]) continue; intersection[new_size++] = index; - if (use_weight) { - intersection_weight += expanded_lp_values[index]; - } } intersection.resize(new_size); if (intersection.empty()) break; - // We can't generate a violated cut this way. This is because intersection - // contains all the possible ways to extend the current clique. 
- if (use_weight && clique_weight + intersection_weight <= 1.0) { - clique.clear(); - return clique; - } - // Expand? The negation of any literal in the intersection is a valid way to extend the clique. if (i + 1 == clique.size()) { @@ -1858,9 +1842,10 @@ for (int j = 0; j < intersection.size(); ++j) { // If we don't use weight, we prefer variable that comes first. const double lp = - use_weight ? 1.0 - expanded_lp_values[intersection[j]] + - absl::Uniform(*random_, 0.0, 1e-4) - : can_be_included.size() - intersection[j].value(); + use_weight + ? expanded_lp_values[Literal(intersection[j]).NegatedIndex()] + + absl::Uniform(*random_, 0.0, 1e-4) + : can_be_included.size() - intersection[j].value(); if (index == -1 || lp > max_lp) { index = j; max_lp = lp; @@ -1870,9 +1855,6 @@ clique.push_back(Literal(intersection[index]).Negated()); std::swap(intersection.back(), intersection[index]); intersection.pop_back(); - if (use_weight) { - clique_weight += expanded_lp_values[clique.back()]; - } } } } @@ -1891,16 +1873,26 @@ BinaryImplicationGraph::ExpandAtMostOneWithWeight( const util_intops::StrongVector& can_be_included, const util_intops::StrongVector& expanded_lp_values); +// This function and the generated cut serve two purposes: +// 1/ If a new clause of size 2 was discovered and not included in the LP, we +// will add it. +// 2/ The more classical clique cut separation algorithm +// +// Note that once 1/ is performed, any literal close to 1.0 in the LP shouldn't +// be in the same clique as a literal with positive weight. So for step 2, we +// only really need to consider fractional variables.
const std::vector>& BinaryImplicationGraph::GenerateAtMostOnesWithLargeWeight( - const std::vector& literals, - const std::vector& lp_values) { + absl::Span literals, absl::Span lp_values, + absl::Span reduced_costs) { // We only want to generate a cut with literals from the LP, not extra ones. const int num_literals = implications_.size(); util_intops::StrongVector can_be_included(num_literals, false); util_intops::StrongVector expanded_lp_values( num_literals, 0.0); + util_intops::StrongVector heuristic_weights( + num_literals, 0.0); const int size = literals.size(); for (int i = 0; i < size; ++i) { const Literal l = literals[i]; @@ -1910,6 +1902,23 @@ BinaryImplicationGraph::GenerateAtMostOnesWithLargeWeight( const double value = lp_values[i]; expanded_lp_values[l] = value; expanded_lp_values[l.NegatedIndex()] = 1.0 - value; + + // This is used for extending clique-cuts. + // In most situation, we will only extend the cuts with literal at zero, + // and we prefer "low" reduced cost first, so we negate it. Variable with + // high reduced costs will likely stay that way and are of less interest in + // a clique cut. At least that is my interpretation. + // + // TODO(user): For large problems or when we base the clique from a newly + // added and violated 2-clique, we might consider only a subset of + // fractional variables, so we might need to include fractional variable + // first, but then their rc should be zero, so it should be already kind of + // doing that. + // + // Remark: This seems to have a huge impact to the cut performance! + const double rc = reduced_costs[i]; + heuristic_weights[l] = -rc; + heuristic_weights[l.NegatedIndex()] = rc; } // We want highest sum first. @@ -1927,6 +1936,7 @@ BinaryImplicationGraph::GenerateAtMostOnesWithLargeWeight( // currently still statically add the initial implications, this will only add // cut based on newly learned binary clause. Or the one that were not added // to the relaxation in the first place. 
+ std::vector fractional_literals; for (int i = 0; i < size; ++i) { Literal current_literal = literals[i]; double current_value = lp_values[i]; @@ -1938,6 +1948,10 @@ BinaryImplicationGraph::GenerateAtMostOnesWithLargeWeight( current_value = 1.0 - current_value; } + if (current_value < 0.99) { + fractional_literals.push_back(current_literal); + } + // We consider only one candidate for each current_literal. LiteralIndex best = kNoLiteralIndex; double best_value = 0.0; @@ -1968,12 +1982,101 @@ BinaryImplicationGraph::GenerateAtMostOnesWithLargeWeight( // Expand to a maximal at most one each candidates before returning them. // Note that we only expand using literal from the LP. tmp_cuts_.clear(); - std::vector at_most_one; for (const Candidate& candidate : candidates) { - at_most_one = ExpandAtMostOneWithWeight( - {candidate.a, candidate.b}, can_be_included, expanded_lp_values); - if (!at_most_one.empty()) tmp_cuts_.push_back(at_most_one); + tmp_cuts_.push_back(ExpandAtMostOneWithWeight( + {candidate.a, candidate.b}, can_be_included, heuristic_weights)); } + + // Once we processed new implications, also add "proper" clique cuts. + // We can generate a small graph and separate cut efficiently there. + if (fractional_literals.size() > 1) { + // Lets permute this randomly and truncate if we have too many variables. + // Since we use bitset it is good to have a multiple of 64 there. + // + // TODO(user): Prefer more fractional variables. + const int max_graph_size = 1024; + if (fractional_literals.size() > max_graph_size) { + std::shuffle(fractional_literals.begin(), fractional_literals.end(), + *random_); + fractional_literals.resize(max_graph_size); + } + + bron_kerbosch_.Initialize(fractional_literals.size() * 2); + + // Prepare a dense mapping. 
+ int i = 0; + tmp_mapping_.resize(implications_.size(), -1); + for (const Literal l : fractional_literals) { + bron_kerbosch_.SetWeight(i, expanded_lp_values[l]); + tmp_mapping_[l] = i++; + bron_kerbosch_.SetWeight(i, expanded_lp_values[l.Negated()]); + tmp_mapping_[l.Negated()] = i++; + } + + // Copy the implication subgraph and remap it to a dense indexing. + // + // TODO(user): Treat at_most_one more efficiently. We can collect them + // and scan each of them just once. + for (const Literal base : fractional_literals) { + for (const Literal l : {base, base.Negated()}) { + const int from = tmp_mapping_[l]; + for (const Literal next : DirectImplications(l)) { + // l => next so (l + not(next) <= 1). + const int to = tmp_mapping_[next.Negated()]; + if (to != -1) { + bron_kerbosch_.AddEdge(from, to); + } + } + } + } + + // Before running the algo, compute the transitive closure. + // The graph shouldn't be too large, so this should be fast enough. + bron_kerbosch_.TakeTransitiveClosureOfImplicationGraph(); + + bron_kerbosch_.SetWorkLimit(1e8); + bron_kerbosch_.SetMinimumWeight(1.001); + std::vector> cliques = bron_kerbosch_.Run(); + + // If we have many candidates, we will only expand the first few with + // maximum weights. + const int max_num_per_batch = 5; + std::vector> with_weight = + bron_kerbosch_.GetMutableIndexAndWeight(); + if (with_weight.size() > max_num_per_batch) { + std::sort( + with_weight.begin(), with_weight.end(), + [](const std::pair& a, const std::pair& b) { + return a.second > b.second; + }); + with_weight.resize(max_num_per_batch); + } + + std::vector at_most_one; + for (const auto [index, weight] : with_weight) { + // Convert. + at_most_one.clear(); + for (const int i : cliques[index]) { + const Literal l = fractional_literals[i / 2]; + at_most_one.push_back(i % 2 == 1 ? l.Negated() : l); + } + + // Expand and add clique. + // + // TODO(user): Expansion is pretty slow. 
Given that the base clique can + // share literals being part of the same amo, we should be able to speed + // that up, we don't want to scan an amo twice basically. + tmp_cuts_.push_back(ExpandAtMostOneWithWeight( + at_most_one, can_be_included, heuristic_weights)); + } + + // Clear the dense mapping + for (const Literal l : fractional_literals) { + tmp_mapping_[l] = -1; + tmp_mapping_[l.Negated()] = -1; + } + } + return tmp_cuts_; } @@ -2055,7 +2158,7 @@ BinaryImplicationGraph::HeuristicAmoPartition(std::vector* literals) { void BinaryImplicationGraph::MarkDescendants(Literal root) { auto* const stack = bfs_stack_.data(); - auto is_marked = is_marked_.const_view(); + auto is_marked = is_marked_.BitsetView(); auto is_redundant = is_redundant_.const_view(); if (is_redundant[root]) return; @@ -2063,13 +2166,14 @@ void BinaryImplicationGraph::MarkDescendants(Literal root) { stack[0] = root; is_marked_.Set(root); const int amo_size = static_cast(at_most_ones_.size()); + auto implies_something = implies_something_.const_view(); for (int j = 0; j < stack_size; ++j) { const Literal current = stack[j]; - if (!implies_something_[current]) continue; + if (!implies_something[current]) continue; for (const Literal l : implications_[current]) { if (!is_marked[l] && !is_redundant[l]) { - is_marked_.SetUnsafe(l); + is_marked_.SetUnsafe(is_marked, l); stack[stack_size++] = l; } } @@ -2079,7 +2183,7 @@ for (const Literal l : AtMostOne(start)) { if (l == current) continue; if (!is_marked[l.NegatedIndex()] && !is_redundant[l.NegatedIndex()]) { - is_marked_.SetUnsafe(l.NegatedIndex()); + is_marked_.SetUnsafe(is_marked, l.NegatedIndex()); stack[stack_size++] = l.Negated(); } } diff --git a/ortools/sat/clause.h b/ortools/sat/clause.h index 4a30915cdc..3f1c0c8f84 100644 --- a/ortools/sat/clause.h +++ b/ortools/sat/clause.h @@ -33,6 +33,7 @@ #include "absl/random/bit_gen_ref.h" #include "absl/types/span.h" #include 
"ortools/base/strong_vector.h" +#include "ortools/graph/cliques.h" #include "ortools/sat/drat_proof_handler.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_base.h" @@ -652,8 +653,8 @@ class BinaryImplicationGraph : public SatPropagator { // // TODO(user): Refine the heuristic and unit test! const std::vector>& GenerateAtMostOnesWithLargeWeight( - const std::vector& literals, - const std::vector& lp_values); + absl::Span literals, absl::Span lp_values, + absl::Span reduced_costs); // Heuristically identify "at most one" between the given literals, swap // them around and return these amo as span inside the literals vector. @@ -920,6 +921,10 @@ class BinaryImplicationGraph : public SatPropagator { int64_t work_done_in_mark_descendants_ = 0; std::vector bfs_stack_; + // For clique cuts. + util_intops::StrongVector tmp_mapping_; + WeightedBronKerboschBitsetAlgorithm bron_kerbosch_; + // Used by ComputeTransitiveReduction() in case we abort early to maintain // the invariant checked by InvariantsAreOk(). Some of our algo // relies on this to be always true. 
diff --git a/ortools/sat/cuts.cc b/ortools/sat/cuts.cc index ea727355d4..e65af77517 100644 --- a/ortools/sat/cuts.cc +++ b/ortools/sat/cuts.cc @@ -2731,17 +2731,21 @@ CutGenerator CreateCliqueCutGenerator( CutGenerator result; result.vars = variables; auto* implication_graph = model->GetOrCreate(); + result.only_run_at_level_zero = true; result.generate_cuts = [variables, literals, implication_graph, positive_map, negative_map, model](LinearConstraintManager* manager) { std::vector packed_values; + std::vector packed_reduced_costs; const auto& lp_values = manager->LpValues(); + const auto& reduced_costs = manager->ReducedCosts(); for (int i = 0; i < literals.size(); ++i) { packed_values.push_back(lp_values[variables[i]]); + packed_reduced_costs.push_back(reduced_costs[variables[i]]); } const std::vector> at_most_ones = - implication_graph->GenerateAtMostOnesWithLargeWeight(literals, - packed_values); + implication_graph->GenerateAtMostOnesWithLargeWeight( + literals, packed_values, packed_reduced_costs); for (const std::vector& at_most_one : at_most_ones) { // We need to express such "at most one" in term of the initial diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index 7faf54896a..d4617abad7 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -31,6 +31,7 @@ #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/sat/2d_orthogonal_packing.h" +#include "ortools/sat/2d_try_edge_propagator.h" #include "ortools/sat/cumulative_energy.h" #include "ortools/sat/diffn_util.h" #include "ortools/sat/disjunctive.h" @@ -233,6 +234,10 @@ void AddNonOverlappingRectangles(const std::vector& x, watcher->SetPropagatorPriority(energy_constraint->RegisterWith(watcher), 5); model->TakeOwnership(energy_constraint); } + + if (params.use_try_edge_reasoning_in_no_overlap_2d()) { + CreateAndRegisterTryEdgePropagator(x_helper, y_helper, model, watcher); + } } #define RETURN_IF_FALSE(f) \ diff --git a/ortools/sat/diffn_util.h 
b/ortools/sat/diffn_util.h index c2688c0d8c..0fc5df8ee2 100644 --- a/ortools/sat/diffn_util.h +++ b/ortools/sat/diffn_util.h @@ -444,6 +444,18 @@ struct RectangleInRange { containing_area.y_max); } + Rectangle GetMandatoryRegion() const { + // Weird math to avoid overflow. + if (bounding_area.SizeX() - x_size >= x_size || + bounding_area.SizeY() - y_size >= y_size) { + return Rectangle::GetEmpty(); + } + return Rectangle{.x_min = bounding_area.x_max - x_size, + .x_max = bounding_area.x_min + x_size, + .y_min = bounding_area.y_max - y_size, + .y_max = bounding_area.y_min + y_size}; + } + static RectangleInRange BiggestWithMinIntersection( const Rectangle& containing_area, const RectangleInRange& original, const IntegerValue& min_intersect_x, diff --git a/ortools/sat/linear_constraint_manager.h b/ortools/sat/linear_constraint_manager.h index eb39b52c3b..2085fa63d4 100644 --- a/ortools/sat/linear_constraint_manager.h +++ b/ortools/sat/linear_constraint_manager.h @@ -48,6 +48,12 @@ struct ModelLpValues ModelLpValues() = default; }; +// Same as ModelLpValues for reduced costs. +struct ModelReducedCosts + : public util_intops::StrongVector { + ModelReducedCosts() = default; +}; + // This class holds a list of globally valid linear constraints and has some // logic to decide which one should be part of the LP relaxation. We want more // for a better relaxation, but for efficiency we do not want to have too much @@ -98,6 +104,7 @@ class LinearConstraintManager { integer_trail_(*model->GetOrCreate()), time_limit_(model->GetOrCreate()), expanded_lp_solution_(*model->GetOrCreate()), + expanded_reduced_costs_(*model->GetOrCreate()), model_(model), logger_(model->GetOrCreate()) {} ~LinearConstraintManager(); @@ -161,6 +168,9 @@ class LinearConstraintManager { const util_intops::StrongVector& LpValues() { return expanded_lp_solution_; } + const util_intops::StrongVector& ReducedCosts() { + return expanded_reduced_costs_; + } // Stats. 
int64_t num_constraints() const { return constraint_infos_.size(); } @@ -267,6 +277,7 @@ class LinearConstraintManager { TimeLimit* time_limit_; ModelLpValues& expanded_lp_solution_; + ModelReducedCosts& expanded_reduced_costs_; Model* model_; SolverLogger* logger_; diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index a1d70915b6..63237ee2f7 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -282,7 +282,8 @@ LinearProgrammingConstraint::LinearProgrammingConstraint( implied_bounds_processor_({}, integer_trail_, model->GetOrCreate()), dispatcher_(model->GetOrCreate()), - expanded_lp_solution_(*model->GetOrCreate()) { + expanded_lp_solution_(*model->GetOrCreate()), + expanded_reduced_costs_(*model->GetOrCreate()) { // Tweak the default parameters to make the solve incremental. simplex_params_.set_use_dual_simplex(true); simplex_params_.set_cost_scaling(glop::GlopParameters::MEAN_COST_SCALING); @@ -327,6 +328,9 @@ LinearProgrammingConstraint::LinearProgrammingConstraint( if (max_index >= expanded_lp_solution_.size()) { expanded_lp_solution_.assign(max_index + 1, 0.0); } + if (max_index >= expanded_reduced_costs_.size()) { + expanded_reduced_costs_.assign(max_index + 1, 0.0); + } } } @@ -731,33 +735,30 @@ bool LinearProgrammingConstraint::SolveLp() { } lp_at_optimal_ = simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL; - if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) { + // If stop_after_root_propagation() is true, we still copy whatever we have as + // these values will be used for the local-branching lns heuristic. 
+ if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL || + parameters_.stop_after_root_propagation()) { lp_solution_is_set_ = true; lp_solution_level_ = trail_->CurrentDecisionLevel(); const int num_vars = integer_variables_.size(); + const auto reduced_costs = simplex_.GetReducedCosts().const_view(); for (int i = 0; i < num_vars; i++) { - const glop::Fractional value = - GetVariableValueAtCpScale(glop::ColIndex(i)); + const glop::ColIndex col(i); + const glop::Fractional value = GetVariableValueAtCpScale(col); lp_solution_[i] = value; expanded_lp_solution_[integer_variables_[i]] = value; expanded_lp_solution_[NegationOf(integer_variables_[i])] = -value; + + const glop::Fractional rc = + scaler_.UnscaleReducedCost(col, reduced_costs[col]); + expanded_reduced_costs_[integer_variables_[i]] = rc; + expanded_reduced_costs_[NegationOf(integer_variables_[i])] = -rc; } if (lp_solution_level_ == 0) { level_zero_lp_solution_ = lp_solution_; } - } else { - // If this parameter is true, we still copy whatever we have as these - // values will be used for the local-branching lns heuristic. - if (parameters_.stop_after_root_propagation()) { - const int num_vars = integer_variables_.size(); - for (int i = 0; i < num_vars; i++) { - const glop::Fractional value = - GetVariableValueAtCpScale(glop::ColIndex(i)); - expanded_lp_solution_[integer_variables_[i]] = value; - expanded_lp_solution_[NegationOf(integer_variables_[i])] = -value; - } - } } return true; diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index 2757dc3f2e..ae98877708 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -569,6 +569,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, // Same as lp_solution_ but this vector is indexed by IntegerVariable. 
ModelLpValues& expanded_lp_solution_; + ModelReducedCosts& expanded_reduced_costs_; // Linear constraints cannot be created or modified after this is registered. bool lp_constraint_is_registered_ = false; diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 870cf2cf8b..7527fa6481 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -875,6 +875,8 @@ message SatParameters { optional bool use_area_energetic_reasoning_in_no_overlap_2d = 271 [default = false]; + optional bool use_try_edge_reasoning_in_no_overlap_2d = 299 [default = false]; + // If the number of pairs to look is below this threshold, do an extra step of // propagation in the no_overlap_2d constraint by looking at all pairs of // intervals. diff --git a/ortools/util/bitset.h b/ortools/util/bitset.h index 7f2f3a4932..9b0636a328 100644 --- a/ortools/util/bitset.h +++ b/ortools/util/bitset.h @@ -457,10 +457,6 @@ class Bitset64 { : size_(Value(size) > 0 ? size : IndexType(0)), data_(BitLength64(Value(size_))) {} - // This type is neither copyable nor movable. - Bitset64(const Bitset64&) = delete; - Bitset64& operator=(const Bitset64&) = delete; - ConstView const_view() const { return ConstView(this); } View view() { return View(this); } @@ -548,6 +544,7 @@ class Bitset64 { void Set(IndexType i) { DCHECK_GE(Value(i), 0); DCHECK_LT(Value(i), size_); + // The c++ hardening is costly here, so we disable it. data_[BitOffset64(Value(i))] |= OneBit64(BitPos64(Value(i))); } @@ -603,6 +600,19 @@ class Bitset64 { } } + // This one assume both given bitset to be of the same size. + void SetToIntersectionOf(const Bitset64& a, + const Bitset64& b) { + DCHECK_EQ(a.size(), b.size()); + Resize(a.size()); + + // Copy buckets. + const int num_buckets = a.data_.size(); + for (int i = 0; i < num_buckets; ++i) { + data_[i] = a.data_[i] & b.data_[i]; + } + } + // Sets "this" to be the union of "this" and "other". The // bitsets do not have to be the same size. 
If other is smaller, all // the higher order bits are assumed to be 0. @@ -871,10 +881,14 @@ class SparseBitset { to_clear_.push_back(index); } } - void SetUnsafe(IntegerType index) { - bitset_.Set(index); + + // A bit hacky for really hot loop. + typename Bitset64::View BitsetView() { return bitset_.view(); } + void SetUnsafe(typename Bitset64::View view, IntegerType index) { + view.Set(index); to_clear_.push_back(index); } + void Clear(IntegerType index) { bitset_.Clear(index); } int NumberOfSetCallsWithDifferentArguments() const { return to_clear_.size(); From 820be10d0f97793ec9971b064da5f8314490640c Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 27 Sep 2024 11:34:22 +0200 Subject: [PATCH 026/105] sat: build go samples --- ortools/sat/go/cpmodel/BUILD.bazel | 2 - ortools/sat/samples/BUILD.bazel | 45 ++++++++++--------- ortools/sat/samples/assumptions_sample_sat.go | 2 +- ortools/sat/samples/binpacking_problem_sat.go | 2 +- ortools/sat/samples/bool_or_sample_sat.go | 2 +- .../sat/samples/boolean_product_sample_sat.go | 6 +-- ortools/sat/samples/channeling_sample_sat.go | 6 +-- ortools/sat/samples/code_samples.bzl | 25 ++++++++++- .../earliness_tardiness_cost_sample_sat.go | 6 +-- ortools/sat/samples/interval_sample_sat.go | 2 +- ortools/sat/samples/literal_sample_sat.go | 2 +- ortools/sat/samples/no_overlap_sample_sat.go | 2 +- ortools/sat/samples/nqueens_sat.go | 2 +- ortools/sat/samples/nurses_sat.go | 2 +- .../samples/optional_interval_sample_sat.go | 2 +- .../sat/samples/rabbits_and_pheasants_sat.go | 2 +- ortools/sat/samples/ranking_sample_sat.go | 2 +- ortools/sat/samples/reified_sample_sat.go | 2 +- .../search_for_all_solutions_sample_sat.go | 6 +-- ortools/sat/samples/simple_sat_program.go | 2 +- .../samples/solution_hinting_sample_sat.go | 2 +- ...print_intermediate_solutions_sample_sat.go | 6 +-- .../solve_with_time_limit_sample_sat.go | 6 +-- .../sat/samples/step_function_sample_sat.go | 6 +-- 24 files changed, 82 insertions(+), 60 
deletions(-) diff --git a/ortools/sat/go/cpmodel/BUILD.bazel b/ortools/sat/go/cpmodel/BUILD.bazel index 25b32f8db7..7a7e86aaff 100644 --- a/ortools/sat/go/cpmodel/BUILD.bazel +++ b/ortools/sat/go/cpmodel/BUILD.bazel @@ -18,8 +18,6 @@ go_library( srcs = [ "cp_model.go", "cp_solver.go", - #"cp_solver_c.cc", - #"cp_solver_c.h", "domain.go", ], cdeps = [":cp_solver_c"], diff --git a/ortools/sat/samples/BUILD.bazel b/ortools/sat/samples/BUILD.bazel index 47f1ef0c75..14fed40823 100644 --- a/ortools/sat/samples/BUILD.bazel +++ b/ortools/sat/samples/BUILD.bazel @@ -11,7 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -load(":code_samples.bzl", "code_sample_cc_py", "code_sample_java", "code_sample_py") +load(":code_samples.bzl", + "code_sample_cc_go_py", "code_sample_cc_py", + "code_sample_go", "code_sample_java", "code_sample_py") code_sample_py(name = "all_different_except_zero_sample_sat") @@ -23,19 +25,20 @@ code_sample_cc_py(name = "assignment_task_sizes_sat") code_sample_cc_py(name = "assignment_teams_sat") -code_sample_cc_py(name = "assumptions_sample_sat") +code_sample_cc_go_py(name = "assumptions_sample_sat") -code_sample_cc_py(name = "binpacking_problem_sat") +code_sample_cc_go_py(name = "binpacking_problem_sat") code_sample_py(name = "bin_packing_sat") code_sample_py(name = "bool_and_int_var_product_sample_sat") -code_sample_cc_py(name = "bool_or_sample_sat") +code_sample_cc_go_py(name = "bool_or_sample_sat") +code_sample_go(name = "boolean_product_sample_sat") code_sample_py(name = "boolean_product_sample_sat") -code_sample_cc_py(name = "channeling_sample_sat") +code_sample_cc_go_py(name = "channeling_sample_sat") code_sample_cc_py(name = "clone_model_sample_sat") @@ -45,55 +48,55 @@ code_sample_cc_py(name = "cp_sat_example") code_sample_py(name = "cumulative_variable_profile_sample_sat") -code_sample_cc_py(name = "earliness_tardiness_cost_sample_sat") +code_sample_cc_go_py(name = 
"earliness_tardiness_cost_sample_sat") code_sample_py(name = "index_first_boolvar_true_sample_sat") code_sample_py(name = "interval_relations_sample_sat") -code_sample_cc_py(name = "interval_sample_sat") +code_sample_cc_go_py(name = "interval_sample_sat") code_sample_cc_py(name = "minimal_jobshop_sat") -code_sample_cc_py(name = "literal_sample_sat") +code_sample_cc_go_py(name = "literal_sample_sat") code_sample_cc_py(name = "multiple_knapsack_sat") code_sample_cc_py(name = "non_linear_sat") -code_sample_cc_py(name = "nqueens_sat") +code_sample_cc_go_py(name = "no_overlap_sample_sat") -code_sample_cc_py(name = "nurses_sat") +code_sample_cc_go_py(name = "nqueens_sat") -code_sample_cc_py(name = "optional_interval_sample_sat") +code_sample_cc_go_py(name = "nurses_sat") -code_sample_cc_py(name = "no_overlap_sample_sat") +code_sample_cc_go_py(name = "optional_interval_sample_sat") code_sample_py(name = "overlapping_intervals_sample_sat") -code_sample_cc_py(name = "rabbits_and_pheasants_sat") +code_sample_cc_go_py(name = "rabbits_and_pheasants_sat") code_sample_py(name = "ranking_circuit_sample_sat") -code_sample_cc_py(name = "ranking_sample_sat") +code_sample_cc_go_py(name = "ranking_sample_sat") -code_sample_cc_py(name = "reified_sample_sat") +code_sample_cc_go_py(name = "reified_sample_sat") code_sample_cc_py(name = "schedule_requests_sat") code_sample_py(name = "scheduling_with_calendar_sample_sat") -code_sample_cc_py(name = "simple_sat_program") +code_sample_cc_go_py(name = "search_for_all_solutions_sample_sat") -code_sample_cc_py(name = "search_for_all_solutions_sample_sat") +code_sample_cc_go_py(name = "simple_sat_program") -code_sample_cc_py(name = "solution_hinting_sample_sat") +code_sample_cc_go_py(name = "solution_hinting_sample_sat") -code_sample_cc_py(name = "solve_and_print_intermediate_solutions_sample_sat") +code_sample_cc_go_py(name = "solve_and_print_intermediate_solutions_sample_sat") -code_sample_cc_py(name = "step_function_sample_sat") 
+code_sample_cc_go_py(name = "solve_with_time_limit_sample_sat") -code_sample_cc_py(name = "solve_with_time_limit_sample_sat") +code_sample_cc_go_py(name = "step_function_sample_sat") code_sample_cc_py(name = "stop_after_n_solutions_sample_sat") diff --git a/ortools/sat/samples/assumptions_sample_sat.go b/ortools/sat/samples/assumptions_sample_sat.go index c46d994a91..c8b291b688 100644 --- a/ortools/sat/samples/assumptions_sample_sat.go +++ b/ortools/sat/samples/assumptions_sample_sat.go @@ -18,8 +18,8 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" - "ortools/sat/go/cpmodel" ) func assumptionsSampleSat() error { diff --git a/ortools/sat/samples/binpacking_problem_sat.go b/ortools/sat/samples/binpacking_problem_sat.go index 96c1251cd4..972d7a6d6c 100644 --- a/ortools/sat/samples/binpacking_problem_sat.go +++ b/ortools/sat/samples/binpacking_problem_sat.go @@ -19,7 +19,7 @@ import ( "fmt" log "github.com/golang/glog" - "ortools/sat/go/cpmodel" + "github.com/google/or-tools/ortools/sat/go/cpmodel" ) const ( diff --git a/ortools/sat/samples/bool_or_sample_sat.go b/ortools/sat/samples/bool_or_sample_sat.go index 0af18fde13..14ef894e98 100644 --- a/ortools/sat/samples/bool_or_sample_sat.go +++ b/ortools/sat/samples/bool_or_sample_sat.go @@ -15,7 +15,7 @@ package main import ( - "ortools/sat/go/cpmodel" + "github.com/google/or-tools/ortools/sat/go/cpmodel" ) func boolOrSampleSat() { diff --git a/ortools/sat/samples/boolean_product_sample_sat.go b/ortools/sat/samples/boolean_product_sample_sat.go index 8d724195c7..d79ae52d8b 100644 --- a/ortools/sat/samples/boolean_product_sample_sat.go +++ b/ortools/sat/samples/boolean_product_sample_sat.go @@ -18,9 +18,9 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - 
"ortools/sat/go/cpmodel" ) func booleanProductSample() error { @@ -44,11 +44,11 @@ func booleanProductSample() error { } // Set `fill_additional_solutions_in_response` and `enumerate_all_solutions` to true so // the solver returns all solutions found. - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ FillAdditionalSolutionsInResponse: proto.Bool(true), EnumerateAllSolutions: proto.Bool(true), SolutionPoolSize: proto.Int32(4), - }.Build() + } response, err := cpmodel.SolveCpModelWithParameters(m, params) if err != nil { return fmt.Errorf("failed to solve the model: %w", err) diff --git a/ortools/sat/samples/channeling_sample_sat.go b/ortools/sat/samples/channeling_sample_sat.go index dab99823e6..33d3d52df2 100644 --- a/ortools/sat/samples/channeling_sample_sat.go +++ b/ortools/sat/samples/channeling_sample_sat.go @@ -18,10 +18,10 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - "ortools/sat/go/cpmodel" ) func channelingSampleSat() error { @@ -53,12 +53,12 @@ func channelingSampleSat() error { if err != nil { return fmt.Errorf("failed to instantiate the CP model: %w", err) } - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ FillAdditionalSolutionsInResponse: proto.Bool(true), EnumerateAllSolutions: proto.Bool(true), SolutionPoolSize: proto.Int32(11), SearchBranching: sppb.SatParameters_FIXED_SEARCH.Enum(), - }.Build() + } response, err := cpmodel.SolveCpModelWithParameters(m, params) if err != nil { return fmt.Errorf("failed to solve the model: %w", err) diff --git a/ortools/sat/samples/code_samples.bzl b/ortools/sat/samples/code_samples.bzl index 92f9ebde9d..7f7f0ece59 100644 --- a/ortools/sat/samples/code_samples.bzl +++ b/ortools/sat/samples/code_samples.bzl @@ -13,11 +13,13 @@ """Helper macro to compile and 
test code samples.""" +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_test") load("@pip_deps//:requirements.bzl", "requirement") +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") load("@rules_python//python:defs.bzl", "py_binary", "py_test") def code_sample_cc(name): - native.cc_binary( + cc_binary( name = name + "_cc", srcs = [name + ".cc"], deps = [ @@ -28,7 +30,7 @@ def code_sample_cc(name): ], ) - native.cc_test( + cc_test( name = name + "_cc_test", size = "small", srcs = [name + ".cc"], @@ -41,6 +43,20 @@ def code_sample_cc(name): ], ) +def code_sample_go(name): + go_test( + name = name + "_go_test", + size = "small", + srcs = [name + ".go"], + deps = [ + "//ortools/sat:cp_model_go_proto", + "//ortools/sat:sat_parameters_go_proto", + "//ortools/sat/go/cpmodel", + "@com_github_golang_glog//:glog", + "@org_golang_google_protobuf//proto", + ], + ) + def code_sample_py(name): py_binary( name = name + "_py3", @@ -74,6 +90,11 @@ def code_sample_py(name): srcs_version = "PY3", ) +def code_sample_cc_go_py(name): + code_sample_cc(name = name) + code_sample_go(name = name) + code_sample_py(name = name) + def code_sample_cc_py(name): code_sample_cc(name = name) code_sample_py(name = name) diff --git a/ortools/sat/samples/earliness_tardiness_cost_sample_sat.go b/ortools/sat/samples/earliness_tardiness_cost_sample_sat.go index ce4135506c..c8bdab2b25 100644 --- a/ortools/sat/samples/earliness_tardiness_cost_sample_sat.go +++ b/ortools/sat/samples/earliness_tardiness_cost_sample_sat.go @@ -19,10 +19,10 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - "ortools/sat/go/cpmodel" ) const ( @@ -62,12 +62,12 @@ func earlinessTardinessCostSampleSat() error { if err != nil { return fmt.Errorf("failed to instantiate the CP model: %w", err) } - params := 
sppb.SatParameters_builder{ + params := &sppb.SatParameters{ FillAdditionalSolutionsInResponse: proto.Bool(true), EnumerateAllSolutions: proto.Bool(true), SolutionPoolSize: proto.Int32(21), SearchBranching: sppb.SatParameters_FIXED_SEARCH.Enum(), - }.Build() + } response, err := cpmodel.SolveCpModelWithParameters(m, params) if err != nil { return fmt.Errorf("failed to solve the model: %w", err) diff --git a/ortools/sat/samples/interval_sample_sat.go b/ortools/sat/samples/interval_sample_sat.go index 5a3a254591..24d89f677b 100644 --- a/ortools/sat/samples/interval_sample_sat.go +++ b/ortools/sat/samples/interval_sample_sat.go @@ -18,7 +18,7 @@ import ( "fmt" log "github.com/golang/glog" - "ortools/sat/go/cpmodel" + "github.com/google/or-tools/ortools/sat/go/cpmodel" ) const horizon = 100 diff --git a/ortools/sat/samples/literal_sample_sat.go b/ortools/sat/samples/literal_sample_sat.go index 7cfec06ccb..6018b1911e 100644 --- a/ortools/sat/samples/literal_sample_sat.go +++ b/ortools/sat/samples/literal_sample_sat.go @@ -16,7 +16,7 @@ package main import ( log "github.com/golang/glog" - "ortools/sat/go/cpmodel" + "github.com/google/or-tools/ortools/sat/go/cpmodel" ) func literalSampleSat() { diff --git a/ortools/sat/samples/no_overlap_sample_sat.go b/ortools/sat/samples/no_overlap_sample_sat.go index e69fc2a0cc..cbe175f404 100644 --- a/ortools/sat/samples/no_overlap_sample_sat.go +++ b/ortools/sat/samples/no_overlap_sample_sat.go @@ -18,8 +18,8 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" - "ortools/sat/go/cpmodel" ) const horizon = 21 // 3 weeks diff --git a/ortools/sat/samples/nqueens_sat.go b/ortools/sat/samples/nqueens_sat.go index 198237f6bd..5694bfd7ea 100644 --- a/ortools/sat/samples/nqueens_sat.go +++ b/ortools/sat/samples/nqueens_sat.go @@ -18,7 +18,7 @@ import ( "fmt" log "github.com/golang/glog" - "ortools/sat/go/cpmodel" + 
"github.com/google/or-tools/ortools/sat/go/cpmodel" ) const boardSize = 8 diff --git a/ortools/sat/samples/nurses_sat.go b/ortools/sat/samples/nurses_sat.go index c8fbb49284..8b1df4aa33 100644 --- a/ortools/sat/samples/nurses_sat.go +++ b/ortools/sat/samples/nurses_sat.go @@ -18,7 +18,7 @@ import ( "fmt" log "github.com/golang/glog" - "ortools/sat/go/cpmodel" + "github.com/google/or-tools/ortools/sat/go/cpmodel" ) const ( diff --git a/ortools/sat/samples/optional_interval_sample_sat.go b/ortools/sat/samples/optional_interval_sample_sat.go index 2f50f8a411..1dffb2d88e 100644 --- a/ortools/sat/samples/optional_interval_sample_sat.go +++ b/ortools/sat/samples/optional_interval_sample_sat.go @@ -19,7 +19,7 @@ import ( "fmt" log "github.com/golang/glog" - "ortools/sat/go/cpmodel" + "github.com/google/or-tools/ortools/sat/go/cpmodel" ) const horizon = 100 diff --git a/ortools/sat/samples/rabbits_and_pheasants_sat.go b/ortools/sat/samples/rabbits_and_pheasants_sat.go index c828874a16..a49d688e10 100644 --- a/ortools/sat/samples/rabbits_and_pheasants_sat.go +++ b/ortools/sat/samples/rabbits_and_pheasants_sat.go @@ -19,8 +19,8 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" - "ortools/sat/go/cpmodel" ) const numAnimals = 20 diff --git a/ortools/sat/samples/ranking_sample_sat.go b/ortools/sat/samples/ranking_sample_sat.go index d81c01e1fd..a4bf692340 100644 --- a/ortools/sat/samples/ranking_sample_sat.go +++ b/ortools/sat/samples/ranking_sample_sat.go @@ -18,8 +18,8 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" - "ortools/sat/go/cpmodel" ) const ( diff --git a/ortools/sat/samples/reified_sample_sat.go b/ortools/sat/samples/reified_sample_sat.go index 63a77dba72..00ecb4313e 100644 --- a/ortools/sat/samples/reified_sample_sat.go +++ 
b/ortools/sat/samples/reified_sample_sat.go @@ -15,7 +15,7 @@ package main import ( - "ortools/sat/go/cpmodel" + "github.com/google/or-tools/ortools/sat/go/cpmodel" ) func reifiedSampleSat() { diff --git a/ortools/sat/samples/search_for_all_solutions_sample_sat.go b/ortools/sat/samples/search_for_all_solutions_sample_sat.go index 2324f031de..6e43abbe65 100644 --- a/ortools/sat/samples/search_for_all_solutions_sample_sat.go +++ b/ortools/sat/samples/search_for_all_solutions_sample_sat.go @@ -19,9 +19,9 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - "ortools/sat/go/cpmodel" ) func searchForAllSolutionsSampleSat() error { @@ -41,11 +41,11 @@ func searchForAllSolutionsSampleSat() error { // Currently, the CpModelBuilder does not allow for callbacks, so each feasible solution cannot // be printed while solving. However, the CP Solver can return all of the enumerated solutions // in the response by setting the following parameters. 
- params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ EnumerateAllSolutions: proto.Bool(true), FillAdditionalSolutionsInResponse: proto.Bool(true), SolutionPoolSize: proto.Int32(27), - }.Build() + } response, err := cpmodel.SolveCpModelWithParameters(m, params) if err != nil { return fmt.Errorf("failed to solve the model: %w", err) diff --git a/ortools/sat/samples/simple_sat_program.go b/ortools/sat/samples/simple_sat_program.go index cf48a94284..8cddcb7efe 100644 --- a/ortools/sat/samples/simple_sat_program.go +++ b/ortools/sat/samples/simple_sat_program.go @@ -18,8 +18,8 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" - "ortools/sat/go/cpmodel" ) func simpleSatProgram() error { diff --git a/ortools/sat/samples/solution_hinting_sample_sat.go b/ortools/sat/samples/solution_hinting_sample_sat.go index 70f15710c9..d0d4379fe8 100644 --- a/ortools/sat/samples/solution_hinting_sample_sat.go +++ b/ortools/sat/samples/solution_hinting_sample_sat.go @@ -18,8 +18,8 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" - "ortools/sat/go/cpmodel" ) func solutionHintingSampleSat() error { diff --git a/ortools/sat/samples/solve_and_print_intermediate_solutions_sample_sat.go b/ortools/sat/samples/solve_and_print_intermediate_solutions_sample_sat.go index 7885073bac..5ed02d38bc 100644 --- a/ortools/sat/samples/solve_and_print_intermediate_solutions_sample_sat.go +++ b/ortools/sat/samples/solve_and_print_intermediate_solutions_sample_sat.go @@ -18,9 +18,9 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - "ortools/sat/go/cpmodel" ) func solveAndPrintIntermediateSolutionsSampleSat() error { @@ 
-44,10 +44,10 @@ func solveAndPrintIntermediateSolutionsSampleSat() error { // Currently, the CpModelBuilder does not allow for callbacks, so intermediate solutions // cannot be printed while solving. However, the CP-SAT solver does allow for returning // the intermediate solutions found while solving in the response. - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ FillAdditionalSolutionsInResponse: proto.Bool(true), SolutionPoolSize: proto.Int32(10), - }.Build() + } response, err := cpmodel.SolveCpModelWithParameters(m, params) if err != nil { return fmt.Errorf("failed to solve the model: %w", err) diff --git a/ortools/sat/samples/solve_with_time_limit_sample_sat.go b/ortools/sat/samples/solve_with_time_limit_sample_sat.go index b7f968b336..5a5bd6cb1b 100644 --- a/ortools/sat/samples/solve_with_time_limit_sample_sat.go +++ b/ortools/sat/samples/solve_with_time_limit_sample_sat.go @@ -18,10 +18,10 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - "ortools/sat/go/cpmodel" ) func solveWithTimeLimitSampleSat() error { @@ -40,9 +40,9 @@ func solveWithTimeLimitSampleSat() error { } // Sets a time limit of 10 seconds. - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ MaxTimeInSeconds: proto.Float64(10.0), - }.Build() + } // Solve. 
response, err := cpmodel.SolveCpModelWithParameters(m, params) diff --git a/ortools/sat/samples/step_function_sample_sat.go b/ortools/sat/samples/step_function_sample_sat.go index b04a04d9ac..8c927f1e75 100644 --- a/ortools/sat/samples/step_function_sample_sat.go +++ b/ortools/sat/samples/step_function_sample_sat.go @@ -18,10 +18,10 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - "ortools/sat/go/cpmodel" ) func stepFunctionSampleSat() error { @@ -71,12 +71,12 @@ func stepFunctionSampleSat() error { if err != nil { return fmt.Errorf("failed to instantiate the CP model: %w", err) } - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ FillAdditionalSolutionsInResponse: proto.Bool(true), EnumerateAllSolutions: proto.Bool(true), SolutionPoolSize: proto.Int32(21), SearchBranching: sppb.SatParameters_FIXED_SEARCH.Enum(), - }.Build() + } response, err := cpmodel.SolveCpModelWithParameters(m, params) if err != nil { return fmt.Errorf("failed to solve the model: %w", err) From 3c7bc4909030f3af212b866321490bc2db55cd5e Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 27 Sep 2024 13:02:36 +0200 Subject: [PATCH 027/105] cleanup --- ortools/sat/samples/BUILD.bazel | 12 +++++++++--- ortools/sat/samples/code_samples.bzl | 10 +++++----- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/ortools/sat/samples/BUILD.bazel b/ortools/sat/samples/BUILD.bazel index 14fed40823..95de48b809 100644 --- a/ortools/sat/samples/BUILD.bazel +++ b/ortools/sat/samples/BUILD.bazel @@ -11,9 +11,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-load(":code_samples.bzl", - "code_sample_cc_go_py", "code_sample_cc_py", - "code_sample_go", "code_sample_java", "code_sample_py") +load( + ":code_samples.bzl", + "code_sample_cc_go_py", + "code_sample_cc_py", + "code_sample_go", + "code_sample_java", + "code_sample_py", +) code_sample_py(name = "all_different_except_zero_sample_sat") @@ -36,6 +41,7 @@ code_sample_py(name = "bool_and_int_var_product_sample_sat") code_sample_cc_go_py(name = "bool_or_sample_sat") code_sample_go(name = "boolean_product_sample_sat") + code_sample_py(name = "boolean_product_sample_sat") code_sample_cc_go_py(name = "channeling_sample_sat") diff --git a/ortools/sat/samples/code_samples.bzl b/ortools/sat/samples/code_samples.bzl index 7f7f0ece59..2d1e4afb84 100644 --- a/ortools/sat/samples/code_samples.bzl +++ b/ortools/sat/samples/code_samples.bzl @@ -49,11 +49,11 @@ def code_sample_go(name): size = "small", srcs = [name + ".go"], deps = [ - "//ortools/sat:cp_model_go_proto", - "//ortools/sat:sat_parameters_go_proto", - "//ortools/sat/go/cpmodel", - "@com_github_golang_glog//:glog", - "@org_golang_google_protobuf//proto", + "//ortools/sat:cp_model_go_proto", + "//ortools/sat:sat_parameters_go_proto", + "//ortools/sat/go/cpmodel", + "@com_github_golang_glog//:glog", + "@org_golang_google_protobuf//proto", ], ) From c4fac7717467904f16b37ee4bc055fb018eb0fce Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 27 Sep 2024 14:55:35 +0200 Subject: [PATCH 028/105] [CP-SAT] Fix #4373 --- ortools/algorithms/sparse_permutation.cc | 26 ++++ ortools/algorithms/sparse_permutation.h | 26 ++++ ortools/algorithms/sparse_permutation_test.cc | 15 ++ ortools/graph/BUILD.bazel | 18 --- ortools/sat/BUILD.bazel | 4 +- ortools/sat/cp_model_symmetries.cc | 63 +++++++- ortools/sat/presolve_context.cc | 8 + ortools/sat/presolve_context.h | 3 + ortools/sat/symmetry_util.cc | 38 +++++ ortools/sat/symmetry_util.h | 13 ++ ortools/sat/symmetry_util_test.cc | 138 +++++++++++------- 11 files changed, 275 
insertions(+), 77 deletions(-) diff --git a/ortools/algorithms/sparse_permutation.cc b/ortools/algorithms/sparse_permutation.cc index ab6801fef1..9d4b6a466d 100644 --- a/ortools/algorithms/sparse_permutation.cc +++ b/ortools/algorithms/sparse_permutation.cc @@ -81,4 +81,30 @@ std::string SparsePermutation::DebugString() const { return out; } +int SparsePermutation::Image(int element) const { + for (int c = 0; c < NumCycles(); ++c) { + int cur_element = LastElementInCycle(c); + for (int image : Cycle(c)) { + if (cur_element == element) { + return image; + } + cur_element = image; + } + } + return element; +} + +int SparsePermutation::InverseImage(int element) const { + for (int c = 0; c < NumCycles(); ++c) { + int cur_element = LastElementInCycle(c); + for (int image : Cycle(c)) { + if (image == element) { + return cur_element; + } + cur_element = image; + } + } + return element; +} + } // namespace operations_research diff --git a/ortools/algorithms/sparse_permutation.h b/ortools/algorithms/sparse_permutation.h index 0cbee4f9ec..ee9b70db5c 100644 --- a/ortools/algorithms/sparse_permutation.h +++ b/ortools/algorithms/sparse_permutation.h @@ -59,6 +59,11 @@ class SparsePermutation { // information with the loop above. Not sure it is needed though. int LastElementInCycle(int i) const; + // Returns the image of the given element or `element` itself if it is stable + // under the permutation. + int Image(int element) const; + int InverseImage(int element) const; + // To add a cycle to the permutation, repeatedly call AddToCurrentCycle() // with the cycle's orbit, then call CloseCurrentCycle(); // This shouldn't be called on trivial cycles (of length 1). @@ -76,6 +81,9 @@ class SparsePermutation { // Example: "(1 4 3) (5 9) (6 8 7)". 
std::string DebugString() const; + template + void ApplyToDenseCollection(Collection& span) const; + private: const int size_; std::vector cycles_; @@ -129,6 +137,24 @@ inline int SparsePermutation::LastElementInCycle(int i) const { return cycles_[cycle_ends_[i] - 1]; } +template +void SparsePermutation::ApplyToDenseCollection(Collection& span) const { + using T = typename Collection::value_type; + for (int c = 0; c < NumCycles(); ++c) { + const int last_element_idx = LastElementInCycle(c); + int element = last_element_idx; + T last_element = span[element]; + for (int image : Cycle(c)) { + if (image == last_element_idx) { + span[element] = last_element; + } else { + span[element] = span[image]; + } + element = image; + } + } +} + } // namespace operations_research #endif // OR_TOOLS_ALGORITHMS_SPARSE_PERMUTATION_H_ diff --git a/ortools/algorithms/sparse_permutation_test.cc b/ortools/algorithms/sparse_permutation_test.cc index a31927b2b8..44aead1cf8 100644 --- a/ortools/algorithms/sparse_permutation_test.cc +++ b/ortools/algorithms/sparse_permutation_test.cc @@ -15,6 +15,7 @@ #include #include +#include #include #include "absl/container/flat_hash_set.h" @@ -73,6 +74,20 @@ TEST(SparsePermutationTest, Identity) { EXPECT_EQ(0, permutation.NumCycles()); } +TEST(SparsePermutationTest, ApplyToVector) { + std::vector v = {"0", "1", "2", "3", "4", "5", "6", "7", "8"}; + SparsePermutation permutation(v.size()); + permutation.AddToCurrentCycle(4); + permutation.AddToCurrentCycle(2); + permutation.AddToCurrentCycle(7); + permutation.CloseCurrentCycle(); + permutation.AddToCurrentCycle(6); + permutation.AddToCurrentCycle(1); + permutation.CloseCurrentCycle(); + permutation.ApplyToDenseCollection(v); + EXPECT_THAT(v, ElementsAre("0", "6", "7", "3", "2", "5", "1", "4", "8")); +} + // Generate a bunch of permutation on a 'huge' space, but that have very few // displacements. This would OOM if the implementation was O(N); we verify // that it doesn't. 
diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel index 98e6c8a510..824d6cb89b 100644 --- a/ortools/graph/BUILD.bazel +++ b/ortools/graph/BUILD.bazel @@ -355,24 +355,6 @@ cc_library( ], ) -# need C++20 -#cc_test( -# name = "k_shortest_paths_test", -# srcs = ["k_shortest_paths_test.cc"], -# deps = [ -# ":graph", -# ":io", -# ":k_shortest_paths", -# ":shortest_paths", -# "//ortools/base:gmock_main", -# "@com_google_absl//absl/algorithm:container", -# "@com_google_absl//absl/log:check", -# "@com_google_absl//absl/random:distributions", -# "@com_google_absl//absl/strings", -# "@com_google_benchmark//:benchmark", -# ], -#) - # Flow problem protobuf representation proto_library( name = "flow_problem_proto", diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 5b2214ff06..c21059e05e 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -657,7 +657,6 @@ cc_library( hdrs = ["presolve_context.h"], deps = [ ":cp_model_cc_proto", - ":cp_model_checker", ":cp_model_loader", ":cp_model_mapping", ":cp_model_utils", @@ -668,6 +667,7 @@ cc_library( ":sat_parameters_cc_proto", ":sat_solver", ":util", + "//ortools/algorithms:sparse_permutation", "//ortools/base", "//ortools/base:mathutil", "//ortools/port:proto_utils", @@ -1163,6 +1163,7 @@ cc_library( "//ortools/algorithms:dynamic_partition", "//ortools/algorithms:sparse_permutation", "//ortools/base", + "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/log:check", "@com_google_absl//absl/types:span", ], @@ -1176,6 +1177,7 @@ cc_test( ":symmetry_util", "//ortools/algorithms:sparse_permutation", "//ortools/base:gmock_main", + "@com_google_absl//absl/types:span", ], ) diff --git a/ortools/sat/cp_model_symmetries.cc b/ortools/sat/cp_model_symmetries.cc index 1c53e17e9e..b489d655b6 100644 --- a/ortools/sat/cp_model_symmetries.cc +++ b/ortools/sat/cp_model_symmetries.cc @@ -895,6 +895,47 @@ std::vector BuildInequalityCoeffsForOrbitope( return out; } +void 
UpdateHintAfterFixingBoolToBreakSymmetry( + PresolveContext* context, int var, bool fixed_value, + const std::vector>& generators) { + if (!context->VarHasSolutionHint(var)) { + return; + } + const int64_t hinted_value = context->SolutionHint(var); + if (hinted_value == static_cast(fixed_value)) { + return; + } + + std::vector schrier_vector; + std::vector orbit; + GetSchreierVectorAndOrbit(var, generators, &schrier_vector, &orbit); + + bool found_target = false; + int target_var; + for (int v : orbit) { + if (context->VarHasSolutionHint(v) && + context->SolutionHint(v) == static_cast(fixed_value)) { + found_target = true; + target_var = v; + break; + } + } + if (!found_target) { + context->UpdateRuleStats( + "hint: couldn't transform infeasible hint properly"); + return; + } + + const std::vector generator_idx = + TracePoint(target_var, schrier_vector, generators); + for (const int i : generator_idx) { + context->PermuteHintValues(*generators[i]); + } + + DCHECK(context->VarHasSolutionHint(var)); + DCHECK_EQ(context->SolutionHint(var), fixed_value); +} + } // namespace bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { @@ -1010,6 +1051,7 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { // fixing do not exploit the full structure of these symmeteries. Note // however that the fixing via propagation above close cod105 even more // efficiently. + std::vector var_can_be_true_per_orbit(num_vars, -1); { std::vector tmp_to_clear; std::vector tmp_sizes(num_vars, 0); @@ -1050,7 +1092,11 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { } // We push all but the first one in each orbit. 
- if (tmp_sizes[rep] == 0) can_be_fixed_to_false.push_back(var); + if (tmp_sizes[rep] == 0) { + can_be_fixed_to_false.push_back(var); + } else { + var_can_be_true_per_orbit[rep] = var; + } tmp_sizes[rep] = 0; } } else { @@ -1131,7 +1177,7 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { } } - // Supper simple heuristic to use the orbitope or not. + // Super simple heuristic to use the orbitope or not. // // In an orbitope with an at most one on each row, we can fix the upper right // triangle. We could use a formula, but the loop is fast enough. @@ -1153,6 +1199,19 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { const int var = can_be_fixed_to_false[i]; if (orbits[var] == orbit_index) ++num_in_orbit; context->UpdateRuleStats("symmetry: fixed to false in general orbit"); + if (context->VarHasSolutionHint(var) && context->SolutionHint(var) == 1 && + var_can_be_true_per_orbit[orbits[var]] != -1) { + // We are breaking the symmetry in a way that makes the hint invalid. + // We want `var` to be false, so we would naively pick a symmetry to + // enforce that. But that will be wrong if we do this twice: after we + // permute the hint to fix the first one we would look for a symmetry + // group element that fixes the second one to false. But there are many + // of those, and picking the wrong one would risk making the first one + // true again. Since this is a AMO, fixing the one that is true doesn't + // have this problem. 
+ UpdateHintAfterFixingBoolToBreakSymmetry( + context, var_can_be_true_per_orbit[orbits[var]], true, generators); + } if (!context->SetLiteralToFalse(var)) return false; } diff --git a/ortools/sat/presolve_context.cc b/ortools/sat/presolve_context.cc index a5dab8dd6b..c4789f9bff 100644 --- a/ortools/sat/presolve_context.cc +++ b/ortools/sat/presolve_context.cc @@ -33,6 +33,7 @@ #include "absl/numeric/int128.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" +#include "ortools/algorithms/sparse_permutation.h" #include "ortools/base/logging.h" #include "ortools/base/mathutil.h" #include "ortools/port/proto_utils.h" @@ -725,6 +726,7 @@ void PresolveContext::UpdateConstraintVariableUsage(int c) { } bool PresolveContext::ConstraintVariableGraphIsUpToDate() const { + if (is_unsat_) return true; // We do not care in this case. return constraint_to_vars_.size() == working_model->constraints_size(); } @@ -1016,6 +1018,12 @@ bool PresolveContext::CanonicalizeAffineVariable(int ref, int64_t coeff, return true; } +void PresolveContext::PermuteHintValues(const SparsePermutation& perm) { + CHECK(hint_is_loaded_); + perm.ApplyToDenseCollection(hint_); + perm.ApplyToDenseCollection(hint_has_value_); +} + bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, int64_t offset, bool debug_no_recursion) { diff --git a/ortools/sat/presolve_context.h b/ortools/sat/presolve_context.h index faa7a39800..1dfac184ef 100644 --- a/ortools/sat/presolve_context.h +++ b/ortools/sat/presolve_context.h @@ -28,6 +28,7 @@ #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" +#include "ortools/algorithms/sparse_permutation.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_utils.h" @@ -574,6 +575,8 @@ class PresolveContext { // the hint, in order to maintain it as best as possible during presolve. 
void LoadSolutionHint(); + void PermuteHintValues(const SparsePermutation& perm); + // Solution hint accessor. bool VarHasSolutionHint(int var) const { return hint_has_value_[var]; } int64_t SolutionHint(int var) const { return hint_[var]; } diff --git a/ortools/sat/symmetry_util.cc b/ortools/sat/symmetry_util.cc index 78edf8fe87..c1d96e0a38 100644 --- a/ortools/sat/symmetry_util.cc +++ b/ortools/sat/symmetry_util.cc @@ -18,6 +18,7 @@ #include #include +#include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/types/span.h" #include "ortools/algorithms/dynamic_partition.h" @@ -194,5 +195,42 @@ std::vector GetOrbitopeOrbits( return orbits; } +void GetSchreierVectorAndOrbit( + int point, absl::Span> generators, + std::vector* schrier_vector, std::vector* orbit) { + schrier_vector->clear(); + *orbit = {point}; + if (generators.empty()) return; + schrier_vector->resize(generators[0]->Size(), -1); + absl::flat_hash_set orbit_set = {point}; + for (int i = 0; i < orbit->size(); ++i) { + const int orbit_element = (*orbit)[i]; + for (int i = 0; i < generators.size(); ++i) { + DCHECK_EQ(schrier_vector->size(), generators[i]->Size()); + const int image = generators[i]->Image(orbit_element); + if (image == orbit_element) continue; + const auto [it, inserted] = orbit_set.insert(image); + if (inserted) { + (*schrier_vector)[image] = i; + orbit->push_back(image); + } + } + } +} + +std::vector TracePoint( + int point, absl::Span schrier_vector, + absl::Span> generators) { + std::vector result; + while (schrier_vector[point] != -1) { + const SparsePermutation& perm = *generators[schrier_vector[point]]; + result.push_back(schrier_vector[point]); + const int next = perm.InverseImage(point); + DCHECK_NE(next, point); + point = next; + } + return result; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/symmetry_util.h b/ortools/sat/symmetry_util.h index f045be430e..5e5e813d6e 100644 --- a/ortools/sat/symmetry_util.h +++ 
b/ortools/sat/symmetry_util.h @@ -62,6 +62,19 @@ std::vector GetOrbits( std::vector GetOrbitopeOrbits(int n, absl::Span> orbitope); +// See Chapter 7 of Butler, Gregory, ed. Fundamental algorithms for permutation +// groups. Berlin, Heidelberg: Springer Berlin Heidelberg, 1991. +void GetSchreierVectorAndOrbit( + int point, absl::Span> generators, + std::vector* schrier_vector, std::vector* orbit); + +// Given a schreier vector for a given base point and a point in the same orbit +// of the base point, returns a list of index of the `generators` to apply to +// get a permutation mapping the base point to get the given point. +std::vector TracePoint( + int point, absl::Span schrier_vector, + absl::Span> generators); + // Given the generators for a permutation group of [0, n-1], update it to // a set of generators of the group stabilizing the given element. // diff --git a/ortools/sat/symmetry_util_test.cc b/ortools/sat/symmetry_util_test.cc index 85a7d67481..9b3a5b19f5 100644 --- a/ortools/sat/symmetry_util_test.cc +++ b/ortools/sat/symmetry_util_test.cc @@ -13,9 +13,12 @@ #include "ortools/sat/symmetry_util.h" +#include #include +#include #include +#include "absl/types/span.h" #include "gtest/gtest.h" #include "ortools/algorithms/sparse_permutation.h" #include "ortools/base/gmock.h" @@ -25,24 +28,25 @@ namespace sat { namespace { using ::testing::ElementsAre; +using ::testing::UnorderedElementsAre; + +std::unique_ptr MakePerm( + int size, absl::Span> cycles) { + auto perm = std::make_unique(size); + for (const auto& cycle : cycles) { + for (const int x : cycle) { + perm->AddToCurrentCycle(x); + } + perm->CloseCurrentCycle(); + } + return perm; +} TEST(GetOrbitsTest, BasicExample) { const int n = 10; std::vector> generators; - generators.push_back(std::make_unique(n)); - generators[0]->AddToCurrentCycle(0); - generators[0]->AddToCurrentCycle(1); - generators[0]->AddToCurrentCycle(2); - generators[0]->CloseCurrentCycle(); - generators[0]->AddToCurrentCycle(7); - 
generators[0]->AddToCurrentCycle(8); - generators[0]->CloseCurrentCycle(); - - generators.push_back(std::make_unique(n)); - generators[1]->AddToCurrentCycle(3); - generators[1]->AddToCurrentCycle(2); - generators[1]->AddToCurrentCycle(7); - generators[1]->CloseCurrentCycle(); + generators.push_back(MakePerm(n, {{0, 1, 2}, {7, 8}})); + generators.push_back(MakePerm(n, {{3, 2, 7}})); const std::vector orbits = GetOrbits(n, generators); for (const int i : std::vector{0, 1, 2, 3, 7, 8}) { EXPECT_EQ(orbits[i], 0); @@ -60,27 +64,8 @@ TEST(BasicOrbitopeExtractionTest, BasicExample) { const int n = 10; std::vector> generators; - generators.push_back(std::make_unique(n)); - generators[0]->AddToCurrentCycle(0); - generators[0]->AddToCurrentCycle(1); - generators[0]->CloseCurrentCycle(); - generators[0]->AddToCurrentCycle(4); - generators[0]->AddToCurrentCycle(5); - generators[0]->CloseCurrentCycle(); - generators[0]->AddToCurrentCycle(8); - generators[0]->AddToCurrentCycle(7); - generators[0]->CloseCurrentCycle(); - - generators.push_back(std::make_unique(n)); - generators[1]->AddToCurrentCycle(2); - generators[1]->AddToCurrentCycle(1); - generators[1]->CloseCurrentCycle(); - generators[1]->AddToCurrentCycle(5); - generators[1]->AddToCurrentCycle(3); - generators[1]->CloseCurrentCycle(); - generators[1]->AddToCurrentCycle(6); - generators[1]->AddToCurrentCycle(7); - generators[1]->CloseCurrentCycle(); + generators.push_back(MakePerm(n, {{0, 1}, {4, 5}, {8, 7}})); + generators.push_back(MakePerm(n, {{2, 1}, {5, 3}, {6, 7}})); const std::vector> orbitope = BasicOrbitopeExtraction(generators); @@ -99,27 +84,8 @@ TEST(BasicOrbitopeExtractionTest, NotAnOrbitopeBecauseOfDuplicates) { const int n = 10; std::vector> generators; - generators.push_back(std::make_unique(n)); - generators[0]->AddToCurrentCycle(0); - generators[0]->AddToCurrentCycle(1); - generators[0]->CloseCurrentCycle(); - generators[0]->AddToCurrentCycle(4); - generators[0]->AddToCurrentCycle(5); - 
generators[0]->CloseCurrentCycle(); - generators[0]->AddToCurrentCycle(8); - generators[0]->AddToCurrentCycle(7); - generators[0]->CloseCurrentCycle(); - - generators.push_back(std::make_unique(n)); - generators[1]->AddToCurrentCycle(1); - generators[1]->AddToCurrentCycle(2); - generators[1]->CloseCurrentCycle(); - generators[1]->AddToCurrentCycle(5); - generators[1]->AddToCurrentCycle(8); - generators[1]->CloseCurrentCycle(); - generators[1]->AddToCurrentCycle(6); - generators[1]->AddToCurrentCycle(9); - generators[1]->CloseCurrentCycle(); + generators.push_back(MakePerm(n, {{0, 1}, {4, 5}, {8, 7}})); + generators.push_back(MakePerm(n, {{1, 2}, {5, 8}, {6, 9}})); const std::vector> orbitope = BasicOrbitopeExtraction(generators); @@ -129,6 +95,66 @@ TEST(BasicOrbitopeExtractionTest, NotAnOrbitopeBecauseOfDuplicates) { EXPECT_THAT(orbitope[2], ElementsAre(8, 7)); } +TEST(GetSchreierVectorTest, Square) { + const int n = 4; + std::vector> generators; + generators.push_back(MakePerm(n, {{0, 1, 2, 3}})); + generators.push_back(MakePerm(n, {{1, 3}})); + + std::vector schrier_vector, orbit; + GetSchreierVectorAndOrbit(0, generators, &schrier_vector, &orbit); + EXPECT_THAT(schrier_vector, ElementsAre(-1, 0, 0, 1)); +} + +TEST(GetSchreierVectorTest, ComplicatedGroup) { + // See Chapter 7 of Butler, Gregory, ed. Fundamental algorithms for + // permutation groups. Berlin, Heidelberg: Springer Berlin Heidelberg, 1991. 
+ const int n = 11; + std::vector> generators; + generators.push_back(MakePerm(n, {{0, 3, 4, 10, 5, 9, 2, 1}, {6, 7}})); + generators.push_back(MakePerm(n, {{0, 3, 4, 10, 5, 9, 2, 1}, {7, 8}})); + generators.push_back(MakePerm(n, {{0, 3, 1, 2}, {4, 10, 9, 5}})); + + std::vector schrier_vector, orbit; + GetSchreierVectorAndOrbit(0, generators, &schrier_vector, &orbit); + EXPECT_THAT(schrier_vector, ElementsAre(-1, 2, 2, 0, 0, 0, -1, -1, -1, 2, 0)); + std::vector generators_idx = TracePoint(9, schrier_vector, generators); + std::vector points = {"0", "1", "2", "3", "4", "5", + "6", "7", "8", "9", "10"}; + for (const int i : generators_idx) { + generators[i]->ApplyToDenseCollection(points); + } + // It needs to take the base point 0 to the traced point 9. + EXPECT_THAT(points, ElementsAre("9", "10", "1", "4", "5", "2", "7", "6", "8", + "3", "0")); + GetSchreierVectorAndOrbit(6, generators, &schrier_vector, &orbit); + EXPECT_THAT(orbit, UnorderedElementsAre(6, 7, 8)); + EXPECT_THAT(schrier_vector, + ElementsAre(-1, -1, -1, -1, -1, -1, -1, 0, 1, -1, -1)); +} + +TEST(GetSchreierVectorTest, ProjectivePlaneOrderTwo) { + const int n = 7; + std::vector> generators; + generators.push_back(MakePerm(n, {{0, 1, 3, 4, 6, 2, 5}})); + generators.push_back(MakePerm(n, {{1, 3}, {2, 4}})); + + std::vector schrier_vector, orbit; + GetSchreierVectorAndOrbit(0, generators, &schrier_vector, &orbit); + EXPECT_THAT(schrier_vector, ElementsAre(-1, 0, 1, 0, 0, 0, 0)); + EXPECT_THAT(orbit, UnorderedElementsAre(0, 1, 2, 3, 4, 5, 6)); + + // Now let's see the stabilizer of the point 0. 
+ std::vector> stabilizer; + + stabilizer.push_back(MakePerm(n, {{1, 3}, {2, 4}})); + stabilizer.push_back(MakePerm(n, {{3, 4}, {5, 6}})); + stabilizer.push_back(MakePerm(n, {{3, 5}, {4, 6}})); + + GetSchreierVectorAndOrbit(1, stabilizer, &schrier_vector, &orbit); + EXPECT_THAT(schrier_vector, ElementsAre(-1, -1, 0, 0, 1, 2, 2)); +} + } // namespace } // namespace sat } // namespace operations_research From b34393660a49b4bb6ed0f0ef6699a33084dfa9fd Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 27 Sep 2024 16:29:32 +0200 Subject: [PATCH 029/105] Fix NetBSD build (Fix #4359 #4361) --- cmake/Makefile | 59 +++++++----- cmake/vagrant/netbsd/cpp/Vagrantfile | 115 ++++++++++++++++++++++ cmake/vagrant/netbsd/dotnet/Vagrantfile | 118 +++++++++++++++++++++++ cmake/vagrant/netbsd/java/Vagrantfile | 119 +++++++++++++++++++++++ cmake/vagrant/netbsd/python/Vagrantfile | 122 ++++++++++++++++++++++++ ortools/base/sysinfo.cc | 6 +- ortools/python/setup.py.in | 1 + ortools/util/fp_utils.h | 6 ++ ortools/util/zvector.h | 3 +- 9 files changed, 522 insertions(+), 27 deletions(-) create mode 100644 cmake/vagrant/netbsd/cpp/Vagrantfile create mode 100644 cmake/vagrant/netbsd/dotnet/Vagrantfile create mode 100644 cmake/vagrant/netbsd/java/Vagrantfile create mode 100644 cmake/vagrant/netbsd/python/Vagrantfile diff --git a/cmake/Makefile b/cmake/Makefile index 848e9bd1c9..6d6dfa5fc8 100644 --- a/cmake/Makefile +++ b/cmake/Makefile @@ -143,7 +143,8 @@ help: @echo -e "\t${BOLD}clean_vms${RESET}: Remove ALL vagrant box." @echo @echo -e "\tWith ${BOLD}${RESET}:" - @echo -e "\t\t${BOLD}freebsd${RESET} (FreeBSD)" + @echo -e "\t\t${BOLD}freebsd${RESET} (FreeBSD 14)" + @echo -e "\t\t${BOLD}netbsd${RESET} (NetBSD 9)" @echo -e "\te.g. 'make freebsd_cpp'" @echo @echo -e "\t${BOLD}glop_${RESET}: Build Glop using an Ubuntu:rolling docker image." 
@@ -717,30 +718,42 @@ clean_web: $(addprefix clean_web_, $(WEB_STAGES)) ############# ## VAGRANT ## ############# -VMS := freebsd +VAGRANT_VMS := \ + freebsd \ + netbsd \ -freebsd_targets = $(addprefix freebsd_, $(LANGUAGES)) -.PHONY: freebsd $(freebsd_targets) -freebsd: $(freebsd_targets) -$(freebsd_targets): freebsd_%: vagrant/freebsd/%/Vagrantfile - @cd vagrant/freebsd/$* && vagrant destroy -f - cd vagrant/freebsd/$* && vagrant box update - cd vagrant/freebsd/$* && vagrant up +define make-vagrant-target = +#$$(info VMS: $1) +#$$(info Create target: $1_.) +$1_targets = $(addprefix $1_, $(LANGUAGES)) +.PHONY: $1 $$($1_targets) +$1: $$($1_targets) +$$($1_targets): $1_%: vagrant/$1/%/Vagrantfile + @cd vagrant/$1/$$* && vagrant destroy -f + cd vagrant/$1/$$* && vagrant box update + cd vagrant/$1/$$* && vagrant up -# SSH to a freebsd_ vagrant machine (debug). -sh_freebsd_targets = $(addprefix sh_freebsd_, $(LANGUAGES)) -.PHONY: $(sh_freebsd_targets) -$(sh_freebsd_targets): sh_freebsd_%: - cd vagrant/freebsd/$* && vagrant up - cd vagrant/freebsd/$* && vagrant ssh +#$$(info Create targets: sh_$1_ vagrant machine (debug).) 
+sh_$1_targets = $(addprefix sh_$1_, $(LANGUAGES)) +.PHONY: $$(sh_$1_targets) +$$(sh_$1_targets): sh_$1_%: + cd vagrant/$1/$$* && vagrant up + cd vagrant/$1/$$* && vagrant ssh -# Clean FreeBSD vagrant machine -clean_freebsd_targets = $(addprefix clean_freebsd_, $(LANGUAGES)) -.PHONY: clean_freebsd $(clean_freebsd_targets) -clean_freebsd: $(clean_freebsd_targets) -$(clean_freebsd_targets): clean_freebsd_%: - cd vagrant/freebsd/$* && vagrant destroy -f - -rm -rf vagrant/freebsd/$*/.vagrant +#$$(info Create targets: clean_$1) +clean_$1_targets = $(addprefix clean_$1_, $(LANGUAGES)) +.PHONY: clean_$1 $(clean_$1_targets) +clean_$1: $$(clean_$1_targets) +$$(clean_$1_targets): clean_$1_%: + cd vagrant/$1/$$* && vagrant destroy -f + -rm -rf vagrant/$1/$$*/.vagrant +endef + +$(foreach vms,$(VAGRANT_VMS),$(eval $(call make-vagrant-target,$(vms)))) + +## MERGE ## +.PHONY: clean_vagrant +clean_vagrant: $(addprefix clean_, $(VAGRANT_VMS)) ########## ## GLOP ## @@ -779,7 +792,7 @@ clean_glop: $(addprefix clean_glop_, $(STAGES)) ## CLEAN ## ########### .PHONY: clean -clean: clean_all clean_platforms clean_toolchains clean_web clean_freebsd clean_glop +clean: clean_all clean_platforms clean_toolchains clean_web clean_vagrant clean_glop docker container prune -f docker image prune -f -rmdir cache diff --git a/cmake/vagrant/netbsd/cpp/Vagrantfile b/cmake/vagrant/netbsd/cpp/Vagrantfile new file mode 100644 index 0000000000..0378b3d5a8 --- /dev/null +++ b/cmake/vagrant/netbsd/cpp/Vagrantfile @@ -0,0 +1,115 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. +Vagrant.configure("2") do |config| + # The most common configuration options are documented and commented below. 
+ # For a complete reference, please see the online documentation at + # https://docs.vagrantup.com. + + # Every Vagrant development environment requires a box. You can search for + # boxes at https://vagrantcloud.com/search. + config.vm.guest = :netbsd + config.vm.box = "generic/netbsd9" + config.vm.provider "virtualbox" do |v| + v.name = "ortools_netbsd_cpp" + end + config.ssh.shell = "sh" + + # Disable automatic box update checking. If you disable this, then + # boxes will only be checked for updates when the user runs + # `vagrant box outdated`. This is not recommended. + # config.vm.box_check_update = false + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine. In the example below, + # accessing "localhost:8080" will access port 80 on the guest machine. + # NOTE: This will enable public access to the opened port + # config.vm.network "forwarded_port", guest: 80, host: 8080 + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine and only allow access + # via 127.0.0.1 to disable public access + # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" + + # Create a private network, which allows host-only access to the machine + # using a specific IP. + # config.vm.network "private_network", ip: "192.168.33.10" + + # Create a public network, which generally matched to bridged network. + # Bridged networks make the machine appear as another physical device on + # your network. + # config.vm.network "public_network" + + # Share an additional folder to the guest VM. The first argument is + # the path on the host to the actual folder. The second argument is + # the path on the guest to mount the folder. And the optional third + # argument is a set of non-required options. 
+ #config.vm.synced_folder "../../..", "/home/vagrant/project" + config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + + + # Provider-specific configuration so you can fine-tune various + # backing providers for Vagrant. These expose provider-specific options. + # Example for VirtualBox: + # + # config.vm.provider "virtualbox" do |vb| + # # Display the VirtualBox GUI when booting the machine + # vb.gui = true + # + # # Customize the amount of memory on the VM: + # vb.memory = "1024" + # end + # + # View the documentation for the provider you are using for more + # information on available options. + + # Enable provisioning with a shell script. Additional provisioners such as + # Ansible, Chef, Docker, Puppet and Salt are also available. Please see the + # documentation for more information about their specific syntax and use. + # note: clang installed by default + config.vm.provision "env", type: "shell", inline:<<-SHELL + set -x + pkg update -f + pkg install -y git cmake + SHELL + + config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../cmake", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../ortools", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../examples/contrib", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/cpp", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/dotnet", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/java", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/python", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/tests", destination: "$HOME/project/examples/" + config.vm.provision "file", source: 
"../../../../patches", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../Version.txt", destination: "$HOME/project/" + + config.vm.provision "devel", type: "shell", inline:<<-SHELL + set -x + cd project + ls + SHELL + + config.vm.provision "configure", type: "shell", inline:<<-SHELL + set -x + cd project + cmake -S. -Bbuild -DBUILD_DEPS=ON + SHELL + + config.vm.provision "build", type: "shell", inline:<<-SHELL + set -x + cd project + cmake --build build -v + SHELL + + config.vm.provision "test", type: "shell", inline:<<-SHELL + set -x + cd project + cmake --build build --target test -v + SHELL +end diff --git a/cmake/vagrant/netbsd/dotnet/Vagrantfile b/cmake/vagrant/netbsd/dotnet/Vagrantfile new file mode 100644 index 0000000000..bceb231d8b --- /dev/null +++ b/cmake/vagrant/netbsd/dotnet/Vagrantfile @@ -0,0 +1,118 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. +Vagrant.configure("2") do |config| + # The most common configuration options are documented and commented below. + # For a complete reference, please see the online documentation at + # https://docs.vagrantup.com. + + # Every Vagrant development environment requires a box. You can search for + # boxes at https://vagrantcloud.com/search. + config.vm.guest = :netbsd + config.vm.box = "generic/netbsd9" + config.vm.provider "virtualbox" do |v| + v.name = "ortools_netbsd_dotnet" + end + config.ssh.shell = "sh" + + # Disable automatic box update checking. If you disable this, then + # boxes will only be checked for updates when the user runs + # `vagrant box outdated`. This is not recommended. 
+ # config.vm.box_check_update = false + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine. In the example below, + # accessing "localhost:8080" will access port 80 on the guest machine. + # NOTE: This will enable public access to the opened port + # config.vm.network "forwarded_port", guest: 80, host: 8080 + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine and only allow access + # via 127.0.0.1 to disable public access + # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" + + # Create a private network, which allows host-only access to the machine + # using a specific IP. + # config.vm.network "private_network", ip: "192.168.33.10" + + # Create a public network, which generally matched to bridged network. + # Bridged networks make the machine appear as another physical device on + # your network. + # config.vm.network "public_network" + + # Share an additional folder to the guest VM. The first argument is + # the path on the host to the actual folder. The second argument is + # the path on the guest to mount the folder. And the optional third + # argument is a set of non-required options. + #config.vm.synced_folder "../../..", "/home/vagrant/project" + config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + + + # Provider-specific configuration so you can fine-tune various + # backing providers for Vagrant. These expose provider-specific options. + # Example for VirtualBox: + # + # config.vm.provider "virtualbox" do |vb| + # # Display the VirtualBox GUI when booting the machine + # vb.gui = true + # + # # Customize the amount of memory on the VM: + # vb.memory = "1024" + # end + # + # View the documentation for the provider you are using for more + # information on available options. + + # Enable provisioning with a shell script. 
Additional provisioners such as + # Ansible, Chef, Docker, Puppet and Salt are also available. Please see the + # documentation for more information about their specific syntax and use. + # note: clang installed by default + config.vm.provision "env", type: "shell", inline:<<-SHELL + set -x + pkg update -f + pkg install -y git cmake + kldload linux64 + pkg install -y swig linux-dotnet-sdk + SHELL + + config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../cmake", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../ortools", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../examples/contrib", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/cpp", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/dotnet", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/java", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/python", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/tests", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../patches", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../Version.txt", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../tools/doc/orLogo.png", destination: "$HOME/project/tools/doc/" + + config.vm.provision "devel", type: "shell", inline:<<-SHELL + set -x + cd project + ls + SHELL + + config.vm.provision "configure", type: "shell", inline:<<-SHELL + set -x + cd project + cmake -S. 
-Bbuild -DBUILD_DOTNET=ON -DBUILD_CXX_SAMPLES=OFF -DBUILD_CXX_EXAMPLES=OFF + SHELL + + config.vm.provision "build", type: "shell", inline:<<-SHELL + set -x + cd project + cmake --build build -v + SHELL + + config.vm.provision "test", type: "shell", inline:<<-SHELL + set -x + cd project + cmake --build build --target test -v + SHELL +end diff --git a/cmake/vagrant/netbsd/java/Vagrantfile b/cmake/vagrant/netbsd/java/Vagrantfile new file mode 100644 index 0000000000..050e73496e --- /dev/null +++ b/cmake/vagrant/netbsd/java/Vagrantfile @@ -0,0 +1,119 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. +Vagrant.configure("2") do |config| + # The most common configuration options are documented and commented below. + # For a complete reference, please see the online documentation at + # https://docs.vagrantup.com. + + # Every Vagrant development environment requires a box. You can search for + # boxes at https://vagrantcloud.com/search. + config.vm.guest = :netbsd + config.vm.box = "generic/netbsd9" + config.vm.provider "virtualbox" do |v| + v.name = "ortools_netbsd_java" + end + config.ssh.shell = "sh" + + # Disable automatic box update checking. If you disable this, then + # boxes will only be checked for updates when the user runs + # `vagrant box outdated`. This is not recommended. + # config.vm.box_check_update = false + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine. In the example below, + # accessing "localhost:8080" will access port 80 on the guest machine. 
+ # NOTE: This will enable public access to the opened port + # config.vm.network "forwarded_port", guest: 80, host: 8080 + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine and only allow access + # via 127.0.0.1 to disable public access + # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" + + # Create a private network, which allows host-only access to the machine + # using a specific IP. + # config.vm.network "private_network", ip: "192.168.33.10" + + # Create a public network, which generally matched to bridged network. + # Bridged networks make the machine appear as another physical device on + # your network. + # config.vm.network "public_network" + + # Share an additional folder to the guest VM. The first argument is + # the path on the host to the actual folder. The second argument is + # the path on the guest to mount the folder. And the optional third + # argument is a set of non-required options. + #config.vm.synced_folder "../../..", "/home/vagrant/project" + config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + + + # Provider-specific configuration so you can fine-tune various + # backing providers for Vagrant. These expose provider-specific options. + # Example for VirtualBox: + # + # config.vm.provider "virtualbox" do |vb| + # # Display the VirtualBox GUI when booting the machine + # vb.gui = true + # + # # Customize the amount of memory on the VM: + # vb.memory = "1024" + # end + # + # View the documentation for the provider you are using for more + # information on available options. + + # Enable provisioning with a shell script. Additional provisioners such as + # Ansible, Chef, Docker, Puppet and Salt are also available. Please see the + # documentation for more information about their specific syntax and use. 
+ # note: clang installed by default + config.vm.provision "env", type: "shell", inline:<<-SHELL + set -x + pkgin update + pkgin -y install git cmake + pkgin -y install swig openjdk11 maven + mount -t fdesc fdesc /dev/fd + mount -t procfs proc /proc + SHELL + + config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../cmake", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../ortools", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../examples/contrib", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/cpp", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/dotnet", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/java", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/python", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/tests", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../patches", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../Version.txt", destination: "$HOME/project/" + + config.vm.provision "devel", type: "shell", inline:<<-SHELL + set -x + cd project + ls + SHELL + + config.vm.provision "configure", type: "shell", inline:<<-SHELL + set -x + cd project + export JAVA_HOME=/usr/pkg/java/openjdk11 + cmake -S. 
-Bbuild -DBUILD_JAVA=ON -DBUILD_CXX_SAMPLES=OFF -DBUILD_CXX_EXAMPLES=OFF + SHELL + + config.vm.provision "build", type: "shell", inline:<<-SHELL + set -x + cd project + cmake --build build -v + SHELL + + config.vm.provision "test", type: "shell", inline:<<-SHELL + set -x + cd project + cmake --build build --target test -v + SHELL +end diff --git a/cmake/vagrant/netbsd/python/Vagrantfile b/cmake/vagrant/netbsd/python/Vagrantfile new file mode 100644 index 0000000000..6cfb5783f6 --- /dev/null +++ b/cmake/vagrant/netbsd/python/Vagrantfile @@ -0,0 +1,122 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. +Vagrant.configure("2") do |config| + # The most common configuration options are documented and commented below. + # For a complete reference, please see the online documentation at + # https://docs.vagrantup.com. + + # Every Vagrant development environment requires a box. You can search for + # boxes at https://vagrantcloud.com/search. + config.vm.guest = :netbsd + config.vm.box = "generic/netbsd9" + config.vm.provider "virtualbox" do |v| + v.name = "ortools_netbsd_python" + end + config.ssh.shell = "sh" + + # Disable automatic box update checking. If you disable this, then + # boxes will only be checked for updates when the user runs + # `vagrant box outdated`. This is not recommended. + # config.vm.box_check_update = false + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine. In the example below, + # accessing "localhost:8080" will access port 80 on the guest machine. 
+ # NOTE: This will enable public access to the opened port + # config.vm.network "forwarded_port", guest: 80, host: 8080 + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine and only allow access + # via 127.0.0.1 to disable public access + # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" + + # Create a private network, which allows host-only access to the machine + # using a specific IP. + # config.vm.network "private_network", ip: "192.168.33.10" + + # Create a public network, which generally matched to bridged network. + # Bridged networks make the machine appear as another physical device on + # your network. + # config.vm.network "public_network" + + # Share an additional folder to the guest VM. The first argument is + # the path on the host to the actual folder. The second argument is + # the path on the guest to mount the folder. And the optional third + # argument is a set of non-required options. + #config.vm.synced_folder "../../..", "/home/vagrant/project" + config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + + + # Provider-specific configuration so you can fine-tune various + # backing providers for Vagrant. These expose provider-specific options. + # Example for VirtualBox: + # + # config.vm.provider "virtualbox" do |vb| + # # Display the VirtualBox GUI when booting the machine + # vb.gui = true + # + # # Customize the amount of memory on the VM: + # vb.memory = "1024" + # end + # + # View the documentation for the provider you are using for more + # information on available options. + + # Enable provisioning with a shell script. Additional provisioners such as + # Ansible, Chef, Docker, Puppet and Salt are also available. Please see the + # documentation for more information about their specific syntax and use. 
+ # note: clang installed by default + config.vm.provision "env", type: "shell", inline:<<-SHELL + set -x + pkgin update + pkgin -y install git cmake + pkgin -y install swig python39 py39-wheel py39-pip py39-pytest-virtualenv + pkgin -y install py39-numpy py39-pandas py39-matplotlib + SHELL + + config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../cmake", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../ortools", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../examples/contrib", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/cpp", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/dotnet", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/java", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/python", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/tests", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../patches", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../LICENSE", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../Version.txt", destination: "$HOME/project/" + + config.vm.provision "devel", type: "shell", inline:<<-SHELL + set -x + export PATH=${HOME}/.local/bin:"$PATH" + cd project + ls + SHELL + + config.vm.provision "configure", type: "shell", inline:<<-SHELL + set -x + export PATH=${HOME}/.local/bin:"$PATH" + cd project + cmake -S. 
-Bbuild -DBUILD_PYTHON=ON -DVENV_USE_SYSTEM_SITE_PACKAGES=ON -DBUILD_CXX_SAMPLES=OFF -DBUILD_CXX_EXAMPLES=OFF + SHELL + + config.vm.provision "build", type: "shell", inline:<<-SHELL + set -x + export PATH=${HOME}/.local/bin:"$PATH" + cd project + cmake --build build -v + SHELL + + config.vm.provision "test", type: "shell", inline:<<-SHELL + set -x + export PATH=${HOME}/.local/bin:"$PATH" + cd project + cmake --build build --target test -v + SHELL +end diff --git a/ortools/base/sysinfo.cc b/ortools/base/sysinfo.cc index bb019f1a4c..4418c3dc0e 100644 --- a/ortools/base/sysinfo.cc +++ b/ortools/base/sysinfo.cc @@ -17,7 +17,7 @@ #if defined(__APPLE__) && defined(__GNUC__) // MacOS #include #include -#elif (defined(__FreeBSD__) || defined(__OpenBSD__)) // FreeBSD or OpenBSD +#elif (defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) // [Free,Net,Open]BSD #include #include // Windows @@ -48,7 +48,7 @@ int64_t GetProcessMemoryUsage() { int64_t resident_memory = t_info.resident_size; return resident_memory; } -#elif defined(__GNUC__) && !defined(__FreeBSD__) && !defined(__OpenBSD__) && \ +#elif defined(__GNUC__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) && \ !defined(__EMSCRIPTEN__) && !defined(_WIN32) // Linux int64_t GetProcessMemoryUsage() { unsigned size = 0; @@ -61,7 +61,7 @@ int64_t GetProcessMemoryUsage() { fclose(pf); return int64_t{1024} * size; } -#elif (defined(__FreeBSD__) || defined(__OpenBSD__)) // FreeBSD or OpenBSD +#elif (defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) // [Free,Net,Open]BSD int64_t GetProcessMemoryUsage() { int who = RUSAGE_SELF; struct rusage rusage; diff --git a/ortools/python/setup.py.in b/ortools/python/setup.py.in index d750714e21..2ffbce0675 100644 --- a/ortools/python/setup.py.in +++ b/ortools/python/setup.py.in @@ -149,6 +149,7 @@ setup( 'Operating System :: Unix', 'Operating System :: POSIX :: Linux', 'Operating System :: POSIX :: BSD :: FreeBSD', + 'Operating 
System :: POSIX :: BSD :: NetBSD', 'Operating System :: POSIX :: BSD :: OpenBSD', 'Operating System :: MacOS', 'Operating System :: MacOS :: MacOS X', diff --git a/ortools/util/fp_utils.h b/ortools/util/fp_utils.h index 34e0a0d8e7..88e962e1e1 100644 --- a/ortools/util/fp_utils.h +++ b/ortools/util/fp_utils.h @@ -92,10 +92,16 @@ class ScopedFloatingPointEnv { fenv_.__control &= ~excepts; #elif (defined(__FreeBSD__) || defined(__OpenBSD__)) fenv_.__x87.__control &= ~excepts; +#elif defined(__NetBSD__) + fenv_.x87.control &= ~excepts; #else // Linux fenv_.__control_word &= ~excepts; #endif +#if defined(__NetBSD__) + fenv_.mxcsr &= ~(excepts << 7); +#else fenv_.__mxcsr &= ~(excepts << 7); +#endif CHECK_EQ(0, fesetenv(&fenv_)); #endif } diff --git a/ortools/util/zvector.h b/ortools/util/zvector.h index ec21520c70..e5e51f71ca 100644 --- a/ortools/util/zvector.h +++ b/ortools/util/zvector.h @@ -14,7 +14,8 @@ #ifndef OR_TOOLS_UTIL_ZVECTOR_H_ #define OR_TOOLS_UTIL_ZVECTOR_H_ -#if (defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__)) && \ +#if (defined(__APPLE__) || \ + defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \ defined(__GNUC__) #include #elif !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__MINGW64__) From 129038009babb9256d12b8e7d188822ad047dc21 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 27 Sep 2024 17:04:19 +0200 Subject: [PATCH 030/105] cmake: Add openBSD vagrant based CI --- cmake/Makefile | 2 + cmake/vagrant/openbsd/cpp/Vagrantfile | 115 +++++++++++++++++++++ cmake/vagrant/openbsd/dotnet/Vagrantfile | 118 ++++++++++++++++++++++ cmake/vagrant/openbsd/java/Vagrantfile | 119 ++++++++++++++++++++++ cmake/vagrant/openbsd/python/Vagrantfile | 122 +++++++++++++++++++++++ 5 files changed, 476 insertions(+) create mode 100644 cmake/vagrant/openbsd/cpp/Vagrantfile create mode 100644 cmake/vagrant/openbsd/dotnet/Vagrantfile create mode 100644 cmake/vagrant/openbsd/java/Vagrantfile create mode 100644 
cmake/vagrant/openbsd/python/Vagrantfile diff --git a/cmake/Makefile b/cmake/Makefile index 6d6dfa5fc8..a2e09550c2 100644 --- a/cmake/Makefile +++ b/cmake/Makefile @@ -145,6 +145,7 @@ help: @echo -e "\tWith ${BOLD}${RESET}:" @echo -e "\t\t${BOLD}freebsd${RESET} (FreeBSD 14)" @echo -e "\t\t${BOLD}netbsd${RESET} (NetBSD 9)" + @echo -e "\t\t${BOLD}openbsd${RESET} (OpenBSD 7)" @echo -e "\te.g. 'make freebsd_cpp'" @echo @echo -e "\t${BOLD}glop_${RESET}: Build Glop using an Ubuntu:rolling docker image." @@ -721,6 +722,7 @@ clean_web: $(addprefix clean_web_, $(WEB_STAGES)) VAGRANT_VMS := \ freebsd \ netbsd \ + openbsd define make-vagrant-target = #$$(info VMS: $1) diff --git a/cmake/vagrant/openbsd/cpp/Vagrantfile b/cmake/vagrant/openbsd/cpp/Vagrantfile new file mode 100644 index 0000000000..85a99d367b --- /dev/null +++ b/cmake/vagrant/openbsd/cpp/Vagrantfile @@ -0,0 +1,115 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. +Vagrant.configure("2") do |config| + # The most common configuration options are documented and commented below. + # For a complete reference, please see the online documentation at + # https://docs.vagrantup.com. + + # Every Vagrant development environment requires a box. You can search for + # boxes at https://vagrantcloud.com/search. + config.vm.guest = :openbsd + config.vm.box = "generic/openbsd7" + config.vm.provider "virtualbox" do |v| + v.name = "ortools_openbsd_cpp" + end + config.ssh.shell = "sh" + + # Disable automatic box update checking. If you disable this, then + # boxes will only be checked for updates when the user runs + # `vagrant box outdated`. This is not recommended. 
+ # config.vm.box_check_update = false + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine. In the example below, + # accessing "localhost:8080" will access port 80 on the guest machine. + # NOTE: This will enable public access to the opened port + # config.vm.network "forwarded_port", guest: 80, host: 8080 + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine and only allow access + # via 127.0.0.1 to disable public access + # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" + + # Create a private network, which allows host-only access to the machine + # using a specific IP. + # config.vm.network "private_network", ip: "192.168.33.10" + + # Create a public network, which generally matched to bridged network. + # Bridged networks make the machine appear as another physical device on + # your network. + # config.vm.network "public_network" + + # Share an additional folder to the guest VM. The first argument is + # the path on the host to the actual folder. The second argument is + # the path on the guest to mount the folder. And the optional third + # argument is a set of non-required options. + #config.vm.synced_folder "../../..", "/home/vagrant/project" + config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + + + # Provider-specific configuration so you can fine-tune various + # backing providers for Vagrant. These expose provider-specific options. + # Example for VirtualBox: + # + # config.vm.provider "virtualbox" do |vb| + # # Display the VirtualBox GUI when booting the machine + # vb.gui = true + # + # # Customize the amount of memory on the VM: + # vb.memory = "1024" + # end + # + # View the documentation for the provider you are using for more + # information on available options. + + # Enable provisioning with a shell script. 
+ Additional provisioners such as + # Ansible, Chef, Docker, Puppet and Salt are also available. Please see the + # documentation for more information about their specific syntax and use. + # note: clang installed by default + config.vm.provision "env", type: "shell", inline:<<-SHELL + set -x + pkg_add -u + pkg_add git cmake + SHELL + + config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../cmake", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../ortools", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../examples/contrib", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/cpp", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/dotnet", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/java", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/python", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/tests", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../patches", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../Version.txt", destination: "$HOME/project/" + + config.vm.provision "devel", type: "shell", inline:<<-SHELL + set -x + cd project + ls + SHELL + + config.vm.provision "configure", type: "shell", inline:<<-SHELL + set -x + cd project + cmake -S. 
-Bbuild -DBUILD_DEPS=ON + SHELL + + config.vm.provision "build", type: "shell", inline:<<-SHELL + set -x + cd project + cmake --build build -v + SHELL + + config.vm.provision "test", type: "shell", inline:<<-SHELL + set -x + cd project + cmake --build build --target test -v + SHELL +end diff --git a/cmake/vagrant/openbsd/dotnet/Vagrantfile b/cmake/vagrant/openbsd/dotnet/Vagrantfile new file mode 100644 index 0000000000..b129bca007 --- /dev/null +++ b/cmake/vagrant/openbsd/dotnet/Vagrantfile @@ -0,0 +1,118 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. +Vagrant.configure("2") do |config| + # The most common configuration options are documented and commented below. + # For a complete reference, please see the online documentation at + # https://docs.vagrantup.com. + + # Every Vagrant development environment requires a box. You can search for + # boxes at https://vagrantcloud.com/search. + config.vm.guest = :openbsd + config.vm.box = "generic/openbsd7" + config.vm.provider "virtualbox" do |v| + v.name = "ortools_openbsd_dotnet" + end + config.ssh.shell = "sh" + + # Disable automatic box update checking. If you disable this, then + # boxes will only be checked for updates when the user runs + # `vagrant box outdated`. This is not recommended. + # config.vm.box_check_update = false + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine. In the example below, + # accessing "localhost:8080" will access port 80 on the guest machine. 
+ # NOTE: This will enable public access to the opened port + # config.vm.network "forwarded_port", guest: 80, host: 8080 + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine and only allow access + # via 127.0.0.1 to disable public access + # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" + + # Create a private network, which allows host-only access to the machine + # using a specific IP. + # config.vm.network "private_network", ip: "192.168.33.10" + + # Create a public network, which generally matched to bridged network. + # Bridged networks make the machine appear as another physical device on + # your network. + # config.vm.network "public_network" + + # Share an additional folder to the guest VM. The first argument is + # the path on the host to the actual folder. The second argument is + # the path on the guest to mount the folder. And the optional third + # argument is a set of non-required options. + #config.vm.synced_folder "../../..", "/home/vagrant/project" + config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + + + # Provider-specific configuration so you can fine-tune various + # backing providers for Vagrant. These expose provider-specific options. + # Example for VirtualBox: + # + # config.vm.provider "virtualbox" do |vb| + # # Display the VirtualBox GUI when booting the machine + # vb.gui = true + # + # # Customize the amount of memory on the VM: + # vb.memory = "1024" + # end + # + # View the documentation for the provider you are using for more + # information on available options. + + # Enable provisioning with a shell script. Additional provisioners such as + # Ansible, Chef, Docker, Puppet and Salt are also available. Please see the + # documentation for more information about their specific syntax and use. 
+ # note: clang installed by default + config.vm.provision "env", type: "shell", inline:<<-SHELL + set -x + pkg update -f + pkg install -y git cmake + kldload linux64 + pkg install -y swig linux-dotnet-sdk + SHELL + + config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../cmake", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../ortools", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../examples/contrib", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/cpp", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/dotnet", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/java", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/python", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/tests", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../patches", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../Version.txt", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../tools/doc/orLogo.png", destination: "$HOME/project/tools/doc/" + + config.vm.provision "devel", type: "shell", inline:<<-SHELL + set -x + cd project + ls + SHELL + + config.vm.provision "configure", type: "shell", inline:<<-SHELL + set -x + cd project + cmake -S. 
-Bbuild -DBUILD_DOTNET=ON -DBUILD_CXX_SAMPLES=OFF -DBUILD_CXX_EXAMPLES=OFF + SHELL + + config.vm.provision "build", type: "shell", inline:<<-SHELL + set -x + cd project + cmake --build build -v + SHELL + + config.vm.provision "test", type: "shell", inline:<<-SHELL + set -x + cd project + cmake --build build --target test -v + SHELL +end diff --git a/cmake/vagrant/openbsd/java/Vagrantfile b/cmake/vagrant/openbsd/java/Vagrantfile new file mode 100644 index 0000000000..c029867471 --- /dev/null +++ b/cmake/vagrant/openbsd/java/Vagrantfile @@ -0,0 +1,119 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. +Vagrant.configure("2") do |config| + # The most common configuration options are documented and commented below. + # For a complete reference, please see the online documentation at + # https://docs.vagrantup.com. + + # Every Vagrant development environment requires a box. You can search for + # boxes at https://vagrantcloud.com/search. + config.vm.guest = :openbsd + config.vm.box = "generic/openbsd7" + config.vm.provider "virtualbox" do |v| + v.name = "ortools_openbsd_java" + end + config.ssh.shell = "sh" + + # Disable automatic box update checking. If you disable this, then + # boxes will only be checked for updates when the user runs + # `vagrant box outdated`. This is not recommended. + # config.vm.box_check_update = false + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine. In the example below, + # accessing "localhost:8080" will access port 80 on the guest machine. 
+ # NOTE: This will enable public access to the opened port + # config.vm.network "forwarded_port", guest: 80, host: 8080 + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine and only allow access + # via 127.0.0.1 to disable public access + # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" + + # Create a private network, which allows host-only access to the machine + # using a specific IP. + # config.vm.network "private_network", ip: "192.168.33.10" + + # Create a public network, which generally matched to bridged network. + # Bridged networks make the machine appear as another physical device on + # your network. + # config.vm.network "public_network" + + # Share an additional folder to the guest VM. The first argument is + # the path on the host to the actual folder. The second argument is + # the path on the guest to mount the folder. And the optional third + # argument is a set of non-required options. + #config.vm.synced_folder "../../..", "/home/vagrant/project" + config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + + + # Provider-specific configuration so you can fine-tune various + # backing providers for Vagrant. These expose provider-specific options. + # Example for VirtualBox: + # + # config.vm.provider "virtualbox" do |vb| + # # Display the VirtualBox GUI when booting the machine + # vb.gui = true + # + # # Customize the amount of memory on the VM: + # vb.memory = "1024" + # end + # + # View the documentation for the provider you are using for more + # information on available options. + + # Enable provisioning with a shell script. Additional provisioners such as + # Ansible, Chef, Docker, Puppet and Salt are also available. Please see the + # documentation for more information about their specific syntax and use. 
+ # note: clang installed by default + config.vm.provision "env", type: "shell", inline:<<-SHELL + set -x + pkg update -f + pkg install -y git cmake + pkg install -y swig openjdk11 maven + mount -t fdescfs fdesc /dev/fd + mount -t procfs proc /proc + SHELL + + config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../cmake", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../ortools", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../examples/contrib", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/cpp", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/dotnet", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/java", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/python", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/tests", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../patches", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../Version.txt", destination: "$HOME/project/" + + config.vm.provision "devel", type: "shell", inline:<<-SHELL + set -x + cd project + ls + SHELL + + config.vm.provision "configure", type: "shell", inline:<<-SHELL + set -x + cd project + export JAVA_HOME=/usr/local/openjdk11 + cmake -S. 
-Bbuild -DBUILD_JAVA=ON -DBUILD_CXX_SAMPLES=OFF -DBUILD_CXX_EXAMPLES=OFF + SHELL + + config.vm.provision "build", type: "shell", inline:<<-SHELL + set -x + cd project + cmake --build build -v + SHELL + + config.vm.provision "test", type: "shell", inline:<<-SHELL + set -x + cd project + cmake --build build --target test -v + SHELL +end diff --git a/cmake/vagrant/openbsd/python/Vagrantfile b/cmake/vagrant/openbsd/python/Vagrantfile new file mode 100644 index 0000000000..d2083458d3 --- /dev/null +++ b/cmake/vagrant/openbsd/python/Vagrantfile @@ -0,0 +1,122 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. +Vagrant.configure("2") do |config| + # The most common configuration options are documented and commented below. + # For a complete reference, please see the online documentation at + # https://docs.vagrantup.com. + + # Every Vagrant development environment requires a box. You can search for + # boxes at https://vagrantcloud.com/search. + config.vm.guest = :openbsd + config.vm.box = "generic/openbsd7" + config.vm.provider "virtualbox" do |v| + v.name = "ortools_openbsd_python" + end + config.ssh.shell = "sh" + + # Disable automatic box update checking. If you disable this, then + # boxes will only be checked for updates when the user runs + # `vagrant box outdated`. This is not recommended. + # config.vm.box_check_update = false + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine. In the example below, + # accessing "localhost:8080" will access port 80 on the guest machine. 
+ # NOTE: This will enable public access to the opened port + # config.vm.network "forwarded_port", guest: 80, host: 8080 + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine and only allow access + # via 127.0.0.1 to disable public access + # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" + + # Create a private network, which allows host-only access to the machine + # using a specific IP. + # config.vm.network "private_network", ip: "192.168.33.10" + + # Create a public network, which generally matched to bridged network. + # Bridged networks make the machine appear as another physical device on + # your network. + # config.vm.network "public_network" + + # Share an additional folder to the guest VM. The first argument is + # the path on the host to the actual folder. The second argument is + # the path on the guest to mount the folder. And the optional third + # argument is a set of non-required options. + #config.vm.synced_folder "../../..", "/home/vagrant/project" + config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + + + # Provider-specific configuration so you can fine-tune various + # backing providers for Vagrant. These expose provider-specific options. + # Example for VirtualBox: + # + # config.vm.provider "virtualbox" do |vb| + # # Display the VirtualBox GUI when booting the machine + # vb.gui = true + # + # # Customize the amount of memory on the VM: + # vb.memory = "1024" + # end + # + # View the documentation for the provider you are using for more + # information on available options. + + # Enable provisioning with a shell script. Additional provisioners such as + # Ansible, Chef, Docker, Puppet and Salt are also available. Please see the + # documentation for more information about their specific syntax and use. 
+ # note: clang installed by default + config.vm.provision "env", type: "shell", inline:<<-SHELL + set -x + pkg update -f + pkg install -y git cmake + pkg install -y swig python39 py39-wheel py39-pip py39-pytest-virtualenv + pkg install -y py39-numpy py39-pandas py39-matplotlib + SHELL + + config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../cmake", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../ortools", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../examples/contrib", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/cpp", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/dotnet", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/java", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/python", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../examples/tests", destination: "$HOME/project/examples/" + config.vm.provision "file", source: "../../../../patches", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../LICENSE", destination: "$HOME/project/" + config.vm.provision "file", source: "../../../../Version.txt", destination: "$HOME/project/" + + config.vm.provision "devel", type: "shell", inline:<<-SHELL + set -x + export PATH=${HOME}/.local/bin:"$PATH" + cd project + ls + SHELL + + config.vm.provision "configure", type: "shell", inline:<<-SHELL + set -x + export PATH=${HOME}/.local/bin:"$PATH" + cd project + cmake -S. 
-Bbuild -DBUILD_PYTHON=ON -DVENV_USE_SYSTEM_SITE_PACKAGES=ON -DBUILD_CXX_SAMPLES=OFF -DBUILD_CXX_EXAMPLES=OFF + SHELL + + config.vm.provision "build", type: "shell", inline:<<-SHELL + set -x + export PATH=${HOME}/.local/bin:"$PATH" + cd project + cmake --build build -v + SHELL + + config.vm.provision "test", type: "shell", inline:<<-SHELL + set -x + export PATH=${HOME}/.local/bin:"$PATH" + cd project + cmake --build build --target test -v + SHELL +end From df4b32b4a46277f6e100f2e417cbb599e30b4436 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 30 Sep 2024 11:17:31 +0200 Subject: [PATCH 031/105] sat: Fix golang doc --- ortools/sat/docs/README.md | 2 +- ortools/sat/docs/boolean_logic.md | 12 ++++++------ ortools/sat/docs/channeling.md | 8 ++++---- ortools/sat/docs/integer_arithmetic.md | 14 +++++++------- ortools/sat/docs/model.md | 2 +- ortools/sat/docs/scheduling.md | 8 ++++---- ortools/sat/docs/solver.md | 18 +++++++++--------- 7 files changed, 32 insertions(+), 32 deletions(-) diff --git a/ortools/sat/docs/README.md b/ortools/sat/docs/README.md index 82df9dec24..d5f6111098 100644 --- a/ortools/sat/docs/README.md +++ b/ortools/sat/docs/README.md @@ -227,8 +227,8 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" - "ortools/sat/go/cpmodel" ) func simpleSatProgram() error { diff --git a/ortools/sat/docs/boolean_logic.md b/ortools/sat/docs/boolean_logic.md index db47873d84..bef7d6a775 100644 --- a/ortools/sat/docs/boolean_logic.md +++ b/ortools/sat/docs/boolean_logic.md @@ -114,7 +114,7 @@ package main import ( log "github.com/golang/glog" - "ortools/sat/go/cpmodel" + "github.com/google/or-tools/ortools/sat/go/cpmodel" ) func literalSampleSat() { @@ -248,7 +248,7 @@ public class BoolOrSampleSat package main import ( - "ortools/sat/go/cpmodel" + "github.com/google/or-tools/ortools/sat/go/cpmodel" ) func boolOrSampleSat() { @@ -434,7 +434,7 @@ 
public class ReifiedSampleSat package main import ( - "ortools/sat/go/cpmodel" + "github.com/google/or-tools/ortools/sat/go/cpmodel" ) func reifiedSampleSat() { @@ -526,9 +526,9 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - "ortools/sat/go/cpmodel" ) func booleanProductSample() error { @@ -552,11 +552,11 @@ func booleanProductSample() error { } // Set `fill_additional_solutions_in_response` and `enumerate_all_solutions` to true so // the solver returns all solutions found. - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ FillAdditionalSolutionsInResponse: proto.Bool(true), EnumerateAllSolutions: proto.Bool(true), SolutionPoolSize: proto.Int32(4), - }.Build() + } response, err := cpmodel.SolveCpModelWithParameters(m, params) if err != nil { return fmt.Errorf("failed to solve the model: %w", err) diff --git a/ortools/sat/docs/channeling.md b/ortools/sat/docs/channeling.md index f4811905ff..6feed79c20 100644 --- a/ortools/sat/docs/channeling.md +++ b/ortools/sat/docs/channeling.md @@ -309,10 +309,10 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - "ortools/sat/go/cpmodel" ) func channelingSampleSat() error { @@ -344,12 +344,12 @@ func channelingSampleSat() error { if err != nil { return fmt.Errorf("failed to instantiate the CP model: %w", err) } - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ FillAdditionalSolutionsInResponse: proto.Bool(true), EnumerateAllSolutions: proto.Bool(true), SolutionPoolSize: proto.Int32(11), SearchBranching: sppb.SatParameters_FIXED_SEARCH.Enum(), - }.Build() + } response, err := cpmodel.SolveCpModelWithParameters(m, params) 
if err != nil { return fmt.Errorf("failed to solve the model: %w", err) @@ -896,7 +896,7 @@ import ( "fmt" log "github.com/golang/glog" - "ortools/sat/go/cpmodel" + "github.com/google/or-tools/ortools/sat/go/cpmodel" ) const ( diff --git a/ortools/sat/docs/integer_arithmetic.md b/ortools/sat/docs/integer_arithmetic.md index 1a7e0e3aac..7fac0f7d1e 100644 --- a/ortools/sat/docs/integer_arithmetic.md +++ b/ortools/sat/docs/integer_arithmetic.md @@ -276,8 +276,8 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" - "ortools/sat/go/cpmodel" ) const numAnimals = 20 @@ -676,10 +676,10 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - "ortools/sat/go/cpmodel" ) const ( @@ -719,12 +719,12 @@ func earlinessTardinessCostSampleSat() error { if err != nil { return fmt.Errorf("failed to instantiate the CP model: %w", err) } - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ FillAdditionalSolutionsInResponse: proto.Bool(true), EnumerateAllSolutions: proto.Bool(true), SolutionPoolSize: proto.Int32(21), SearchBranching: sppb.SatParameters_FIXED_SEARCH.Enum(), - }.Build() + } response, err := cpmodel.SolveCpModelWithParameters(m, params) if err != nil { return fmt.Errorf("failed to solve the model: %w", err) @@ -1132,10 +1132,10 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - "ortools/sat/go/cpmodel" ) func stepFunctionSampleSat() error { @@ -1185,12 +1185,12 @@ func stepFunctionSampleSat() error { if err != nil { return 
fmt.Errorf("failed to instantiate the CP model: %w", err) } - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ FillAdditionalSolutionsInResponse: proto.Bool(true), EnumerateAllSolutions: proto.Bool(true), SolutionPoolSize: proto.Int32(21), SearchBranching: sppb.SatParameters_FIXED_SEARCH.Enum(), - }.Build() + } response, err := cpmodel.SolveCpModelWithParameters(m, params) if err != nil { return fmt.Errorf("failed to solve the model: %w", err) diff --git a/ortools/sat/docs/model.md b/ortools/sat/docs/model.md index 2218611998..1bc3f3ce50 100644 --- a/ortools/sat/docs/model.md +++ b/ortools/sat/docs/model.md @@ -311,8 +311,8 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" - "ortools/sat/go/cpmodel" ) func solutionHintingSampleSat() error { diff --git a/ortools/sat/docs/scheduling.md b/ortools/sat/docs/scheduling.md index 00aacda186..091ab8891f 100644 --- a/ortools/sat/docs/scheduling.md +++ b/ortools/sat/docs/scheduling.md @@ -196,7 +196,7 @@ import ( "fmt" log "github.com/golang/glog" - "ortools/sat/go/cpmodel" + "github.com/google/or-tools/ortools/sat/go/cpmodel" ) const horizon = 100 @@ -422,7 +422,7 @@ import ( "fmt" log "github.com/golang/glog" - "ortools/sat/go/cpmodel" + "github.com/google/or-tools/ortools/sat/go/cpmodel" ) const horizon = 100 @@ -842,8 +842,8 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" - "ortools/sat/go/cpmodel" ) const horizon = 21 // 3 weeks @@ -1866,8 +1866,8 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" - "ortools/sat/go/cpmodel" ) const ( diff --git a/ortools/sat/docs/solver.md b/ortools/sat/docs/solver.md index 6feee57dde..6bff2619c0 100644 --- a/ortools/sat/docs/solver.md 
+++ b/ortools/sat/docs/solver.md @@ -195,10 +195,10 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - "ortools/sat/go/cpmodel" ) func solveWithTimeLimitSampleSat() error { @@ -217,9 +217,9 @@ func solveWithTimeLimitSampleSat() error { } // Sets a time limit of 10 seconds. - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ MaxTimeInSeconds: proto.Float64(10.0), - }.Build() + } // Solve. response, err := cpmodel.SolveCpModelWithParameters(m, params) @@ -536,9 +536,9 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - "ortools/sat/go/cpmodel" ) func solveAndPrintIntermediateSolutionsSampleSat() error { @@ -562,10 +562,10 @@ func solveAndPrintIntermediateSolutionsSampleSat() error { // Currently, the CpModelBuilder does not allow for callbacks, so intermediate solutions // cannot be printed while solving. However, the CP-SAT solver does allow for returning // the intermediate solutions found while solving in the response. 
- params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ FillAdditionalSolutionsInResponse: proto.Bool(true), SolutionPoolSize: proto.Int32(10), - }.Build() + } response, err := cpmodel.SolveCpModelWithParameters(m, params) if err != nil { return fmt.Errorf("failed to solve the model: %w", err) @@ -873,9 +873,9 @@ import ( "fmt" log "github.com/golang/glog" + "github.com/google/or-tools/ortools/sat/go/cpmodel" sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" "google.golang.org/protobuf/proto" - "ortools/sat/go/cpmodel" ) func searchForAllSolutionsSampleSat() error { @@ -895,11 +895,11 @@ func searchForAllSolutionsSampleSat() error { // Currently, the CpModelBuilder does not allow for callbacks, so each feasible solution cannot // be printed while solving. However, the CP Solver can return all of the enumerated solutions // in the response by setting the following parameters. - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ EnumerateAllSolutions: proto.Bool(true), FillAdditionalSolutionsInResponse: proto.Bool(true), SolutionPoolSize: proto.Int32(27), - }.Build() + } response, err := cpmodel.SolveCpModelWithParameters(m, params) if err != nil { return fmt.Errorf("failed to solve the model: %w", err) From b1c46facd631be24b2b586a4e2619e95634d4461 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 30 Sep 2024 11:17:53 +0200 Subject: [PATCH 032/105] base: fix missing include in memutil.h --- ortools/base/memutil.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ortools/base/memutil.h b/ortools/base/memutil.h index 1d6fd87bae..d2bdbab628 100644 --- a/ortools/base/memutil.h +++ b/ortools/base/memutil.h @@ -14,6 +14,9 @@ #ifndef OR_TOOLS_BASE_MEMUTIL_H_ #define OR_TOOLS_BASE_MEMUTIL_H_ +#include +#include + #include "absl/strings/internal/memutil.h" namespace strings { From e34c9ee4e3a3b8c1ca128e9b559e39f826bff491 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 30 Sep 2024 11:18:08 +0200 
Subject: [PATCH 033/105] pdlp: export from google3 --- ortools/pdlp/BUILD.bazel | 7 +-- ortools/pdlp/primal_dual_hybrid_gradient.cc | 3 +- ortools/pdlp/sharded_quadratic_program.cc | 20 ++++---- ortools/pdlp/sharded_quadratic_program.h | 14 +++--- ortools/pdlp/sharder.cc | 52 ++++++++++----------- ortools/pdlp/sharder.h | 18 +++---- ortools/pdlp/sharder_test.cc | 12 ++--- ortools/pdlp/solvers.proto | 5 ++ 8 files changed, 67 insertions(+), 64 deletions(-) diff --git a/ortools/pdlp/BUILD.bazel b/ortools/pdlp/BUILD.bazel index 0dcc5fc5f7..d1e2b01098 100644 --- a/ortools/pdlp/BUILD.bazel +++ b/ortools/pdlp/BUILD.bazel @@ -273,9 +273,10 @@ cc_library( hdrs = ["sharded_quadratic_program.h"], deps = [ ":quadratic_program", + ":scheduler", ":sharder", + ":solvers_cc_proto", "//ortools/base", - "//ortools/base:threadpool", "//ortools/util:logging", "@com_google_absl//absl/memory", "@com_google_absl//absl/strings", @@ -301,9 +302,9 @@ cc_library( srcs = ["sharder.cc"], hdrs = ["sharder.h"], deps = [ + ":scheduler", "//ortools/base", "//ortools/base:mathutil", - "//ortools/base:threadpool", "//ortools/base:timer", "@com_google_absl//absl/synchronization", "@com_google_absl//absl/time", @@ -317,10 +318,10 @@ cc_test( srcs = ["sharder_test.cc"], deps = [ ":gtest_main", + ":scheduler", ":sharder", "//ortools/base", "//ortools/base:mathutil", - "//ortools/base:threadpool", "@com_google_absl//absl/random:distributions", "@eigen//:eigen3", ], diff --git a/ortools/pdlp/primal_dual_hybrid_gradient.cc b/ortools/pdlp/primal_dual_hybrid_gradient.cc index 7b2a8013aa..28daaf173b 100644 --- a/ortools/pdlp/primal_dual_hybrid_gradient.cc +++ b/ortools/pdlp/primal_dual_hybrid_gradient.cc @@ -718,7 +718,8 @@ PreprocessSolver::PreprocessSolver(QuadraticProgram qp, : num_threads_( NumThreads(params.num_threads(), params.num_shards(), qp, *logger)), num_shards_(NumShards(num_threads_, params.num_shards())), - sharded_qp_(std::move(qp), num_threads_, num_shards_), + sharded_qp_(std::move(qp), 
num_threads_, num_shards_, + params.scheduler_type(), nullptr), logger_(*logger) {} SolverResult ErrorSolverResult(const TerminationReason reason, diff --git a/ortools/pdlp/sharded_quadratic_program.cc b/ortools/pdlp/sharded_quadratic_program.cc index d4f72b3106..d00198f735 100644 --- a/ortools/pdlp/sharded_quadratic_program.cc +++ b/ortools/pdlp/sharded_quadratic_program.cc @@ -14,6 +14,7 @@ #include "ortools/pdlp/sharded_quadratic_program.h" #include +#include #include #include #include @@ -23,9 +24,10 @@ #include "absl/log/check.h" #include "absl/strings/string_view.h" #include "ortools/base/logging.h" -#include "ortools/base/threadpool.h" #include "ortools/pdlp/quadratic_program.h" +#include "ortools/pdlp/scheduler.h" #include "ortools/pdlp/sharder.h" +#include "ortools/pdlp/solvers.pb.h" #include "ortools/util/logging.h" namespace operations_research::pdlp { @@ -76,24 +78,22 @@ void WarnIfMatrixUnbalanced( ShardedQuadraticProgram::ShardedQuadraticProgram( QuadraticProgram qp, const int num_threads, const int num_shards, - operations_research::SolverLogger* logger) + SchedulerType scheduler_type, operations_research::SolverLogger* logger) : qp_(std::move(qp)), transposed_constraint_matrix_(qp_.constraint_matrix.transpose()), - thread_pool_(num_threads == 1 - ? nullptr - : std::make_unique("PDLP", num_threads)), + scheduler_(num_threads == 1 ? 
nullptr + : MakeScheduler(scheduler_type, num_threads)), constraint_matrix_sharder_(qp_.constraint_matrix, num_shards, - thread_pool_.get()), + scheduler_.get()), transposed_constraint_matrix_sharder_(transposed_constraint_matrix_, - num_shards, thread_pool_.get()), + num_shards, scheduler_.get()), primal_sharder_(qp_.variable_lower_bounds.size(), num_shards, - thread_pool_.get()), + scheduler_.get()), dual_sharder_(qp_.constraint_lower_bounds.size(), num_shards, - thread_pool_.get()) { + scheduler_.get()) { CHECK_GE(num_threads, 1); CHECK_GE(num_shards, num_threads); if (num_threads > 1) { - thread_pool_->StartWorkers(); const int64_t work_per_iteration = qp_.constraint_matrix.nonZeros() + qp_.variable_lower_bounds.size() + qp_.constraint_lower_bounds.size(); diff --git a/ortools/pdlp/sharded_quadratic_program.h b/ortools/pdlp/sharded_quadratic_program.h index 9a80ec5f3a..fa5c34a91e 100644 --- a/ortools/pdlp/sharded_quadratic_program.h +++ b/ortools/pdlp/sharded_quadratic_program.h @@ -17,13 +17,13 @@ #include #include #include -#include #include "Eigen/Core" #include "Eigen/SparseCore" -#include "ortools/base/threadpool.h" #include "ortools/pdlp/quadratic_program.h" +#include "ortools/pdlp/scheduler.h" #include "ortools/pdlp/sharder.h" +#include "ortools/pdlp/solvers.pb.h" #include "ortools/util/logging.h" namespace operations_research::pdlp { @@ -31,7 +31,7 @@ namespace operations_research::pdlp { // This class stores: // - A `QuadraticProgram` (QP) // - A transposed version of the QP's constraint matrix -// - A thread pool +// - A thread scheduler // - Various `Sharder` objects for doing sharded matrix and vector // computations. class ShardedQuadraticProgram { @@ -40,8 +40,10 @@ class ShardedQuadraticProgram { // Note that the `qp` is intentionally passed by value. // If `logger` is not nullptr, warns about unbalanced matrices using it; // otherwise warns via Google standard logging. 
- ShardedQuadraticProgram(QuadraticProgram qp, int num_threads, int num_shards, - operations_research::SolverLogger* logger = nullptr); + ShardedQuadraticProgram( + QuadraticProgram qp, int num_threads, int num_shards, + SchedulerType scheduler_type = SCHEDULER_TYPE_GOOGLE_THREADPOOL, + operations_research::SolverLogger* logger = nullptr); // Movable but not copyable. ShardedQuadraticProgram(const ShardedQuadraticProgram&) = delete; @@ -114,7 +116,7 @@ class ShardedQuadraticProgram { QuadraticProgram qp_; Eigen::SparseMatrix transposed_constraint_matrix_; - std::unique_ptr thread_pool_; + std::unique_ptr scheduler_; Sharder constraint_matrix_sharder_; Sharder transposed_constraint_matrix_sharder_; Sharder primal_sharder_; diff --git a/ortools/pdlp/sharder.cc b/ortools/pdlp/sharder.cc index 308b7e0bf5..2ff72ce80a 100644 --- a/ortools/pdlp/sharder.cc +++ b/ortools/pdlp/sharder.cc @@ -26,17 +26,17 @@ #include "absl/time/time.h" #include "ortools/base/logging.h" #include "ortools/base/mathutil.h" -#include "ortools/base/threadpool.h" #include "ortools/base/timer.h" +#include "ortools/pdlp/scheduler.h" namespace operations_research::pdlp { using ::Eigen::VectorXd; Sharder::Sharder(const int64_t num_elements, const int num_shards, - ThreadPool* const thread_pool, + Scheduler* const scheduler, const std::function& element_mass) - : thread_pool_(thread_pool) { + : scheduler_(scheduler) { CHECK_GE(num_elements, 0); if (num_elements == 0) { shard_starts_.push_back(0); @@ -70,8 +70,8 @@ Sharder::Sharder(const int64_t num_elements, const int num_shards, } Sharder::Sharder(const int64_t num_elements, const int num_shards, - ThreadPool* const thread_pool) - : thread_pool_(thread_pool) { + Scheduler* const scheduler) + : scheduler_(scheduler) { CHECK_GE(num_elements, 0); if (num_elements == 0) { shard_starts_.push_back(0); @@ -104,34 +104,30 @@ Sharder::Sharder(const Sharder& other_sharder, const int64_t num_elements) // The `std::max()` protects against 
`other_sharder.NumShards() == 0`, which // will happen if `other_sharder` had `num_elements == 0`. : Sharder(num_elements, std::max(1, other_sharder.NumShards()), - other_sharder.thread_pool_) {} + other_sharder.scheduler_) {} void Sharder::ParallelForEachShard( const std::function& func) const { - if (thread_pool_) { + if (scheduler_) { absl::BlockingCounter counter(NumShards()); VLOG(2) << "Starting ParallelForEachShard()"; - for (int shard_num = 0; shard_num < NumShards(); ++shard_num) { - thread_pool_->Schedule([&, shard_num]() { - WallTimer timer; - if (VLOG_IS_ON(2)) { - timer.Start(); - } - func(Shard(shard_num, this)); - if (VLOG_IS_ON(2)) { - timer.Stop(); - VLOG(2) << "Shard " << shard_num << " with " << ShardSize(shard_num) - << " elements and " << ShardMass(shard_num) - << " mass finished with " - << ShardMass(shard_num) / - std::max(int64_t{1}, absl::ToInt64Microseconds( - timer.GetDuration())) - << " mass/usec."; - } - counter.DecrementCount(); - }); - } - counter.Wait(); + scheduler_->ParallelFor(0, NumShards(), [&](int shard_num) { + WallTimer timer; + if (VLOG_IS_ON(2)) { + timer.Start(); + } + func(Shard(shard_num, this)); + if (VLOG_IS_ON(2)) { + timer.Stop(); + VLOG(2) << "Shard " << shard_num << " with " << ShardSize(shard_num) + << " elements and " << ShardMass(shard_num) + << " mass finished with " + << ShardMass(shard_num) / + std::max(int64_t{1}, + absl::ToInt64Microseconds(timer.GetDuration())) + << " mass/usec."; + } + }); VLOG(2) << "Done ParallelForEachShard()"; } else { for (int shard_num = 0; shard_num < NumShards(); ++shard_num) { diff --git a/ortools/pdlp/sharder.h b/ortools/pdlp/sharder.h index 2187be3795..877d543620 100644 --- a/ortools/pdlp/sharder.h +++ b/ortools/pdlp/sharder.h @@ -22,7 +22,7 @@ #include "Eigen/Core" #include "Eigen/SparseCore" #include "absl/log/check.h" -#include "ortools/base/threadpool.h" +#include "ortools/pdlp/scheduler.h" namespace operations_research::pdlp { @@ -141,26 +141,26 @@ class Sharder { // 
Creates a `Sharder` for problems with `num_elements` elements and mass of // each element given by `element_mass`. Each shard will have roughly the same // mass. The number of shards in the resulting `Sharder` will be approximately - // `num_shards` but may differ. The `thread_pool` will be used for parallel - // operations executed by e.g. `ParallelForEachShard()`. The `thread_pool` may + // `num_shards` but may differ. The `scheduler` will be used for parallel + // operations executed by e.g. `ParallelForEachShard()`. The `scheduler` may // be nullptr, which means work will be executed in the same thread. If - // `thread_pool` is not nullptr, the underlying object is not owned and must + // `scheduler` is not nullptr, the underlying object is not owned and must // outlive the `Sharder`. - Sharder(int64_t num_elements, int num_shards, ThreadPool* thread_pool, + Sharder(int64_t num_elements, int num_shards, Scheduler* scheduler, const std::function& element_mass); // Creates a `Sharder` for problems with `num_elements` elements and unit // mass. This constructor exploits having all element mass equal to 1 to take // time proportional to `num_shards` instead of `num_elements`. Also see the // comments above the first constructor. - Sharder(int64_t num_elements, int num_shards, ThreadPool* thread_pool); + Sharder(int64_t num_elements, int num_shards, Scheduler* scheduler); // Creates a `Sharder` for processing `matrix`. The elements correspond to // columns of `matrix` and have mass linear in the number of non-zeros. Also // see the comments above the first constructor. Sharder(const Eigen::SparseMatrix& matrix, - int num_shards, ThreadPool* thread_pool) - : Sharder(matrix.cols(), num_shards, thread_pool, [&matrix](int64_t col) { + int num_shards, Scheduler* scheduler) + : Sharder(matrix.cols(), num_shards, scheduler, [&matrix](int64_t col) { return 1 + 1 * matrix.col(col).nonZeros(); }) {} @@ -227,7 +227,7 @@ class Sharder { // Size: `NumShards()`. 
The mass of each shard. std::vector shard_masses_; // NOT owned. May be nullptr. - ThreadPool* thread_pool_; + Scheduler* scheduler_; }; // Like `matrix.transpose() * vector` but executed in parallel using `sharder`. diff --git a/ortools/pdlp/sharder_test.cc b/ortools/pdlp/sharder_test.cc index fb37b34881..e77274e278 100644 --- a/ortools/pdlp/sharder_test.cc +++ b/ortools/pdlp/sharder_test.cc @@ -27,7 +27,7 @@ #include "ortools/base/gmock.h" #include "ortools/base/logging.h" #include "ortools/base/mathutil.h" -#include "ortools/base/threadpool.h" +#include "ortools/pdlp/scheduler.h" namespace operations_research::pdlp { namespace { @@ -434,9 +434,8 @@ TEST_P(VariousSizesTest, LargeMatVec) { LargeSparseMatrix(size); const int num_threads = 5; const int shards_per_thread = 3; - ThreadPool pool("MatrixVectorProductTest", num_threads); - pool.StartWorkers(); - Sharder sharder(mat, shards_per_thread * num_threads, &pool); + GoogleThreadPoolScheduler scheduler(num_threads); + Sharder sharder(mat, shards_per_thread * num_threads, &scheduler); VectorXd rhs = VectorXd::Random(size); VectorXd direct = mat.transpose() * rhs; VectorXd threaded = TransposedMatrixVectorProduct(mat, rhs, sharder); @@ -446,9 +445,8 @@ TEST_P(VariousSizesTest, LargeMatVec) { TEST_P(VariousSizesTest, LargeVectors) { const int64_t size = GetParam(); const int num_threads = 5; - ThreadPool pool("SquaredNormTest", num_threads); - pool.StartWorkers(); - Sharder sharder(size, num_threads, &pool); + GoogleThreadPoolScheduler scheduler(num_threads); + Sharder sharder(size, num_threads, &scheduler); VectorXd vec = VectorXd::Random(size); const double direct = vec.squaredNorm(); const double threaded = SquaredNorm(vec, sharder); diff --git a/ortools/pdlp/solvers.proto b/ortools/pdlp/solvers.proto index 163b83af2b..fefbdfe409 100644 --- a/ortools/pdlp/solvers.proto +++ b/ortools/pdlp/solvers.proto @@ -295,6 +295,11 @@ message PrimalDualHybridGradientParams { // Otherwise a default that depends on num_threads 
will be used. optional int32 num_shards = 27 [default = 0]; + // The type of scheduler used for CPU multi-threading. See the documentation + // of the corresponding enum for more details. + optional SchedulerType scheduler_type = 32 + [default = SCHEDULER_TYPE_GOOGLE_THREADPOOL]; + // If true, the iteration_stats field of the SolveLog output will be populated // at every iteration. Note that we only compute solution statistics at // termination checks. Setting this parameter to true may substantially From 7ab4c1f4380a9ebf86485df6b4746df9260ef9dc Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 30 Sep 2024 14:30:43 +0200 Subject: [PATCH 034/105] routing: Fix parsers export --- ortools/routing/parsers/cvrptw_lib.cc | 57 ++++++++++-------- ortools/routing/parsers/cvrptw_lib.h | 34 +++++------ ortools/routing/samples/cvrp_disjoint_tw.cc | 31 +++++----- ortools/routing/samples/cvrptw.cc | 31 +++++----- .../routing/samples/cvrptw_soft_capacity.cc | 31 +++++----- ortools/routing/samples/cvrptw_with_breaks.cc | 33 +++++----- .../samples/cvrptw_with_precedences.cc | 30 +++++----- .../routing/samples/cvrptw_with_refueling.cc | 60 ++++++++++++------- .../routing/samples/cvrptw_with_resources.cc | 30 +++++----- .../cvrptw_with_stop_times_and_resources.cc | 30 +++++----- .../cvrptw_with_time_dependent_costs.cc | 32 +++++----- 11 files changed, 215 insertions(+), 184 deletions(-) diff --git a/ortools/routing/parsers/cvrptw_lib.cc b/ortools/routing/parsers/cvrptw_lib.cc index 446cb04cb5..d3f8553199 100644 --- a/ortools/routing/parsers/cvrptw_lib.cc +++ b/ortools/routing/parsers/cvrptw_lib.cc @@ -20,9 +20,11 @@ #include #include #include +#include #include "absl/container/btree_set.h" #include "absl/random/distributions.h" +#include "absl/random/random.h" #include "absl/strings/str_format.h" #include "ortools/base/logging.h" #include "ortools/constraint_solver/constraint_solver.h" @@ -158,12 +160,17 @@ int64_t StopServiceTimePlusTransition::Compute(NodeIndex from, : 
stop_time_ + transition_time_(from, to); } -void DisplayPlan( - const RoutingIndexManager& manager, const RoutingModel& routing, - const operations_research::Assignment& plan, bool use_same_vehicle_costs, - int64_t max_nodes_per_group, int64_t same_vehicle_cost, - const operations_research::RoutingDimension& capacity_dimension, - const operations_research::RoutingDimension& time_dimension) { +void DisplayPlan(const RoutingIndexManager& manager, + const RoutingModel& routing, + const operations_research::Assignment& plan, + bool use_same_vehicle_costs, int64_t max_nodes_per_group, + int64_t same_vehicle_cost, + const std::vector& dimension_names) { + std::vector dimensions; + for (const std::string& dimension_name : dimension_names) { + dimensions.push_back(&routing.GetDimensionOrDie(dimension_name)); + } + // Display plan cost. std::string plan_output = absl::StrFormat("Cost %d\n", plan.ObjectiveValue()); @@ -208,6 +215,18 @@ void DisplayPlan( } // Display actual output for each vehicle. + const auto str_append_variable = + [&plan, &plan_output](const IntVar* var, const std::string& name) { + if (var == nullptr || !plan.Contains(var)) return; + const int64_t var_min = plan.Min(var); + const int64_t var_max = plan.Max(var); + if (var_min == var_max) { + absl::StrAppendFormat(&plan_output, "%s(%d) ", name, var_min); + } else { + absl::StrAppendFormat(&plan_output, "%s(%d, %d) ", name, var_min, + var_max); + } + }; for (int route_number = 0; route_number < routing.vehicles(); ++route_number) { int64_t order = routing.Start(route_number); @@ -216,26 +235,16 @@ void DisplayPlan( plan_output += "Empty\n"; } else { while (true) { - operations_research::IntVar* const load_var = - capacity_dimension.CumulVar(order); - operations_research::IntVar* const time_var = - time_dimension.CumulVar(order); - operations_research::IntVar* const slack_var = - routing.IsEnd(order) ? 
nullptr : time_dimension.SlackVar(order); - if (slack_var != nullptr && plan.Contains(slack_var)) { - absl::StrAppendFormat( - &plan_output, "%d Load(%d) Time(%d, %d) Slack(%d, %d)", - manager.IndexToNode(order).value(), plan.Value(load_var), - plan.Min(time_var), plan.Max(time_var), plan.Min(slack_var), - plan.Max(slack_var)); - } else { - absl::StrAppendFormat(&plan_output, "%d Load(%d) Time(%d, %d)", - manager.IndexToNode(order).value(), - plan.Value(load_var), plan.Min(time_var), - plan.Max(time_var)); + absl::StrAppendFormat(&plan_output, "%d ", + manager.IndexToNode(order).value()); + for (const RoutingDimension* dimension : dimensions) { + str_append_variable(dimension->CumulVar(order), dimension->name()); + operations_research::IntVar* const slack_var = + routing.IsEnd(order) ? nullptr : dimension->SlackVar(order); + str_append_variable(slack_var, dimension->name() + "Slack"); } if (routing.IsEnd(order)) break; - plan_output += " -> "; + plan_output += "-> "; order = plan.Value(routing.NextVar(order)); } plan_output += "\n"; diff --git a/ortools/routing/parsers/cvrptw_lib.h b/ortools/routing/parsers/cvrptw_lib.h index b855d174ca..18c7c7d9a0 100644 --- a/ortools/routing/parsers/cvrptw_lib.h +++ b/ortools/routing/parsers/cvrptw_lib.h @@ -93,44 +93,42 @@ class RandomDemand { // Service time (proportional to demand) + transition time callback. 
class ServiceTimePlusTransition { public: - ServiceTimePlusTransition( - int64_t time_per_demand_unit, - operations_research::RoutingNodeEvaluator2 demand, - operations_research::RoutingNodeEvaluator2 transition_time); + ServiceTimePlusTransition(int64_t time_per_demand_unit, + RoutingNodeEvaluator2 demand, + RoutingNodeEvaluator2 transition_time); int64_t Compute(RoutingIndexManager::NodeIndex from, RoutingIndexManager::NodeIndex to) const; private: const int64_t time_per_demand_unit_; - operations_research::RoutingNodeEvaluator2 demand_; - operations_research::RoutingNodeEvaluator2 transition_time_; + RoutingNodeEvaluator2 demand_; + RoutingNodeEvaluator2 transition_time_; }; // Stop service time + transition time callback. class StopServiceTimePlusTransition { public: - StopServiceTimePlusTransition( - int64_t stop_time, const LocationContainer& location_container, - operations_research::RoutingNodeEvaluator2 transition_time); + StopServiceTimePlusTransition(int64_t stop_time, + const LocationContainer& location_container, + RoutingNodeEvaluator2 transition_time); int64_t Compute(RoutingIndexManager::NodeIndex from, RoutingIndexManager::NodeIndex to) const; private: const int64_t stop_time_; const LocationContainer& location_container_; - operations_research::RoutingNodeEvaluator2 demand_; - operations_research::RoutingNodeEvaluator2 transition_time_; + RoutingNodeEvaluator2 demand_; + RoutingNodeEvaluator2 transition_time_; }; // Route plan displayer. // TODO(user): Move the display code to the routing library. 
-void DisplayPlan( - const operations_research::RoutingIndexManager& manager, - const operations_research::RoutingModel& routing, - const operations_research::Assignment& plan, bool use_same_vehicle_costs, - int64_t max_nodes_per_group, int64_t same_vehicle_cost, - const operations_research::RoutingDimension& capacity_dimension, - const operations_research::RoutingDimension& time_dimension); +void DisplayPlan(const RoutingIndexManager& manager, + const RoutingModel& routing, + const operations_research::Assignment& plan, + bool use_same_vehicle_costs, int64_t max_nodes_per_group, + int64_t same_vehicle_cost, + const std::vector& dimension_names); } // namespace operations_research::routing diff --git a/ortools/routing/samples/cvrp_disjoint_tw.cc b/ortools/routing/samples/cvrp_disjoint_tw.cc index 5e66df3c8d..563de8a7c6 100644 --- a/ortools/routing/samples/cvrp_disjoint_tw.cc +++ b/ortools/routing/samples/cvrp_disjoint_tw.cc @@ -24,34 +24,37 @@ #include #include +#include #include +#include #include #include +#include "absl/flags/flag.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" -#include "ortools/base/types.h" +#include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/index_manager.h" #include "ortools/routing/parameters.h" #include "ortools/routing/parameters.pb.h" #include "ortools/routing/parsers/cvrptw_lib.h" #include "ortools/routing/routing.h" +#include "ortools/routing/types.h" using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::GetSeed; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using 
operations_research::RoutingSearchParameters; -using operations_research::ServiceTimePlusTransition; using operations_research::Solver; +using operations_research::routing::DefaultRoutingSearchParameters; +using operations_research::routing::GetSeed; +using operations_research::routing::LocationContainer; +using operations_research::routing::RandomDemand; +using operations_research::routing::RoutingDimension; +using operations_research::routing::RoutingIndexManager; +using operations_research::routing::RoutingModel; +using operations_research::routing::RoutingNodeIndex; +using operations_research::routing::RoutingSearchParameters; +using operations_research::routing::ServiceTimePlusTransition; ABSL_FLAG(int, vrp_orders, 100, "Number of nodes in the problem."); ABSL_FLAG(int, vrp_vehicles, 20, "Number of vehicles in the problem."); @@ -189,9 +192,7 @@ int main(int argc, char** argv) { if (solution != nullptr) { DisplayPlan(manager, routing, *solution, absl::GetFlag(FLAGS_vrp_use_same_vehicle_costs), - kMaxNodesPerGroup, kSameVehicleCost, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); + kMaxNodesPerGroup, kSameVehicleCost, {kCapacity, kTime}); } else { LOG(INFO) << "No solution found."; } diff --git a/ortools/routing/samples/cvrptw.cc b/ortools/routing/samples/cvrptw.cc index f30796a4a6..c0ec0973c9 100644 --- a/ortools/routing/samples/cvrptw.cc +++ b/ortools/routing/samples/cvrptw.cc @@ -22,33 +22,36 @@ // to be in meters and times in seconds. 
#include +#include #include +#include #include #include +#include "absl/flags/flag.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" -#include "ortools/base/types.h" +#include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/index_manager.h" #include "ortools/routing/parameters.h" #include "ortools/routing/parameters.pb.h" #include "ortools/routing/parsers/cvrptw_lib.h" #include "ortools/routing/routing.h" +#include "ortools/routing/types.h" using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::GetSeed; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; -using operations_research::ServiceTimePlusTransition; +using operations_research::routing::DefaultRoutingSearchParameters; +using operations_research::routing::GetSeed; +using operations_research::routing::LocationContainer; +using operations_research::routing::RandomDemand; +using operations_research::routing::RoutingDimension; +using operations_research::routing::RoutingIndexManager; +using operations_research::routing::RoutingModel; +using operations_research::routing::RoutingNodeIndex; +using operations_research::routing::RoutingSearchParameters; +using operations_research::routing::ServiceTimePlusTransition; ABSL_FLAG(int, vrp_orders, 100, "Number of nodes in the problem"); ABSL_FLAG(int, vrp_vehicles, 20, "Number of vehicles in the problem"); @@ -174,9 +177,7 @@ int main(int argc, char** argv) { if (solution != nullptr) { DisplayPlan(manager, routing, *solution, 
absl::GetFlag(FLAGS_vrp_use_same_vehicle_costs), - kMaxNodesPerGroup, kSameVehicleCost, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); + kMaxNodesPerGroup, kSameVehicleCost, {kCapacity, kTime}); } else { LOG(INFO) << "No solution found."; } diff --git a/ortools/routing/samples/cvrptw_soft_capacity.cc b/ortools/routing/samples/cvrptw_soft_capacity.cc index cc9e0234ce..8b5f95b869 100644 --- a/ortools/routing/samples/cvrptw_soft_capacity.cc +++ b/ortools/routing/samples/cvrptw_soft_capacity.cc @@ -21,33 +21,36 @@ // distance. Distances are assumed to be in meters and times in seconds. #include +#include #include +#include #include #include +#include "absl/flags/flag.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" -#include "ortools/base/types.h" +#include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/index_manager.h" #include "ortools/routing/parameters.h" #include "ortools/routing/parameters.pb.h" #include "ortools/routing/parsers/cvrptw_lib.h" #include "ortools/routing/routing.h" +#include "ortools/routing/types.h" using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::GetSeed; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; -using operations_research::ServiceTimePlusTransition; +using operations_research::routing::DefaultRoutingSearchParameters; +using operations_research::routing::GetSeed; +using operations_research::routing::LocationContainer; +using operations_research::routing::RandomDemand; +using 
operations_research::routing::RoutingDimension; +using operations_research::routing::RoutingIndexManager; +using operations_research::routing::RoutingModel; +using operations_research::routing::RoutingNodeIndex; +using operations_research::routing::RoutingSearchParameters; +using operations_research::routing::ServiceTimePlusTransition; ABSL_FLAG(int, vrp_orders, 100, "Number of nodes in the problem."); ABSL_FLAG(int, vrp_vehicles, 20, "Number of vehicles in the problem."); @@ -198,9 +201,7 @@ int main(int argc, char** argv) { if (solution != nullptr) { DisplayPlan(manager, routing, *solution, absl::GetFlag(FLAGS_vrp_use_same_vehicle_costs), - kMaxNodesPerGroup, kSameVehicleCost, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); + kMaxNodesPerGroup, kSameVehicleCost, {kCapacity, kTime}); } else { LOG(INFO) << "No solution found."; } diff --git a/ortools/routing/samples/cvrptw_with_breaks.cc b/ortools/routing/samples/cvrptw_with_breaks.cc index 2b2b0cf4c4..a62a019cd6 100644 --- a/ortools/routing/samples/cvrptw_with_breaks.cc +++ b/ortools/routing/samples/cvrptw_with_breaks.cc @@ -26,38 +26,41 @@ // day or two smaller ones which can be taken during a longer period of the day. 
#include +#include #include +#include #include #include +#include "absl/flags/flag.h" #include "absl/random/random.h" #include "absl/strings/str_cat.h" #include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" -#include "ortools/base/types.h" +#include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" #include "ortools/routing/parameters.h" #include "ortools/routing/parameters.pb.h" #include "ortools/routing/parsers/cvrptw_lib.h" #include "ortools/routing/routing.h" +#include "ortools/routing/types.h" using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::FirstSolutionStrategy; -using operations_research::GetSeed; using operations_research::IntervalVar; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; -using operations_research::ServiceTimePlusTransition; using operations_research::Solver; +using operations_research::routing::DefaultRoutingSearchParameters; +using operations_research::routing::FirstSolutionStrategy; +using operations_research::routing::GetSeed; +using operations_research::routing::LocationContainer; +using operations_research::routing::RandomDemand; +using operations_research::routing::RoutingDimension; +using operations_research::routing::RoutingIndexManager; +using operations_research::routing::RoutingModel; +using operations_research::routing::RoutingNodeIndex; +using operations_research::routing::RoutingSearchParameters; +using operations_research::routing::ServiceTimePlusTransition; ABSL_FLAG(int, vrp_orders, 100, "Nodes in the 
problem."); ABSL_FLAG(int, vrp_vehicles, 20, @@ -227,9 +230,7 @@ int main(int argc, char** argv) { LOG(INFO) << break_interval.Var()->name() << " unperformed"; } } - DisplayPlan(manager, routing, *solution, false, 0, 0, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); + DisplayPlan(manager, routing, *solution, false, 0, 0, {kCapacity, kTime}); } else { LOG(INFO) << "No solution found."; } diff --git a/ortools/routing/samples/cvrptw_with_precedences.cc b/ortools/routing/samples/cvrptw_with_precedences.cc index 12ad737f16..d70041bdd8 100644 --- a/ortools/routing/samples/cvrptw_with_precedences.cc +++ b/ortools/routing/samples/cvrptw_with_precedences.cc @@ -23,33 +23,35 @@ #include #include +#include #include #include +#include "absl/flags/flag.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" -#include "ortools/base/types.h" +#include "ortools/constraint_solver/constraint_solver.h" #include "ortools/graph/graph_builder.h" #include "ortools/routing/index_manager.h" #include "ortools/routing/parameters.h" #include "ortools/routing/parameters.pb.h" #include "ortools/routing/parsers/cvrptw_lib.h" #include "ortools/routing/routing.h" +#include "ortools/routing/types.h" using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::GetSeed; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; -using operations_research::ServiceTimePlusTransition; +using operations_research::routing::DefaultRoutingSearchParameters; +using operations_research::routing::GetSeed; +using 
operations_research::routing::LocationContainer; +using operations_research::routing::RandomDemand; +using operations_research::routing::RoutingDimension; +using operations_research::routing::RoutingIndexManager; +using operations_research::routing::RoutingModel; +using operations_research::routing::RoutingNodeIndex; +using operations_research::routing::RoutingSearchParameters; +using operations_research::routing::ServiceTimePlusTransition; ABSL_FLAG(int, vrp_orders, 100, "Nodes in the problem."); ABSL_FLAG(int, vrp_vehicles, 20, @@ -202,9 +204,7 @@ int main(int argc, char** argv) { if (solution != nullptr) { DisplayPlan(manager, routing, *solution, absl::GetFlag(FLAGS_vrp_use_same_vehicle_costs), - kMaxNodesPerGroup, kSameVehicleCost, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); + kMaxNodesPerGroup, kSameVehicleCost, {kCapacity, kTime}); } else { LOG(INFO) << "No solution found."; } diff --git a/ortools/routing/samples/cvrptw_with_refueling.cc b/ortools/routing/samples/cvrptw_with_refueling.cc index 609fcfbbf9..e74c031af8 100644 --- a/ortools/routing/samples/cvrptw_with_refueling.cc +++ b/ortools/routing/samples/cvrptw_with_refueling.cc @@ -20,36 +20,40 @@ // reaches zero. Fuel consumption is proportional to the distance traveled. 
#include +#include #include +#include #include +#include "absl/flags/flag.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" -#include "ortools/base/types.h" +#include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/index_manager.h" #include "ortools/routing/parameters.h" #include "ortools/routing/parameters.pb.h" #include "ortools/routing/parsers/cvrptw_lib.h" #include "ortools/routing/routing.h" +#include "ortools/routing/types.h" using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::GetSeed; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; -using operations_research::ServiceTimePlusTransition; +using operations_research::Solver; +using operations_research::routing::DefaultRoutingSearchParameters; +using operations_research::routing::GetSeed; +using operations_research::routing::LocationContainer; +using operations_research::routing::RandomDemand; +using operations_research::routing::RoutingDimension; +using operations_research::routing::RoutingIndexManager; +using operations_research::routing::RoutingModel; +using operations_research::routing::RoutingNodeIndex; +using operations_research::routing::RoutingSearchParameters; +using operations_research::routing::ServiceTimePlusTransition; -ABSL_FLAG(int, vrp_orders, 100, "Nodes in the problem."); -ABSL_FLAG(int, vrp_vehicles, 20, - "Size of Traveling Salesman Problem instance."); +ABSL_FLAG(int, vrp_orders, 20, "Nodes in the problem."); +ABSL_FLAG(int, vrp_vehicles, 4, + "Size of the Vehicle Routing Problem 
instance."); ABSL_FLAG(bool, vrp_use_deterministic_random_seed, false, "Use deterministic random seeds."); ABSL_FLAG(std::string, routing_search_parameters, "", @@ -84,6 +88,7 @@ int main(int argc, char** argv) { const int64_t kXMax = 100000; const int64_t kYMax = 100000; const int64_t kSpeed = 10; + const int64_t kRefuelCost = 10; LocationContainer locations( kSpeed, absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed)); for (int location = 0; location <= absl::GetFlag(FLAGS_vrp_orders); @@ -95,7 +100,8 @@ int main(int argc, char** argv) { const int vehicle_cost = routing.RegisterTransitCallback( [&locations, &manager](int64_t i, int64_t j) { return locations.ManhattanDistance(manager.IndexToNode(i), - manager.IndexToNode(j)); + manager.IndexToNode(j)) + + (IsRefuelNode(i) ? kRefuelCost : 0); }); routing.SetArcCostEvaluatorOfAllVehicles(vehicle_cost); @@ -162,9 +168,21 @@ int main(int argc, char** argv) { // Only let slack free for refueling nodes. if (!IsRefuelNode(order) || routing.IsStart(order)) { fuel_dimension.SlackVar(order)->SetValue(0); + } else { + // Ensure that we do not refuel more than the capacity. + Solver* solver = routing.solver(); + solver->AddConstraint(solver->MakeSumLessOrEqual( + {fuel_dimension.SlackVar(order), fuel_dimension.CumulVar(order)}, + kFuelCapacity)); + routing.AddToAssignment(fuel_dimension.SlackVar(order)); } - // Needed to instantiate fuel quantity at each node. - routing.AddVariableMinimizedByFinalizer(fuel_dimension.CumulVar(order)); + // Needed to instantiate fuel quantity at each node. Deciding to refuel as + // much as possible to minimize the risk of running out of fuel. + routing.AddVariableMaximizedByFinalizer(fuel_dimension.CumulVar(order)); + } + for (int vehicle = 0; vehicle < routing.vehicles(); ++vehicle) { + routing.AddVariableMaximizedByFinalizer( + fuel_dimension.CumulVar(routing.End(vehicle))); } // Adding penalty costs to allow skipping orders. 
@@ -173,7 +191,8 @@ int main(int argc, char** argv) { for (RoutingIndexManager::NodeIndex order = kFirstNodeAfterDepot; order < routing.nodes(); ++order) { std::vector orders(1, manager.NodeToIndex(order)); - routing.AddDisjunction(orders, kPenalty); + routing.AddDisjunction( + orders, IsRefuelNode(manager.NodeToIndex(order)) ? 0 : kPenalty); } // Solve, returns a solution if any (owned by RoutingModel). @@ -184,8 +203,7 @@ int main(int argc, char** argv) { if (solution != nullptr) { DisplayPlan(manager, routing, *solution, /*use_same_vehicle_costs=*/false, /*max_nodes_per_group=*/0, /*same_vehicle_cost=*/0, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); + {kTime, kCapacity, kFuel}); } else { LOG(INFO) << "No solution found."; } diff --git a/ortools/routing/samples/cvrptw_with_resources.cc b/ortools/routing/samples/cvrptw_with_resources.cc index 6c4876975a..f32c74203d 100644 --- a/ortools/routing/samples/cvrptw_with_resources.cc +++ b/ortools/routing/samples/cvrptw_with_resources.cc @@ -22,35 +22,38 @@ // with variable demands. 
#include +#include #include +#include #include +#include "absl/flags/flag.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" -#include "ortools/base/types.h" +#include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/index_manager.h" #include "ortools/routing/parameters.h" #include "ortools/routing/parameters.pb.h" #include "ortools/routing/parsers/cvrptw_lib.h" #include "ortools/routing/routing.h" +#include "ortools/routing/types.h" using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::GetSeed; using operations_research::IntervalVar; using operations_research::IntVar; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; -using operations_research::ServiceTimePlusTransition; using operations_research::Solver; +using operations_research::routing::DefaultRoutingSearchParameters; +using operations_research::routing::GetSeed; +using operations_research::routing::LocationContainer; +using operations_research::routing::RandomDemand; +using operations_research::routing::RoutingDimension; +using operations_research::routing::RoutingIndexManager; +using operations_research::routing::RoutingModel; +using operations_research::routing::RoutingNodeIndex; +using operations_research::routing::RoutingSearchParameters; +using operations_research::routing::ServiceTimePlusTransition; ABSL_FLAG(int, vrp_orders, 100, "Nodes in the problem."); ABSL_FLAG(int, vrp_vehicles, 20, @@ -179,8 +182,7 @@ int main(int argc, char** argv) { if (solution != nullptr) { DisplayPlan(manager, routing, 
*solution, /*use_same_vehicle_costs=*/false, /*max_nodes_per_group=*/0, /*same_vehicle_cost=*/0, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); + {kCapacity, kTime}); } else { LOG(INFO) << "No solution found."; } diff --git a/ortools/routing/samples/cvrptw_with_stop_times_and_resources.cc b/ortools/routing/samples/cvrptw_with_stop_times_and_resources.cc index cd046c6801..d4dcb60046 100644 --- a/ortools/routing/samples/cvrptw_with_stop_times_and_resources.cc +++ b/ortools/routing/samples/cvrptw_with_stop_times_and_resources.cc @@ -20,36 +20,39 @@ // to one. #include +#include #include +#include #include +#include "absl/flags/flag.h" #include "absl/random/random.h" #include "absl/strings/str_cat.h" #include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" -#include "ortools/base/types.h" +#include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/index_manager.h" #include "ortools/routing/parameters.h" #include "ortools/routing/parameters.pb.h" #include "ortools/routing/parsers/cvrptw_lib.h" #include "ortools/routing/routing.h" +#include "ortools/routing/types.h" using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::GetSeed; using operations_research::IntervalVar; using operations_research::IntVar; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; using operations_research::Solver; -using operations_research::StopServiceTimePlusTransition; +using operations_research::routing::DefaultRoutingSearchParameters; +using operations_research::routing::GetSeed; +using 
operations_research::routing::LocationContainer; +using operations_research::routing::RandomDemand; +using operations_research::routing::RoutingDimension; +using operations_research::routing::RoutingIndexManager; +using operations_research::routing::RoutingModel; +using operations_research::routing::RoutingNodeIndex; +using operations_research::routing::RoutingSearchParameters; +using operations_research::routing::StopServiceTimePlusTransition; ABSL_FLAG(int, vrp_stops, 25, "Stop locations in the problem."); ABSL_FLAG(int, vrp_orders_per_stop, 5, "Nodes for each stop."); @@ -209,8 +212,7 @@ int main(int argc, char** argv) { if (solution != nullptr) { DisplayPlan(manager, routing, *solution, /*use_same_vehicle_costs=*/false, /*max_nodes_per_group=*/0, /*same_vehicle_cost=*/0, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); + {kCapacity, kTime}); LOG(INFO) << "Stop intervals:"; for (IntervalVar* const interval : intervals) { if (solution->PerformedValue(interval)) { diff --git a/ortools/routing/samples/cvrptw_with_time_dependent_costs.cc b/ortools/routing/samples/cvrptw_with_time_dependent_costs.cc index 7cfd184859..72c8a0954c 100644 --- a/ortools/routing/samples/cvrptw_with_time_dependent_costs.cc +++ b/ortools/routing/samples/cvrptw_with_time_dependent_costs.cc @@ -17,38 +17,37 @@ #include #include #include -#include #include -#include +#include #include +#include "absl/flags/flag.h" #include "absl/functional/bind_front.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" -#include "ortools/base/types.h" +#include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/index_manager.h" #include "ortools/routing/parameters.h" #include "ortools/routing/parameters.pb.h" #include "ortools/routing/parsers/cvrptw_lib.h" #include "ortools/routing/routing.h" -#include 
"ortools/util/range_query_function.h" +#include "ortools/routing/types.h" #include "ortools/util/step_function.h" using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::GetSeed; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; -using operations_research::ServiceTimePlusTransition; using operations_research::StepFunction; +using operations_research::routing::DefaultRoutingSearchParameters; +using operations_research::routing::GetSeed; +using operations_research::routing::LocationContainer; +using operations_research::routing::RandomDemand; +using operations_research::routing::RoutingDimension; +using operations_research::routing::RoutingIndexManager; +using operations_research::routing::RoutingModel; +using operations_research::routing::RoutingNodeIndex; +using operations_research::routing::RoutingSearchParameters; +using operations_research::routing::ServiceTimePlusTransition; ABSL_FLAG(int, vrp_orders, 25, "Nodes in the problem."); ABSL_FLAG(int, vrp_vehicles, 10, @@ -239,8 +238,7 @@ int main(int argc, char** argv) { if (solution != nullptr) { DisplayPlan(manager, routing, *solution, /*use_same_vehicle_costs=*/false, /*max_nodes_per_group=*/0, /*same_vehicle_cost=*/0, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); + {kCapacity, kTime, kTimeDependentCost}); } else { LOG(INFO) << "No solution found."; } From 5eae04cc2ab0e6c08c22c382378cd7dfe60276e8 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 30 Sep 2024 14:33:33 +0200 Subject: [PATCH 035/105] routing: export from google3 --- ortools/routing/filters.cc | 187 +++++++++++++++------- ortools/routing/filters.h | 315 
++++++++++++++++++++++++++++++++++++- ortools/routing/ils.cc | 3 +- ortools/util/BUILD.bazel | 1 + 4 files changed, 446 insertions(+), 60 deletions(-) diff --git a/ortools/routing/filters.cc b/ortools/routing/filters.cc index 9eedfea741..29778789e8 100644 --- a/ortools/routing/filters.cc +++ b/ortools/routing/filters.cc @@ -43,6 +43,7 @@ #include "ortools/base/map_util.h" #include "ortools/base/small_map.h" #include "ortools/base/strong_vector.h" +#include "ortools/base/types.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/constraint_solver/constraint_solveri.h" #include "ortools/routing/lp_scheduling.h" @@ -1150,10 +1151,20 @@ class PathCumulFilter : public BasePathFilter { }; struct SoftBound { - SoftBound() : bound(-1), coefficient(0) {} - int64_t bound; - int64_t coefficient; + int64_t bound = -1; + int64_t coefficient = 0; }; + struct Interval { + int64_t min; + int64_t max; + }; + std::vector> + ExtractNodeIndexToPrecedences() const; + std::vector ExtractCumulSoftUpperBounds() const; + std::vector ExtractCumulSoftLowerBounds() const; + std::vector ExtractCumulPiecewiseLinearCosts() + const; + std::vector ExtractEvaluators() const; // This class caches transit values between nodes of paths. Transit and path // nodes are to be added in the order in which they appear on a path. 
@@ -1230,7 +1241,9 @@ class PathCumulFilter : public BasePathFilter { !dimension_.GetBreakIntervalsOfVehicle(vehicle).empty(); } - bool FilterCumulSoftBounds() const { return !cumul_soft_bounds_.empty(); } + bool FilterCumulSoftBounds() const { + return !cumul_soft_upper_bounds_.empty(); + } int64_t GetCumulSoftCost(int64_t node, int64_t cumul_value) const; @@ -1341,8 +1354,8 @@ class PathCumulFilter : public BasePathFilter { const std::vector cumuls_; const std::vector slacks_; std::vector start_to_vehicle_; - std::vector evaluators_; - std::vector vehicle_span_upper_bounds_; + const std::vector evaluators_; + const std::vector vehicle_span_upper_bounds_; const bool has_vehicle_span_upper_bounds_; int64_t total_current_cumul_cost_value_; int64_t synchronized_objective_value_; @@ -1354,16 +1367,17 @@ class PathCumulFilter : public BasePathFilter { // Cumul cost values for paths in delta, indexed by vehicle. std::vector delta_path_cumul_cost_values_; const int64_t global_span_cost_coefficient_; - std::vector cumul_soft_bounds_; - std::vector cumul_soft_lower_bounds_; - std::vector cumul_piecewise_linear_costs_; + const std::vector cumul_soft_upper_bounds_; + const std::vector cumul_soft_lower_bounds_; + const std::vector + cumul_piecewise_linear_costs_; std::vector vehicle_total_slack_cost_coefficients_; bool has_nonzero_vehicle_total_slack_cost_coefficients_; const std::vector vehicle_capacities_; // node_index_to_precedences_[node_index] contains all NodePrecedence elements // with node_index as either "first_node" or "second_node". // This vector is empty if there are no precedences on the dimension_. - std::vector> + const std::vector> node_index_to_precedences_; // Data reflecting information on paths and cumul variables for the solution // to which the filter was synchronized. 
@@ -1405,6 +1419,84 @@ std::vector SumOfVectors(const std::vector& v1, } } // namespace +std::vector +PathCumulFilter::ExtractCumulSoftUpperBounds() const { + const int num_cumuls = dimension_.cumuls().size(); + std::vector bounds(num_cumuls, + {.bound = kint64max, .coefficient = 0}); + bool has_some_bound = false; + for (int i = 0; i < num_cumuls; ++i) { + if (!dimension_.HasCumulVarSoftUpperBound(i)) continue; + const int64_t bound = dimension_.GetCumulVarSoftUpperBound(i); + const int64_t coeff = dimension_.GetCumulVarSoftUpperBoundCoefficient(i); + bounds[i] = {.bound = bound, .coefficient = coeff}; + has_some_bound |= bound < kint64max && coeff != 0; + } + if (!has_some_bound) bounds.clear(); + return bounds; +} + +std::vector +PathCumulFilter::ExtractCumulSoftLowerBounds() const { + const int num_cumuls = dimension_.cumuls().size(); + std::vector bounds(num_cumuls, {.bound = 0, .coefficient = 0}); + bool has_some_bound = false; + for (int i = 0; i < num_cumuls; ++i) { + if (!dimension_.HasCumulVarSoftLowerBound(i)) continue; + const int64_t bound = dimension_.GetCumulVarSoftLowerBound(i); + const int64_t coeff = dimension_.GetCumulVarSoftLowerBoundCoefficient(i); + bounds[i] = {.bound = bound, .coefficient = coeff}; + has_some_bound |= bound > 0 && coeff != 0; + } + if (!has_some_bound) bounds.clear(); + return bounds; +} + +std::vector +PathCumulFilter::ExtractCumulPiecewiseLinearCosts() const { + const int num_cumuls = dimension_.cumuls().size(); + std::vector costs(num_cumuls, nullptr); + bool has_some_cost = false; + for (int i = 0; i < dimension_.cumuls().size(); ++i) { + if (!dimension_.HasCumulVarPiecewiseLinearCost(i)) continue; + const PiecewiseLinearFunction* const cost = + dimension_.GetCumulVarPiecewiseLinearCost(i); + if (cost == nullptr) continue; + has_some_cost = true; + costs[i] = cost; + } + if (!has_some_cost) costs.clear(); + return costs; +} + +std::vector +PathCumulFilter::ExtractEvaluators() const { + const int num_paths = NumPaths(); + 
std::vector evaluators(num_paths); + for (int i = 0; i < num_paths; ++i) { + evaluators[i] = &dimension_.transit_evaluator(i); + } + return evaluators; +} + +std::vector> +PathCumulFilter::ExtractNodeIndexToPrecedences() const { + std::vector> + node_index_to_precedences; + const std::vector& node_precedences = + dimension_.GetNodePrecedences(); + if (!node_precedences.empty()) { + node_index_to_precedences.resize(dimension_.cumuls().size()); + for (const auto& node_precedence : node_precedences) { + node_index_to_precedences[node_precedence.first_node].push_back( + node_precedence); + node_index_to_precedences[node_precedence.second_node].push_back( + node_precedence); + } + } + return node_index_to_precedences; +} + PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, const RoutingDimension& dimension, bool propagate_own_objective_value, @@ -1416,7 +1508,7 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, dimension_(dimension), cumuls_(dimension.cumuls()), slacks_(dimension.slacks()), - evaluators_(routing_model.vehicles(), nullptr), + evaluators_(ExtractEvaluators()), vehicle_span_upper_bounds_(dimension.vehicle_span_upper_bounds()), has_vehicle_span_upper_bounds_(absl::c_any_of( vehicle_span_upper_bounds_, @@ -1431,6 +1523,9 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, delta_path_cumul_cost_values_(routing_model.vehicles(), std::numeric_limits::min()), global_span_cost_coefficient_(dimension.global_span_cost_coefficient()), + cumul_soft_upper_bounds_(ExtractCumulSoftUpperBounds()), + cumul_soft_lower_bounds_(ExtractCumulSoftLowerBounds()), + cumul_piecewise_linear_costs_(ExtractCumulPiecewiseLinearCosts()), vehicle_total_slack_cost_coefficients_( SumOfVectors(dimension.vehicle_span_cost_coefficients(), dimension.vehicle_slack_cost_coefficients())), @@ -1438,6 +1533,7 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, absl::c_any_of(vehicle_total_slack_cost_coefficients_, 
[](int64_t coefficient) { return coefficient != 0; })), vehicle_capacities_(dimension.vehicle_capacities()), + node_index_to_precedences_(ExtractNodeIndexToPrecedences()), delta_max_end_cumul_(0), delta_nodes_with_precedences_and_changed_cumul_(routing_model.Size()), name_(dimension.name()), @@ -1447,12 +1543,6 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, filter_objective_cost_(filter_objective_cost), may_use_optimizers_(may_use_optimizers), propagate_own_objective_value_(propagate_own_objective_value) { - cumul_soft_bounds_.resize(cumuls_.size()); - cumul_soft_lower_bounds_.resize(cumuls_.size()); - cumul_piecewise_linear_costs_.resize(cumuls_.size()); - bool has_cumul_soft_bounds = false; - bool has_cumul_soft_lower_bounds = false; - bool has_cumul_piecewise_linear_costs = false; bool has_cumul_hard_bounds = false; for (const IntVar* const slack : slacks_) { if (slack->Min() > 0) { @@ -1461,39 +1551,12 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, } } for (int i = 0; i < cumuls_.size(); ++i) { - if (dimension.HasCumulVarSoftUpperBound(i)) { - has_cumul_soft_bounds = true; - cumul_soft_bounds_[i].bound = dimension.GetCumulVarSoftUpperBound(i); - cumul_soft_bounds_[i].coefficient = - dimension.GetCumulVarSoftUpperBoundCoefficient(i); - } - if (dimension.HasCumulVarSoftLowerBound(i)) { - has_cumul_soft_lower_bounds = true; - cumul_soft_lower_bounds_[i].bound = - dimension.GetCumulVarSoftLowerBound(i); - cumul_soft_lower_bounds_[i].coefficient = - dimension.GetCumulVarSoftLowerBoundCoefficient(i); - } - if (dimension.HasCumulVarPiecewiseLinearCost(i)) { - has_cumul_piecewise_linear_costs = true; - cumul_piecewise_linear_costs_[i] = - dimension.GetCumulVarPiecewiseLinearCost(i); - } IntVar* const cumul_var = cumuls_[i]; if (cumul_var->Min() > 0 || cumul_var->Max() < std::numeric_limits::max()) { has_cumul_hard_bounds = true; } } - if (!has_cumul_soft_bounds) { - cumul_soft_bounds_.clear(); - } - if 
(!has_cumul_soft_lower_bounds) { - cumul_soft_lower_bounds_.clear(); - } - if (!has_cumul_piecewise_linear_costs) { - cumul_piecewise_linear_costs_.clear(); - } if (!has_cumul_hard_bounds) { // Slacks don't need to be constrained if the cumuls don't have hard bounds; // therefore we can ignore the vehicle span/slack cost coefficient (note @@ -1505,20 +1568,12 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, start_to_vehicle_.resize(Size(), -1); for (int i = 0; i < routing_model.vehicles(); ++i) { start_to_vehicle_[routing_model.Start(i)] = i; - evaluators_[i] = &dimension.transit_evaluator(i); } const std::vector& node_precedences = dimension.GetNodePrecedences(); if (!node_precedences.empty()) { current_min_max_node_cumuls_.resize(cumuls_.size(), {-1, -1}); - node_index_to_precedences_.resize(cumuls_.size()); - for (const auto& node_precedence : node_precedences) { - node_index_to_precedences_[node_precedence.first_node].push_back( - node_precedence); - node_index_to_precedences_[node_precedence.second_node].push_back( - node_precedence); - } } #ifndef NDEBUG @@ -1533,9 +1588,9 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, int64_t PathCumulFilter::GetCumulSoftCost(int64_t node, int64_t cumul_value) const { - if (node < cumul_soft_bounds_.size()) { - const int64_t bound = cumul_soft_bounds_[node].bound; - const int64_t coefficient = cumul_soft_bounds_[node].coefficient; + if (node < cumul_soft_upper_bounds_.size()) { + const int64_t bound = cumul_soft_upper_bounds_[node].bound; + const int64_t coefficient = cumul_soft_upper_bounds_[node].coefficient; if (coefficient > 0 && bound < cumul_value) { return CapProd(CapSub(cumul_value, bound), coefficient); } @@ -4223,11 +4278,27 @@ bool LightVehicleBreaksChecker::Check() const { CapAddTo(br.duration_min, &lb_span_tw); } } - if (!data.span.SetMin(lb_span_tw)) return false; - if (!data.start_cumul.SetMax(CapSub(data.end_cumul.Max(), lb_span_tw))) { + int64_t 
lb_span_interbreak = 0; + for (const auto& [max_interbreak, min_break_duration] : + data.interbreak_limits) { + // Minimal number of breaks depends on total transit: + // 0 breaks for 0 <= total transit <= limit, + // 1 break for limit + 1 <= total transit <= 2 * limit, + // i breaks for i * limit + 1 <= total transit <= (i+1) * limit, ... + if (total_transit == 0) continue; + if (max_interbreak == 0) return false; + const int min_num_breaks = (total_transit - 1) / max_interbreak; + if (min_num_breaks > data.vehicle_breaks.size()) return false; + lb_span_interbreak = std::max( + lb_span_interbreak, CapProd(min_num_breaks, min_break_duration)); + } + lb_span_interbreak = CapAdd(lb_span_interbreak, total_transit); + const int64_t lb_span = std::max(lb_span_tw, lb_span_interbreak); + if (!data.span.SetMin(lb_span)) return false; + if (!data.start_cumul.SetMax(CapSub(data.end_cumul.Max(), lb_span))) { return false; } - if (!data.end_cumul.SetMin(CapAdd(data.start_cumul.Min(), lb_span_tw))) { + if (!data.end_cumul.SetMin(CapAdd(data.start_cumul.Min(), lb_span))) { return false; } } diff --git a/ortools/routing/filters.h b/ortools/routing/filters.h index ee332c8f81..2a7bd2800b 100644 --- a/ortools/routing/filters.h +++ b/ortools/routing/filters.h @@ -19,7 +19,6 @@ #include #include #include -#include #include #include @@ -40,6 +39,31 @@ namespace operations_research::routing { // A vector that allows to revert back to a previously committed state, // get the set of changed indices, and get current and committed values. 
+template +class CommittableValue { + public: + explicit CommittableValue(const T& value) + : current_(value), committed_(value) {} + + const T& Get() const { return current_; } + const T& GetCommitted() const { return committed_; } + + void Set(const T& value) { current_ = value; } + + void SetAndCommit(const T& value) { + Set(value); + Commit(); + } + + void Revert() { current_ = committed_; } + + void Commit() { committed_ = current_; } + + private: + T current_; + T committed_; +}; + template class CommittableVector { public: @@ -116,6 +140,290 @@ class CommittableVector { SparseBitset changed_; }; +// This class allows to represent a state of dimension values for all paths of +// a vehicle routing problem. Values of interest for each path are: +// - nodes, +// - cumuls (min/max), +// - transit times, +// - sum of transit times since the beginning of the path, +// - span (min/max). +// +// This class can maintain two states at once: a committed state and a current +// state. The current state can be modified by first describing a path p to be +// modified with PushNode() and MakePathFromNewNodes(). Then the dimension +// values of this path can be modified with views returned by MutableXXX() +// methods. +// +// When a set of paths has been modified, the caller can decide to definitely +// change the committed state to the new state, or to revert to the committed +// state. +// +// Operations are meant to be efficient: +// - all path modifications, i.e. PushNode(), MakePathFromNewNodes(), +// MutableXXX(), SetSpan() operations are O(1). +// - Revert() is O(num changed paths). +// - Commit() has two behaviors: +// - if there are less than max_num_committed_elements_ elements in the +// committed state, then Commit() is O(num changed paths). +// - otherwise, Commit() does a compaction of the committed state, in +// O(num_nodes + num_paths). 
+// The amortized cost of Commit(), when taking modifications into account, +// is O(size of changed paths), because all modifications pay at worst +// O(1) for its own compaction. +// +// Note that this class does not support the semantics associated with its +// fields names, for instance it does not make sure that cumul_min <= cumul_max. +// The field names are meant for readability for the user. +// However, path sizes are enforced: if a path has n nodes, then it has +// n fields for cumul min/max, n for transit_sums, and max(0, n-1) for transits. +class DimensionValues { + public: + DimensionValues(int num_paths, int num_nodes) + : range_of_path_(num_paths, {.begin = 0, .end = 0}), + committed_range_of_path_(num_paths, {.begin = 0, .end = 0}), + span_min_(num_paths, 0), + span_max_(num_paths, kint64max), + changed_paths_(num_paths), + max_num_committed_elements_(16 * num_nodes) { + nodes_.reserve(max_num_committed_elements_); + transit_.reserve(max_num_committed_elements_); + transit_sum_.reserve(max_num_committed_elements_); + cumul_min_.reserve(max_num_committed_elements_); + cumul_max_.reserve(max_num_committed_elements_); + } + + // Adds a node to new nodes. + void PushNode(int node) { nodes_.push_back(node); } + + // Turns new nodes into a new path, allocating dimension values for it. + void MakePathFromNewNodes(int path) { + DCHECK_GE(path, 0); + DCHECK_LT(path, range_of_path_.size()); + DCHECK(!changed_paths_[path]); + range_of_path_[path] = {.begin = num_current_elements_, + .end = nodes_.size()}; + changed_paths_.Set(path); + // Allocate dimension values. We allocate n cells for all dimension values, + // even transits, so they can all be indexed by the same range_of_path. 
+ transit_.resize(nodes_.size(), 0); + transit_sum_.resize(nodes_.size(), 0); + cumul_min_.resize(nodes_.size(), kint64min); + cumul_max_.resize(nodes_.size(), kint64max); + num_current_elements_ = nodes_.size(); + span_min_.Set(path, 0); + span_max_.Set(path, kint64max); + } + + // Resets all path to empty, in both committed and current state. + void Reset() { + const int num_paths = range_of_path_.size(); + range_of_path_.assign(num_paths, {.begin = 0, .end = 0}); + committed_range_of_path_.assign(num_paths, {.begin = 0, .end = 0}); + changed_paths_.SparseClearAll(); + num_current_elements_ = 0; + num_committed_elements_ = 0; + nodes_.clear(); + transit_.clear(); + transit_sum_.clear(); + cumul_min_.clear(); + cumul_max_.clear(); + span_min_.Revert(); + span_min_.SetAllAndCommit(0); + span_max_.Revert(); + span_max_.SetAllAndCommit(kint64max); + } + + // Clears the changed state, make it point to the committed state. + void Revert() { + for (const int path : changed_paths_.PositionsSetAtLeastOnce()) { + range_of_path_[path] = committed_range_of_path_[path]; + } + changed_paths_.SparseClearAll(); + num_current_elements_ = num_committed_elements_; + nodes_.resize(num_current_elements_); + transit_.resize(num_current_elements_); + transit_sum_.resize(num_current_elements_); + cumul_min_.resize(num_current_elements_); + cumul_max_.resize(num_current_elements_); + span_min_.Revert(); + span_max_.Revert(); + } + + // Makes the committed state point to the current state. + // If the state representation is too large, reclaims memory by compacting + // the committed state. 
+ void Commit() { + for (const int path : changed_paths_.PositionsSetAtLeastOnce()) { + committed_range_of_path_[path] = range_of_path_[path]; + } + changed_paths_.SparseClearAll(); + num_committed_elements_ = num_current_elements_; + span_min_.Commit(); + span_max_.Commit(); + // If the committed data would take too much space, compact the data: + // copy committed data to the end of vectors, erase old data, refresh + // indexing (range_of_path_). + if (num_current_elements_ <= max_num_committed_elements_) return; + temp_nodes_.clear(); + temp_transit_.clear(); + temp_transit_sum_.clear(); + temp_cumul_min_.clear(); + temp_cumul_max_.clear(); + for (int path = 0; path < range_of_path_.size(); ++path) { + if (committed_range_of_path_[path].Size() == 0) continue; + const size_t new_begin = temp_nodes_.size(); + const auto [begin, end] = committed_range_of_path_[path]; + temp_nodes_.insert(temp_nodes_.end(), nodes_.begin() + begin, + nodes_.begin() + end); + temp_transit_.insert(temp_transit_.end(), transit_.begin() + begin, + transit_.begin() + end); + temp_transit_sum_.insert(temp_transit_sum_.end(), + transit_sum_.begin() + begin, + transit_sum_.begin() + end); + temp_cumul_min_.insert(temp_cumul_min_.end(), cumul_min_.begin() + begin, + cumul_min_.begin() + end); + temp_cumul_max_.insert(temp_cumul_max_.end(), cumul_max_.begin() + begin, + cumul_max_.begin() + end); + committed_range_of_path_[path] = {.begin = new_begin, + .end = temp_nodes_.size()}; + } + std::swap(nodes_, temp_nodes_); + std::swap(transit_, temp_transit_); + std::swap(transit_sum_, temp_transit_sum_); + std::swap(cumul_min_, temp_cumul_min_); + std::swap(cumul_max_, temp_cumul_max_); + range_of_path_ = committed_range_of_path_; + num_committed_elements_ = nodes_.size(); + num_current_elements_ = nodes_.size(); + } + + // Returns a const view of the nodes of the path, in the committed state. 
+ absl::Span CommittedNodes(int path) const { + const auto [begin, end] = committed_range_of_path_[path]; + return absl::MakeConstSpan(nodes_.data() + begin, nodes_.data() + end); + } + + // Returns a const view of the nodes of the path, in the current state. + absl::Span Nodes(int path) const { + const auto [begin, end] = range_of_path_[path]; + return absl::MakeConstSpan(nodes_.data() + begin, nodes_.data() + end); + } + + // Returns a const view of the transits of the path, in the current state. + absl::Span Transits(int path) const { + auto [begin, end] = range_of_path_[path]; + // When the path is not empty, #transits = #nodes - 1. + // When the path is empty, begin = end, return empty span. + if (begin < end) --end; + return absl::MakeConstSpan(transit_.data() + begin, transit_.data() + end); + } + + // Returns a mutable view of the transits of the path, in the current state. + absl::Span MutableTransits(int path) { + auto [begin, end] = range_of_path_[path]; + // When the path is not empty, #transits = #nodes - 1. + // When the path is empty, begin = end, return empty span. + if (begin < end) --end; + return absl::MakeSpan(transit_.data() + begin, transit_.data() + end); + } + + // Returns a const view of the transits sums of the path, in the current + // state. + absl::Span TransitSums(int path) const { + const auto [begin, end] = range_of_path_[path]; + return absl::MakeConstSpan(transit_sum_.data() + begin, + transit_sum_.data() + end); + } + + // Returns a mutable view of the transits sums of the path, in the current + // state. + absl::Span MutableTransitSums(int path) { + const auto [begin, end] = range_of_path_[path]; + return absl::MakeSpan(transit_sum_.data() + begin, + transit_sum_.data() + end); + } + + // Returns a const view of the cumul mins of the path, in the current state. 
+ absl::Span CumulMins(int path) const { + const auto [begin, end] = range_of_path_[path]; + return absl::MakeConstSpan(cumul_min_.data() + begin, + cumul_min_.data() + end); + } + + // Returns a mutable view of the cumul mins of the path, in the current state. + absl::Span MutableCumulMins(int path) { + const auto [begin, end] = range_of_path_[path]; + return absl::MakeSpan(cumul_min_.data() + begin, cumul_min_.data() + end); + } + + // Returns a const view of the cumul maxs of the path, in the current state. + absl::Span CumulMaxs(int path) const { + const auto [begin, end] = range_of_path_[path]; + return absl::MakeConstSpan(cumul_max_.data() + begin, + cumul_max_.data() + end); + } + + // Returns a mutable view of the cumul maxs of the path, in the current state. + absl::Span MutableCumulMaxs(int path) { + const auto [begin, end] = range_of_path_[path]; + return absl::MakeSpan(cumul_max_.data() + begin, cumul_max_.data() + end); + } + + // Returns the min span of the path, in the current state. + int64_t SpanMin(int path) const { return span_min_.Get(path); } + // Returns the max span of the path in the current state. + int64_t SpanMax(int path) const { return span_max_.Get(path); } + // Sets the min span of the path, in the current state. + void SetSpanMin(int path, int64_t value) { span_min_.Set(path, value); } + // Sets the max span of the path, in the current state. + void SetSpanMax(int path, int64_t value) { span_max_.Set(path, value); } + + // Returns the number of nodes of the path, in the current state. + int NumNodes(int path) const { return range_of_path_[path].Size(); } + // Returns a const view of the set of paths changed, in the current state. + absl::Span ChangedPaths() const { + return absl::MakeConstSpan(changed_paths_.PositionsSetAtLeastOnce()); + } + // Returns whether the given path was changed, in the current state. 
+ bool PathHasChanged(int path) const { return changed_paths_[path]; } + + private: + // These vectors hold the data of both committed and current states. + // The ranges below determine which indices are associated to each path and + // each state. + std::vector nodes_; + std::vector transit_; + std::vector transit_sum_; + std::vector cumul_min_; + std::vector cumul_max_; + std::vector temp_nodes_; + std::vector temp_transit_; + std::vector temp_transit_sum_; + std::vector temp_cumul_min_; + std::vector temp_cumul_max_; + // A path has a range of indices in the committed state and another one in the + // current state. + struct Range { + size_t begin = 0; + size_t end = 0; + int Size() const { return end - begin; } + }; + std::vector range_of_path_; + std::vector committed_range_of_path_; + // Associates span to each path. + CommittableVector span_min_; + CommittableVector span_max_; + // Stores whether each path has been changed since last committed state. + SparseBitset changed_paths_; + // Threshold for the size of the committed vector. This is purely heuristic: + // it should be more than the number of nodes so compactions do not occur at + // each submit, but ranges should not be too far apart to avoid cache misses. + const size_t max_num_committed_elements_; + // This locates the start of new nodes. + size_t num_current_elements_ = 0; + size_t num_committed_elements_ = 0; +}; + /// Returns a filter tracking route constraints. 
IntVarLocalSearchFilter* MakeRouteConstraintFilter( const RoutingModel& routing_model); @@ -646,9 +954,14 @@ class LightVehicleBreaksChecker { bool is_performed_min; bool is_performed_max; }; + struct InterbreakLimit { + int64_t max_interbreak_duration; + int64_t min_break_duration; + }; struct PathData { std::vector vehicle_breaks; + std::vector interbreak_limits; LocalSearchState::Variable start_cumul; LocalSearchState::Variable end_cumul; LocalSearchState::Variable total_transit; diff --git a/ortools/routing/ils.cc b/ortools/routing/ils.cc index 8af8cbb820..7727b23161 100644 --- a/ortools/routing/ils.cc +++ b/ortools/routing/ils.cc @@ -27,6 +27,7 @@ #include "absl/functional/bind_front.h" #include "absl/log/check.h" #include "absl/time/time.h" +#include "absl/types/span.h" #include "google/protobuf/repeated_ptr_field.h" #include "ortools/base/logging.h" #include "ortools/base/protoutil.h" @@ -168,7 +169,7 @@ class SingleRandomCompositionStrategy // Returns a composition strategy based on the input parameters. 
std::unique_ptr MakeRuinCompositionStrategy( - const std::vector>& ruins, + absl::Span> ruins, RuinCompositionStrategy::Value composition_strategy, std::mt19937* rnd) { std::vector ruin_ptrs; ruin_ptrs.reserve(ruins.size()); diff --git a/ortools/util/BUILD.bazel b/ortools/util/BUILD.bazel index 38c67dd3ad..420738a84b 100644 --- a/ortools/util/BUILD.bazel +++ b/ortools/util/BUILD.bazel @@ -130,6 +130,7 @@ cc_library( ":saturated_arithmetic", "//ortools/base", "//ortools/base:dump_vars", + "//ortools/base:mathutil", "//ortools/base:types", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/strings", From 4d2dfae43e5635f535460c3c4b926deeb04c25b2 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 30 Sep 2024 15:30:44 +0200 Subject: [PATCH 036/105] bazel: don't use native rules for samples --- ortools/algorithms/samples/code_samples.bzl | 8 +++++--- ortools/constraint_solver/samples/code_samples.bzl | 6 ++++-- ortools/glop/samples/code_samples.bzl | 6 ++++-- ortools/graph/samples/code_samples.bzl | 8 +++++--- ortools/linear_solver/samples/code_samples.bzl | 8 +++++--- ortools/pdlp/samples/code_samples.bzl | 6 ++++-- ortools/routing/samples/code_samples.bzl | 6 ++++-- ortools/sat/samples/code_samples.bzl | 5 +++-- 8 files changed, 34 insertions(+), 19 deletions(-) diff --git a/ortools/algorithms/samples/code_samples.bzl b/ortools/algorithms/samples/code_samples.bzl index 5f8039a325..8960d993a1 100644 --- a/ortools/algorithms/samples/code_samples.bzl +++ b/ortools/algorithms/samples/code_samples.bzl @@ -14,10 +14,12 @@ """Helper macro to compile and test code samples.""" load("@pip_deps//:requirements.bzl", "requirement") +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") +load("@rules_java//java:defs.bzl", "java_test") load("@rules_python//python:defs.bzl", "py_binary", "py_test") def code_sample_cc(name): - native.cc_binary( + cc_binary( name = name + "_cc", srcs = [name + ".cc"], deps = [ @@ -25,7 +27,7 @@ def code_sample_cc(name): ], ) - 
native.cc_test( + cc_test( name = name + "_cc_test", size = "small", srcs = [name + ".cc"], @@ -68,7 +70,7 @@ def code_sample_cc_py(name): code_sample_py(name = name) def code_sample_java(name): - native.java_test( + java_test( name = name + "_java_test", size = "small", srcs = [name + ".java"], diff --git a/ortools/constraint_solver/samples/code_samples.bzl b/ortools/constraint_solver/samples/code_samples.bzl index 8939670c67..9faaeca55a 100644 --- a/ortools/constraint_solver/samples/code_samples.bzl +++ b/ortools/constraint_solver/samples/code_samples.bzl @@ -13,8 +13,10 @@ """Helper macro to compile and test code samples.""" +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") + def code_sample_cc(name): - native.cc_binary( + cc_binary( name = name + "_cc", srcs = [name + ".cc"], deps = [ @@ -23,7 +25,7 @@ def code_sample_cc(name): ], ) - native.cc_test( + cc_test( name = name + "_cc_test", size = "small", srcs = [name + ".cc"], diff --git a/ortools/glop/samples/code_samples.bzl b/ortools/glop/samples/code_samples.bzl index f0d55d3b0d..f2163edba6 100644 --- a/ortools/glop/samples/code_samples.bzl +++ b/ortools/glop/samples/code_samples.bzl @@ -13,8 +13,10 @@ """Helper macro to compile and test code samples.""" +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") + def code_sample_cc(name): - native.cc_binary( + cc_binary( name = name + "_cc", srcs = [name + ".cc"], deps = [ @@ -24,7 +26,7 @@ def code_sample_cc(name): ], ) - native.cc_test( + cc_test( name = name + "_cc_test", size = "small", srcs = [name + ".cc"], diff --git a/ortools/graph/samples/code_samples.bzl b/ortools/graph/samples/code_samples.bzl index cda07eb817..fcd019c54e 100644 --- a/ortools/graph/samples/code_samples.bzl +++ b/ortools/graph/samples/code_samples.bzl @@ -14,10 +14,12 @@ """Helper macro to compile and test code samples.""" load("@pip_deps//:requirements.bzl", "requirement") +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") +load("@rules_java//java:defs.bzl", "java_test") 
load("@rules_python//python:defs.bzl", "py_binary", "py_test") def code_sample_cc(name): - native.cc_binary( + cc_binary( name = name + "_cc", srcs = [name + ".cc"], deps = [ @@ -38,7 +40,7 @@ def code_sample_cc(name): ], ) - native.cc_test( + cc_test( name = name + "_cc_test", size = "small", srcs = [name + ".cc"], @@ -98,7 +100,7 @@ def code_sample_cc_py(name): code_sample_py(name = name) def code_sample_java(name): - native.java_test( + java_test( name = name + "_java_test", size = "small", srcs = [name + ".java"], diff --git a/ortools/linear_solver/samples/code_samples.bzl b/ortools/linear_solver/samples/code_samples.bzl index aab743a690..d91202efec 100644 --- a/ortools/linear_solver/samples/code_samples.bzl +++ b/ortools/linear_solver/samples/code_samples.bzl @@ -14,10 +14,12 @@ """Helper macro to compile and test code samples.""" load("@pip_deps//:requirements.bzl", "requirement") +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") +load("@rules_java//java:defs.bzl", "java_test") load("@rules_python//python:defs.bzl", "py_binary", "py_test") def code_sample_cc(name): - native.cc_binary( + cc_binary( name = name + "_cc", srcs = [name + ".cc"], deps = [ @@ -28,7 +30,7 @@ def code_sample_cc(name): ], ) - native.cc_test( + cc_test( name = name + "_cc_test", size = "small", srcs = [name + ".cc"], @@ -78,7 +80,7 @@ def code_sample_py(name): ) def code_sample_java(name): - native.java_test( + java_test( name = name + "_java_test", size = "small", srcs = [name + ".java"], diff --git a/ortools/pdlp/samples/code_samples.bzl b/ortools/pdlp/samples/code_samples.bzl index 12d7480e28..9dc8cab8e8 100644 --- a/ortools/pdlp/samples/code_samples.bzl +++ b/ortools/pdlp/samples/code_samples.bzl @@ -13,8 +13,10 @@ """Helper macro to compile and test code samples.""" +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") + def code_sample_cc(name): - native.cc_binary( + cc_binary( name = name + "_cc", srcs = [name + ".cc"], deps = [ @@ -28,7 +30,7 @@ def code_sample_cc(name): 
], ) - native.cc_test( + cc_test( name = name + "_cc_test", size = "small", srcs = [name + ".cc"], diff --git a/ortools/routing/samples/code_samples.bzl b/ortools/routing/samples/code_samples.bzl index 88b102fe00..85c2e9e7c7 100644 --- a/ortools/routing/samples/code_samples.bzl +++ b/ortools/routing/samples/code_samples.bzl @@ -13,8 +13,10 @@ """Helper macro to compile and test code samples.""" +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") + def code_sample_cc(name): - native.cc_binary( + cc_binary( name = name + "_cc", srcs = [name + ".cc"], deps = [ @@ -24,7 +26,7 @@ def code_sample_cc(name): ], ) - native.cc_test( + cc_test( name = name + "_cc_test", size = "small", srcs = [name + ".cc"], diff --git a/ortools/sat/samples/code_samples.bzl b/ortools/sat/samples/code_samples.bzl index 2d1e4afb84..48764a31c4 100644 --- a/ortools/sat/samples/code_samples.bzl +++ b/ortools/sat/samples/code_samples.bzl @@ -13,9 +13,10 @@ """Helper macro to compile and test code samples.""" -load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_test") +load("@io_bazel_rules_go//go:def.bzl", "go_test") load("@pip_deps//:requirements.bzl", "requirement") load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") +load("@rules_java//java:defs.bzl", "java_test") load("@rules_python//python:defs.bzl", "py_binary", "py_test") def code_sample_cc(name): @@ -100,7 +101,7 @@ def code_sample_cc_py(name): code_sample_py(name = name) def code_sample_java(name): - native.java_test( + java_test( name = name + "_java_test", size = "small", srcs = [name + ".java"], From b3994a28022f0328fbe85af435c4dd705b0ff994 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 30 Sep 2024 15:52:34 +0200 Subject: [PATCH 037/105] fixup indent --- ortools/base/sysinfo.cc | 11 +++++++---- ortools/flatzinc/presolve.cc | 2 +- ortools/util/zvector.h | 4 ++-- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/ortools/base/sysinfo.cc b/ortools/base/sysinfo.cc index 4418c3dc0e..bff963d519 100644 --- 
a/ortools/base/sysinfo.cc +++ b/ortools/base/sysinfo.cc @@ -17,7 +17,8 @@ #if defined(__APPLE__) && defined(__GNUC__) // MacOS #include #include -#elif (defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) // [Free,Net,Open]BSD +#elif (defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__OpenBSD__)) // [Free,Net,Open]BSD #include #include // Windows @@ -48,8 +49,9 @@ int64_t GetProcessMemoryUsage() { int64_t resident_memory = t_info.resident_size; return resident_memory; } -#elif defined(__GNUC__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) && \ - !defined(__EMSCRIPTEN__) && !defined(_WIN32) // Linux +#elif defined(__GNUC__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \ + !defined(__OpenBSD__) && !defined(__EMSCRIPTEN__) && \ + !defined(_WIN32) // Linux int64_t GetProcessMemoryUsage() { unsigned size = 0; char buf[30]; @@ -61,7 +63,8 @@ int64_t GetProcessMemoryUsage() { fclose(pf); return int64_t{1024} * size; } -#elif (defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) // [Free,Net,Open]BSD +#elif (defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__OpenBSD__)) // [Free,Net,Open]BSD int64_t GetProcessMemoryUsage() { int who = RUSAGE_SELF; struct rusage rusage; diff --git a/ortools/flatzinc/presolve.cc b/ortools/flatzinc/presolve.cc index fb98500645..7b5984dee4 100644 --- a/ortools/flatzinc/presolve.cc +++ b/ortools/flatzinc/presolve.cc @@ -183,7 +183,7 @@ bool IsIncreasingAndContiguous(absl::Span values) { return true; } -bool AreOnesFollowedByMinusOne(const std::vector& coeffs) { +bool AreOnesFollowedByMinusOne(absl::Span coeffs) { CHECK(!coeffs.empty()); for (int i = 0; i < coeffs.size() - 1; ++i) { if (coeffs[i] != 1) { diff --git a/ortools/util/zvector.h b/ortools/util/zvector.h index e5e51f71ca..cf0cf9322b 100644 --- a/ortools/util/zvector.h +++ b/ortools/util/zvector.h @@ -14,8 +14,8 @@ #ifndef OR_TOOLS_UTIL_ZVECTOR_H_ #define OR_TOOLS_UTIL_ZVECTOR_H_ -#if 
(defined(__APPLE__) || \ - defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \ +#if (defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__OpenBSD__)) && \ defined(__GNUC__) #include #elif !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__MINGW64__) From f33c0faeb788ce44c44dbb2d18a5dceffb350341 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 18 Sep 2024 17:27:37 +0200 Subject: [PATCH 038/105] graph: enable more tests in bazel --- ortools/graph/BUILD.bazel | 98 ++++++++++++++++++++++++++ ortools/graph/bounded_dijkstra_test.cc | 6 +- ortools/graph/cliques_test.cc | 48 ++++--------- ortools/graph/k_shortest_paths_test.cc | 4 +- ortools/graph/min_cost_flow_test.cc | 2 +- ortools/graph/test_util.h | 65 +++++++++++++++++ 6 files changed, 181 insertions(+), 42 deletions(-) create mode 100644 ortools/graph/test_util.h diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel index 824d6cb89b..0d1be0e5a5 100644 --- a/ortools/graph/BUILD.bazel +++ b/ortools/graph/BUILD.bazel @@ -76,6 +76,25 @@ cc_library( ], ) +cc_test( + name = "bounded_dijkstra_test", + size = "small", + srcs = ["bounded_dijkstra_test.cc"], + deps = [ + ":bounded_dijkstra", + ":graph", + ":io", + ":test_util", + "//ortools/base:dump_vars", + "//ortools/base:gmock_main", + "//ortools/util:flat_matrix", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random", + "@com_google_absl//absl/random:distributions", + "@com_google_benchmark//:benchmark", + ], +) + cc_library( name = "multi_dijkstra", hdrs = ["multi_dijkstra.h"], @@ -152,6 +171,25 @@ cc_library( ], ) +cc_test( + name = "cliques_test", + size = "medium", + srcs = ["cliques_test.cc"], + deps = [ + ":cliques", + "//ortools/base:gmock_main", + "//ortools/base:mathutil", + "//ortools/util:time_limit", + "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/functional:bind_front", + "@com_google_absl//absl/log:check", + 
"@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", + "@com_google_benchmark//:benchmark", + ], +) + cc_library( name = "hamiltonian_path", hdrs = ["hamiltonian_path.h"], @@ -271,6 +309,20 @@ cc_library( ], ) +cc_test( + name = "one_tree_lower_bound_test", + size = "medium", + srcs = ["one_tree_lower_bound_test.cc"], + deps = [ + ":one_tree_lower_bound", + "//ortools/base:gmock_main", + "//ortools/base:path", + "//ortools/base:types", + "//ortools/routing/parsers:tsplib_parser", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "ebert_graph", hdrs = ["ebert_graph.h"], @@ -355,6 +407,23 @@ cc_library( ], ) +cc_test( + name = "k_shortest_paths_test", + srcs = ["k_shortest_paths_test.cc"], + deps = [ + ":graph", + ":io", + ":k_shortest_paths", + ":shortest_paths", + "//ortools/base:gmock_main", + "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/strings", + "@com_google_benchmark//:benchmark", + ], +) + # Flow problem protobuf representation proto_library( name = "flow_problem_proto", @@ -438,6 +507,25 @@ cc_library( ], ) +cc_test( + name = "min_cost_flow_test", + size = "medium", + srcs = ["min_cost_flow_test.cc"], + deps = [ + ":graphs", + ":min_cost_flow", + "//ortools/base:gmock_main", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/strings:str_format", + "@com_google_absl//absl/types:span", + "@com_google_benchmark//:benchmark", + ":graph", + # Using CLP because GLOP is too slow in non-opt mode. 
+ "//ortools/algorithms:binary_search", + "//ortools/linear_solver", + ], +) + # Flow-problem solver cc_binary( name = "solve_flow_model", @@ -748,3 +836,13 @@ cc_library( "@com_google_absl//absl/container:inlined_vector", ], ) + +cc_library( + name = "test_util", + hdrs = ["test_util.h"], + deps = [ + ":graph", + "//ortools/base:types", + "@com_google_absl//absl/memory", + ], +) diff --git a/ortools/graph/bounded_dijkstra_test.cc b/ortools/graph/bounded_dijkstra_test.cc index d35805705b..a2622d2248 100644 --- a/ortools/graph/bounded_dijkstra_test.cc +++ b/ortools/graph/bounded_dijkstra_test.cc @@ -28,13 +28,12 @@ #include "absl/random/random.h" #include "benchmark/benchmark.h" #include "gtest/gtest.h" +#include "ortools/base/dump_vars.h" #include "ortools/base/gmock.h" #include "ortools/graph/graph.h" #include "ortools/graph/io.h" #include "ortools/graph/test_util.h" #include "ortools/util/flat_matrix.h" -#include "ortools/util/saturated_arithmetic.h" -#include "util/tuple/dump_vars.h" namespace operations_research { namespace { @@ -683,8 +682,7 @@ TEST(BoundedDisjktraTest, RandomizedStressTest) { EXPECT_EQ(dijkstra.distances()[node], node_min_dist[node]) << node; } ASSERT_FALSE(HasFailure()) - << DUMP_VARS(num_nodes, num_arcs, num_sources, limit, sources, - lengths) + << DUMP_VARS(num_nodes, num_arcs, num_sources, limit, lengths) << "\n With graph:\n" << util::GraphToString(graph, util::PRINT_GRAPH_ARCS); } diff --git a/ortools/graph/cliques_test.cc b/ortools/graph/cliques_test.cc index 623d104ba3..b28bbda188 100644 --- a/ortools/graph/cliques_test.cc +++ b/ortools/graph/cliques_test.cc @@ -33,7 +33,6 @@ #include "gtest/gtest.h" #include "ortools/base/mathutil.h" #include "ortools/util/time_limit.h" -#include "util/functional/to_callback.h" namespace operations_research { namespace { @@ -163,9 +162,7 @@ TEST(BronKerbosch, CompleteGraph) { CliqueReporter reporter; auto callback = absl::bind_front(&CliqueReporter::AppendClique, &reporter); - 
operations_research::FindCliques( - ::util::functional::ToPermanentCallback(graph), num_nodes, - ::util::functional::ToPermanentCallback(callback)); + operations_research::FindCliques(graph, num_nodes, callback); const std::vector>& all_cliques = reporter.all_cliques(); EXPECT_EQ(1, all_cliques.size()); EXPECT_EQ(num_nodes, all_cliques[0].size()); @@ -243,9 +240,7 @@ TEST(BronKerbosch, EmptyGraph) { CliqueReporter reporter; auto callback = absl::bind_front(&CliqueReporter::AppendClique, &reporter); - operations_research::FindCliques( - ::util::functional::ToPermanentCallback(graph), 10, - ::util::functional::ToPermanentCallback(callback)); + operations_research::FindCliques(graph, 10, callback); const std::vector>& all_cliques = reporter.all_cliques(); EXPECT_EQ(10, all_cliques.size()); for (int i = 0; i < 10; ++i) { @@ -336,9 +331,7 @@ TEST(BronKerbosch, MatchingGraph) { CliqueReporter reporter; auto callback = absl::bind_front(&CliqueReporter::AppendClique, &reporter); - operations_research::FindCliques( - ::util::functional::ToPermanentCallback(graph), 10, - ::util::functional::ToPermanentCallback(callback)); + operations_research::FindCliques(graph, 10, callback); const std::vector>& all_cliques = reporter.all_cliques(); EXPECT_EQ(5, all_cliques.size()); for (int i = 0; i < 5; ++i) { @@ -370,9 +363,7 @@ TEST(BronKerbosch, ModuloGraph5) { CliqueReporter reporter; auto callback = absl::bind_front(&CliqueReporter::AppendClique, &reporter); - operations_research::FindCliques( - ::util::functional::ToPermanentCallback(graph), 40, - ::util::functional::ToPermanentCallback(callback)); + operations_research::FindCliques(graph, 40, callback); const std::vector>& all_cliques = reporter.all_cliques(); EXPECT_EQ(5, all_cliques.size()); for (int i = 0; i < 5; ++i) { @@ -406,9 +397,7 @@ TEST(BronKerbosch, CompleteGraphCover) { CliqueReporter reporter; auto callback = absl::bind_front(&CliqueReporter::AppendClique, &reporter); - operations_research::CoverArcsByCliques( - 
::util::functional::ToPermanentCallback(graph), 10, - ::util::functional::ToPermanentCallback(callback)); + operations_research::CoverArcsByCliques(graph, 10, callback); const std::vector>& all_cliques = reporter.all_cliques(); EXPECT_EQ(1, all_cliques.size()); EXPECT_EQ(10, all_cliques[0].size()); @@ -457,9 +446,7 @@ TEST(BronKerbosch, EmptyGraphCover) { CliqueReporter reporter; auto callback = absl::bind_front(&CliqueReporter::AppendClique, &reporter); - operations_research::CoverArcsByCliques( - ::util::functional::ToPermanentCallback(graph), 10, - ::util::functional::ToPermanentCallback(callback)); + operations_research::CoverArcsByCliques(graph, 10, callback); const std::vector>& all_cliques = reporter.all_cliques(); EXPECT_EQ(0, all_cliques.size()); } @@ -469,9 +456,7 @@ TEST(BronKerbosch, MatchingGraphCover) { CliqueReporter reporter; auto callback = absl::bind_front(&CliqueReporter::AppendClique, &reporter); - operations_research::CoverArcsByCliques( - ::util::functional::ToPermanentCallback(graph), 10, - ::util::functional::ToPermanentCallback(callback)); + operations_research::CoverArcsByCliques(graph, 10, callback); const std::vector>& all_cliques = reporter.all_cliques(); EXPECT_EQ(5, all_cliques.size()); for (int i = 0; i < 5; ++i) { @@ -486,9 +471,7 @@ TEST(BronKerbosch, ModuloGraph5Cover) { CliqueReporter reporter; auto callback = absl::bind_front(&CliqueReporter::AppendClique, &reporter); - operations_research::CoverArcsByCliques( - ::util::functional::ToPermanentCallback(graph), 40, - ::util::functional::ToPermanentCallback(callback)); + operations_research::CoverArcsByCliques(graph, 40, callback); const std::vector>& all_cliques = reporter.all_cliques(); EXPECT_EQ(5, all_cliques.size()); for (int i = 0; i < 5; ++i) { @@ -628,9 +611,7 @@ void BM_FindCliquesInModuloGraph(benchmark::State& state) { auto callback = absl::bind_front(&CliqueSizeVerifier::AppendClique, &verifier); - operations_research::FindCliques( - 
::util::functional::ToPermanentCallback(graph), kGraphSize, - ::util::functional::ToPermanentCallback(callback)); + operations_research::FindCliques(graph, kGraphSize, callback); } EXPECT_EQ(state.max_iterations * kExpectedNumCliques, verifier.num_cliques()); } @@ -720,10 +701,8 @@ void BM_FindCliquesInFull7PartiteGraph(benchmark::State& state) { auto callback = absl::bind_front(&CliqueSizeVerifier::AppendClique, &verifier); - operations_research::FindCliques( - ::util::functional::ToPermanentCallback(graph), - kNumPartitions * kNumPartitions, - ::util::functional::ToPermanentCallback(callback)); + operations_research::FindCliques(graph, kNumPartitions * kNumPartitions, + callback); } EXPECT_EQ(state.max_iterations * kExpectedNumCliques, verifier.num_cliques()); } @@ -733,8 +712,7 @@ BENCHMARK(BM_FindCliquesInFull7PartiteGraph); void BM_FindCliquesInFullKPartiteGraphWithBronKerboschAlgorithm( benchmark::State& state) { int num_partitions = state.range(0); - const int kExpectedNumCliques = - ::MathUtil::IPow(num_partitions, num_partitions); + const int kExpectedNumCliques = std::pow(num_partitions, num_partitions); const int kExpectedCliqueSize = num_partitions; const auto graph = [num_partitions](int index1, int index2) { @@ -764,7 +742,7 @@ void BM_FindCliquesInRandomGraphWithBronKerboschAlgorithm( const double arc_probability = arc_probability_permille / 1000.0; const absl::flat_hash_set> adjacency_matrix = MakeRandomGraphAdjacencyMatrix(num_nodes, arc_probability, - absl::GetFlag(FLAGS_test_random_seed)); + GTEST_FLAG_GET(random_seed)); const auto graph = [adjacency_matrix](int index1, int index2) { return BitmapGraph(adjacency_matrix, index1, index2); }; diff --git a/ortools/graph/k_shortest_paths_test.cc b/ortools/graph/k_shortest_paths_test.cc index f36e743ff4..6daa856975 100644 --- a/ortools/graph/k_shortest_paths_test.cc +++ b/ortools/graph/k_shortest_paths_test.cc @@ -237,8 +237,8 @@ Graph GenerateUniformGraph(URBG&& urbg, const NodeIndexType num_nodes, 
for (ArcIndexType i = 0; i < std::min(num_edges, max_num_arcs); ++i) { NodeIndexType src, dst; std::tie(src, dst) = pick_two_distinct_nodes(); - if (arcs.contains({src, dst})) continue; - if (IsDirected && arcs.contains({dst, src})) continue; + if (arcs.find({src, dst}) != arcs.end()) continue; + if (IsDirected && (arcs.find({dst, src}) != arcs.end())) continue; arcs.insert({src, dst}); graph.AddArc(src, dst); diff --git a/ortools/graph/min_cost_flow_test.cc b/ortools/graph/min_cost_flow_test.cc index 6556bb1b2b..8ad26727b2 100644 --- a/ortools/graph/min_cost_flow_test.cc +++ b/ortools/graph/min_cost_flow_test.cc @@ -588,7 +588,7 @@ CostValue SolveMinCostFlow(GenericMinCostFlow* min_cost_flow) { template CostValue SolveMinCostFlowWithLP(GenericMinCostFlow* min_cost_flow) { - MPSolver solver("LPSolver", MPSolver::CLP_LINEAR_PROGRAMMING); + MPSolver solver("LPSolver", MPSolver::GLOP_LINEAR_PROGRAMMING); const Graph* graph = min_cost_flow->graph(); const NodeIndex num_nodes = graph->num_nodes(); const ArcIndex num_arcs = graph->num_arcs(); diff --git a/ortools/graph/test_util.h b/ortools/graph/test_util.h new file mode 100644 index 0000000000..0841acfc0f --- /dev/null +++ b/ortools/graph/test_util.h @@ -0,0 +1,65 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Unit test utilities related to graph.h. 
+ +#ifndef UTIL_GRAPH_TEST_UTIL_H_ +#define UTIL_GRAPH_TEST_UTIL_H_ + +#include +#include + +#include "absl/memory/memory.h" +#include "ortools/base/types.h" +#include "ortools/graph/graph.h" + +namespace util { + +// Generate a 2-dimensional undirected grid graph. +// +// Eg. for width=3, height=2, it generates this: +// 0 <---> 1 <---> 2 +// ^ ^ ^ +// | | | +// v v v +// 3 <---> 4 <---> 5 +template +std::unique_ptr Create2DGridGraph(int64_t width, int64_t height) { + const int64_t num_arcs = 2L * ((width - 1) * height + width * (height - 1)); + auto graph = std::make_unique(/*num_nodes=*/width * height, + /*arc_capacity=*/num_arcs); + // Add horizontal edges. + for (int i = 0; i < height; ++i) { + for (int j = 1; j < width; ++j) { + const int left = i * width + (j - 1); + const int right = i * width + j; + graph->AddArc(left, right); + graph->AddArc(right, left); + } + } + // Add vertical edges. + for (int i = 1; i < height; ++i) { + for (int j = 0; j < width; ++j) { + const int up = (i - 1) * width + j; + const int down = i * width + j; + graph->AddArc(up, down); + graph->AddArc(down, up); + } + } + graph->Build(); + return graph; +} + +} // namespace util + +#endif // UTIL_GRAPH_TEST_UTIL_H_ From 7c154553f2fd1754f7133009470dcb0b762b399f Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 3 Oct 2024 10:59:28 +0200 Subject: [PATCH 039/105] sat: export tests from google3 --- ortools/base/BUILD.bazel | 10 + ortools/base/parse_test_proto.h | 53 + ortools/sat/2d_packing_brute_force_test.cc | 361 ++++ ortools/sat/BUILD.bazel | 265 +++ ortools/sat/cp_constraints.h | 2 +- ortools/sat/cp_model_checker_test.cc | 666 ++++++++ ortools/sat/cp_model_expand_test.cc | 1793 ++++++++++++++++++++ ortools/sat/cp_model_lns.cc | 47 +- ortools/sat/cp_model_postsolve_test.cc | 347 ++++ ortools/sat/cp_model_search_test.cc | 297 ++++ ortools/sat/cp_model_solver.cc | 2 +- ortools/sat/cuts.cc | 95 +- ortools/sat/flaky_models_test.cc | 101 ++ ortools/sat/integer_expr_test.cc | 1644 
++++++++++++++++++ ortools/sat/linear_model_test.cc | 144 ++ ortools/sat/linear_relaxation_test.cc | 1200 +++++++++++++ ortools/sat/presolve_context_test.cc | 1038 +++++++++++ ortools/sat/presolve_util_test.cc | 513 ++++++ ortools/sat/rins_test.cc | 173 ++ ortools/sat/table_test.cc | 603 +++++++ ortools/sat/var_domination_test.cc | 614 +++++++ 21 files changed, 9909 insertions(+), 59 deletions(-) create mode 100644 ortools/base/parse_test_proto.h create mode 100644 ortools/sat/2d_packing_brute_force_test.cc create mode 100644 ortools/sat/cp_model_checker_test.cc create mode 100644 ortools/sat/cp_model_expand_test.cc create mode 100644 ortools/sat/cp_model_postsolve_test.cc create mode 100644 ortools/sat/cp_model_search_test.cc create mode 100644 ortools/sat/flaky_models_test.cc create mode 100644 ortools/sat/integer_expr_test.cc create mode 100644 ortools/sat/linear_model_test.cc create mode 100644 ortools/sat/linear_relaxation_test.cc create mode 100644 ortools/sat/presolve_context_test.cc create mode 100644 ortools/sat/presolve_util_test.cc create mode 100644 ortools/sat/rins_test.cc create mode 100644 ortools/sat/table_test.cc create mode 100644 ortools/sat/var_domination_test.cc diff --git a/ortools/base/BUILD.bazel b/ortools/base/BUILD.bazel index 3a93dede6f..a74cdf1575 100644 --- a/ortools/base/BUILD.bazel +++ b/ortools/base/BUILD.bazel @@ -410,6 +410,16 @@ cc_library( ], ) +cc_library( + name = "parse_test_proto", + hdrs = ["parse_test_proto.h"], + deps = [ + ":gmock", + "@com_google_absl//absl/log:check", + "@com_google_protobuf//:protobuf", + ], +) + cc_library( name = "path", srcs = ["path.cc"], diff --git a/ortools/base/parse_test_proto.h b/ortools/base/parse_test_proto.h new file mode 100644 index 0000000000..e7979b7c16 --- /dev/null +++ b/ortools/base/parse_test_proto.h @@ -0,0 +1,53 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_BASE_PARSE_TEST_PROTO_H_ +#define OR_TOOLS_BASE_PARSE_TEST_PROTO_H_ + +#include +#include +#include + +#include "google/protobuf/message.h" +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + +namespace google::protobuf::contrib::parse_proto { + +namespace parse_proto_internal { + +class ParseProtoHelper { + public: + explicit ParseProtoHelper(std::string_view asciipb) : asciipb_(asciipb) {} + template + operator T() { // NOLINT(runtime/explicit) + T result; + const bool ok = ::google::protobuf::TextFormat::TextFormat::ParseFromString( + asciipb_, &result); + EXPECT_TRUE(ok) << "Failed to parse text proto: " << asciipb_; + return result; + } + + private: + const std::string asciipb_; +}; + +} // namespace parse_proto_internal + +parse_proto_internal::ParseProtoHelper ParseTestProto(std::string_view input) { + return parse_proto_internal::ParseProtoHelper(input); +} + +} // namespace google::protobuf::contrib::parse_proto + +#endif // OR_TOOLS_BASE_PARSE_TEST_PROTO_H_ diff --git a/ortools/sat/2d_packing_brute_force_test.cc b/ortools/sat/2d_packing_brute_force_test.cc new file mode 100644 index 0000000000..0efbbbeb27 --- /dev/null +++ b/ortools/sat/2d_packing_brute_force_test.cc @@ -0,0 +1,361 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/2d_packing_brute_force.h" + +#include +#include +#include +#include +#include +#include + +#include "absl/log/check.h" +#include "absl/random/bit_gen_ref.h" +#include "absl/random/distributions.h" +#include "absl/random/random.h" +#include "absl/types/span.h" +#include "benchmark/benchmark.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/sat/cp_model.h" +#include "ortools/sat/cp_model_solver.h" +#include "ortools/sat/diffn_util.h" +#include "ortools/sat/integer.h" + +namespace operations_research { +namespace sat { +namespace { + +std::vector SolveOrthogonalPacking( + absl::Span sizes_x, + absl::Span sizes_y, + std::pair bounding_box_size) { + const int num_items = sizes_x.size(); + CHECK_EQ(num_items, sizes_y.size()); + CHECK_GT(bounding_box_size.first, 0); + CHECK_GT(bounding_box_size.second, 0); + CpModelBuilder cp_model; + NoOverlap2DConstraint no_overlap_2d = cp_model.AddNoOverlap2D(); + std::vector start_x_vars; + std::vector start_y_vars; + for (int item = 0; item < num_items; ++item) { + IntVar start_x = cp_model.NewIntVar( + {0, bounding_box_size.first.value() - sizes_x[item].value()}); + IntVar start_y = cp_model.NewIntVar( + {0, bounding_box_size.second.value() - sizes_y[item].value()}); + start_x_vars.push_back(start_x); + start_y_vars.push_back(start_y); + + IntervalVar interval_x = + cp_model.NewFixedSizeIntervalVar(start_x, sizes_x[item].value()); + IntervalVar interval_y = + cp_model.NewFixedSizeIntervalVar(start_y, sizes_y[item].value()); + + 
no_overlap_2d.AddRectangle(interval_x, interval_y); + } + SatParameters parameters; + // Disable the propagator so we don't use the code we want to test. + parameters.set_use_area_energetic_reasoning_in_no_overlap_2d(false); + const CpSolverResponse response = + SolveWithParameters(cp_model.Build(), parameters); + if (response.status() != CpSolverStatus::OPTIMAL) { + return {}; + } + std::vector solution; + for (int i = 0; i < num_items; ++i) { + const IntegerValue start_x = + SolutionIntegerValue(response, start_x_vars[i]); + const IntegerValue start_y = + SolutionIntegerValue(response, start_y_vars[i]); + solution.push_back({.x_min = start_x, + .x_max = start_x + sizes_x[i], + .y_min = start_y, + .y_max = start_y + sizes_y[i]}); + } + return solution; +} + +bool CumulativeIsFeasible( + absl::Span sizes_x, + absl::Span sizes_y, + std::pair bounding_box_size, bool both_sides) { + const int num_items = sizes_x.size(); + CHECK_EQ(num_items, sizes_y.size()); + CHECK_GT(bounding_box_size.first, 0); + CHECK_GT(bounding_box_size.second, 0); + + CpModelBuilder cp_model; + CumulativeConstraint cumulative = + cp_model.AddCumulative(bounding_box_size.second.value()); + + for (int item = 0; item < num_items; ++item) { + const IntVar start_time = cp_model.NewIntVar( + {0, bounding_box_size.first.value() - sizes_x[item].value()}); + const IntervalVar start_time_interval_x = + cp_model.NewFixedSizeIntervalVar(start_time, sizes_x[item].value()); + cumulative.AddDemand(start_time_interval_x, sizes_y[item].value()); + } + if (both_sides) { + CumulativeConstraint cumulative_y = + cp_model.AddCumulative(bounding_box_size.first.value()); + for (int item = 0; item < num_items; ++item) { + const IntVar start_time = cp_model.NewIntVar( + {0, bounding_box_size.second.value() - sizes_y[item].value()}); + const IntervalVar start_time_interval_y = + cp_model.NewFixedSizeIntervalVar(start_time, sizes_y[item].value()); + cumulative_y.AddDemand(start_time_interval_y, sizes_x[item].value()); + } 
+ } + const CpSolverResponse response = Solve(cp_model.Build()); + return (response.status() == CpSolverStatus::OPTIMAL); +} + +struct OppProblem { + std::vector items_x_sizes; + std::vector items_y_sizes; + std::pair bb_sizes; +}; + +OppProblem CreateRandomOppProblem(absl::BitGenRef random, int num_items) { + OppProblem result; + std::vector& items_x_sizes = result.items_x_sizes; + std::vector& items_y_sizes = result.items_y_sizes; + const int size = 300; + items_x_sizes.clear(); + items_y_sizes.clear(); + IntegerValue area = 0; + for (int i = 0; i < num_items; ++i) { + const IntegerValue x_size = absl::Uniform(random, 1, size); + const IntegerValue y_size = absl::Uniform(random, 1, size); + items_x_sizes.push_back(x_size); + items_y_sizes.push_back(y_size); + area += x_size * y_size; + } + const IntegerValue box_x_size = + absl::Uniform(random, size, static_cast(sqrt(num_items) * size)); + const IntegerValue box_y_size = + std::max(IntegerValue(size), (area + box_x_size - 1) / box_x_size); + result.bb_sizes = {box_x_size, box_y_size}; + return result; +} + +void CheckSolution(const OppProblem& problem, + absl::Span solution) { + CHECK_EQ(problem.items_x_sizes.size(), solution.size()); + for (const Rectangle& item : solution) { + CHECK_GE(item.x_min, 0); + CHECK_LE(item.x_max, problem.bb_sizes.first); + CHECK_GE(item.y_min, 0); + CHECK_LE(item.y_max, problem.bb_sizes.second); + } + + for (int i = 0; i < problem.items_x_sizes.size(); ++i) { + CHECK_EQ(problem.items_x_sizes[i], solution[i].SizeX()); + CHECK_EQ(problem.items_y_sizes[i], solution[i].SizeY()); + + for (int j = i + 1; j < problem.items_x_sizes.size(); ++j) { + CHECK(solution[i].IsDisjoint(solution[j])) + << " for solution: " + << RenderDot(Rectangle{.x_min = 0, + .x_max = problem.bb_sizes.first, + .y_min = 0, + .y_max = problem.bb_sizes.second}, + solution); + } + } +} + +TEST(CheckSolutionTest, CheckWithCPSat) { + EXPECT_TRUE( + SolveOrthogonalPacking({4, 4, 8, 8}, {6, 6, 5, 5}, {13, 10}).empty()); + 
EXPECT_FALSE( + SolveOrthogonalPacking({4, 4, 8, 8}, {6, 6, 5, 5}, {12, 12}).empty()); + + absl::BitGen random; + int feasible = 0; + for (int i = 0; i < 1000; ++i) { + const OppProblem problem = CreateRandomOppProblem(random, 6); + const auto brute_force_solution = BruteForceOrthogonalPacking( + problem.items_x_sizes, problem.items_y_sizes, problem.bb_sizes, + problem.items_x_sizes.size()); + CHECK(brute_force_solution.status != BruteForceResult::Status::kTooBig); + feasible += + brute_force_solution.status == BruteForceResult::Status::kFoundSolution; + if (brute_force_solution.status == + BruteForceResult::Status::kFoundSolution) { + CheckSolution(problem, brute_force_solution.positions_for_solution); + } + const auto solution = SolveOrthogonalPacking( + problem.items_x_sizes, problem.items_y_sizes, problem.bb_sizes); + if (!solution.empty()) { + CheckSolution(problem, solution); + } + EXPECT_EQ(brute_force_solution.status == + BruteForceResult::Status::kNoSolutionExists, + solution.empty()); + } + std::cout << "feasible: " << feasible << "\n"; +} + +// Example that is feasible for the cumulative in one dimension but not for 2d +// packing: +// +// digraph { +// graph [ bgcolor=lightgray width=18 height=12] +// node [style=filled] +// bb [fillcolor="grey" pos="9,6!" shape=box width=18 height=12] +// 0 [fillcolor="green" pos="5,2!" shape=box width=10 height=4] +// 1 [fillcolor="purple" pos="13,3!" shape=box width=6 height=6] +// 2 [fillcolor="red" pos="17,4!" shape=box width=2 height=8] +// 3 [fillcolor="blue" pos="13,10!" shape=box width=10 height=4] +// 4 [fillcolor="yellow" pos="5,9!" shape=box width=6 height=6] +// 5 [fillcolor="cyan" pos="1,8!" shape=box width=2 height=8] +// 6 [fillcolor="red" pos="7,5!" 
shape=box width=10 height=2] +// } +// +// +--------------------------------------------------------+ +// |****** @@@@@@@@@@@@@@@@@@ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| +// |****** @@@@@@@@@@@@@@@@@@ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| +// |****** @@@@@@@@@@@@@@@@@@ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| +// |****** @@@@@@@@@@@@@@@@@@ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| +// |****** @@@@@@@@@@@@@@@@@@ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| +// |****** @@@@@@@@@@@@@@@@@@ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| +// |****** @@@@@@@@@@@@@@@@@@ ++++++ ......| +// |****** @@@@@@@@@@@@@@@@@@ ++++++ ......| +// |****** @@@@@@@@@@@@@@@@@@ ++++++ ......| +// |****** ++++++++++++++++++++++++"""""""""""""""""" ......| <----- +// |****** ++++++++++++++++++++++++"""""""""""""""""" ......| +// |****** ++++++++++++++++++++++++"""""""""""""""""" ......| +// |000000000000000000000000000000 """""""""""""""""" ......| +// |000000000000000000000000000000 """""""""""""""""" ......| +// |000000000000000000000000000000 """""""""""""""""" ......| +// |000000000000000000000000000000 """""""""""""""""" ......| +// |000000000000000000000000000000 """""""""""""""""" ......| +// |000000000000000000000000000000 """""""""""""""""" ......| +// +--------------------------------------------------------+ +// ^ +// | +TEST(CheckSolutionTest, CumulativeFeasiblePackingInfeasibleExample) { + std::vector items_x_sizes = {5, 3, 1, 5, 3, 1, 5}; + std::vector items_y_sizes = {2, 3, 4, 2, 3, 4, 1}; + std::pair bb_sizes = {9, 6}; + + EXPECT_TRUE( + SolveOrthogonalPacking(items_x_sizes, items_y_sizes, bb_sizes).empty()); + EXPECT_TRUE( + CumulativeIsFeasible(items_x_sizes, items_y_sizes, bb_sizes, false)); + // Note that it is infeasible if we take the cumulative on the y though: + EXPECT_FALSE(CumulativeIsFeasible(items_y_sizes, items_x_sizes, + {bb_sizes.second, bb_sizes.first}, false)); +} + +// Example that is feasible for the cumulative in both dimensions but not for 2d +// packing. 
+// +// digraph { +// graph [ bgcolor=lightgray width=30 height=30] +// node [style=filled] +// bb [fillcolor="grey" pos="15,15!" shape=box width=30 height=30] +// 0 [fillcolor="red" pos="29,17!" shape=box width=2 height=26] +// 1 [fillcolor="green" pos="22,10!" shape=box width=4 height=12] +// 2 [fillcolor="blue" pos="26,14!" shape=box width=4 height=20] +// 3 [fillcolor="cyan" pos="10,7!" shape=box width=20 height=14] +// 4 [fillcolor="yellow" pos="15,19!" shape=box width=14 height=10] +// 5 [fillcolor="purple" pos="26,2!" shape=box width=8 height=4] +// 6 [fillcolor="red" pos="4,23!" shape=box width=8 height=14] +// 7 [fillcolor="green" pos="18,27!" shape=box width=20 height=6] +// } +TEST(CheckSolutionTest, CumulativeFeasiblePackingInfeasibleBothExample) { + std::vector items_x_sizes = {1, 2, 2, 10, 7, 4, 4, 10}; + std::vector items_y_sizes = {13, 6, 10, 7, 5, 2, 7, 3}; + std::pair bb_sizes = {15, 15}; + EXPECT_TRUE( + SolveOrthogonalPacking(items_x_sizes, items_y_sizes, bb_sizes).empty()); + EXPECT_TRUE( + CumulativeIsFeasible(items_x_sizes, items_y_sizes, bb_sizes, true)); +} + +MATCHER_P(FieldEq, field, "") { + return testing::Matches(testing::Field(field, ::testing::get<1>(arg)))( + ::testing::get<0>(arg)); +} + +TEST(TestPreprocessing, Works) { + std::vector bb_sizes = {100, 40}; + OppProblem problem = {.items_x_sizes = {95, 93, 90, 5, 7, 8, 20, 30, 20, 20}, + .items_y_sizes = {5, 5, 6, 8, 2, 4, 10, 10, 10, 10}, + .bb_sizes = {100, 40}}; + std::vector items; + for (int i = 0; i < problem.items_x_sizes.size(); ++i) { + items.push_back({.size_x = problem.items_x_sizes[i], + .size_y = problem.items_y_sizes[i]}); + } + absl::Span preprocessed_items = absl::MakeSpan(items); + EXPECT_TRUE(Preprocess(preprocessed_items, problem.bb_sizes, 10)); + // We expect that 95x5, 93x5, 90x6, 5x8, 7x2 and 8x4 be removed. 
+ EXPECT_EQ(preprocessed_items.size(), 4); + EXPECT_THAT(preprocessed_items, + testing::UnorderedPointwise(FieldEq(&PermutableItem::size_x), + {20, 30, 20, 20})); + // Original items don't disappeared, but the order might have been changed. + EXPECT_THAT(items, + testing::UnorderedPointwise(FieldEq(&PermutableItem::size_x), + problem.items_x_sizes)); +} + +void BM_BruteForceOrthogonalPacking(benchmark::State& state) { + absl::BitGen random; + static constexpr int kNumProblems = 100; + std::vector problems; + const bool feasible = state.range(1); + while (problems.size() < kNumProblems) { + OppProblem problem = CreateRandomOppProblem(random, state.range(0)); + if ((BruteForceOrthogonalPacking(problem.items_x_sizes, + problem.items_y_sizes, problem.bb_sizes, + problem.items_x_sizes.size()) + .status == BruteForceResult::Status::kFoundSolution) == feasible) { + problems.push_back(problem); + } + } + int index = 0; + for (auto s : state) { + const auto& problem = problems[index]; + BruteForceOrthogonalPacking(problem.items_x_sizes, problem.items_y_sizes, + problem.bb_sizes, problem.items_x_sizes.size()); + ++index; + if (index == problems.size()) { + index = 0; + } + } +} + +BENCHMARK(BM_BruteForceOrthogonalPacking) + ->ArgPair(3, false) + ->ArgPair(4, false) + ->ArgPair(5, false) + ->ArgPair(6, false) + ->ArgPair(7, false) + ->ArgPair(8, false) + ->ArgPair(9, false) + ->ArgPair(3, true) + ->ArgPair(4, true) + ->ArgPair(5, true) + ->ArgPair(6, true) + ->ArgPair(7, true) + ->ArgPair(8, true) + ->ArgPair(9, true); + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index c21059e05e..5e1b5ba3d7 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -204,6 +204,20 @@ cc_library( ], ) +cc_test( + name = "cp_model_checker_test", + size = "small", + srcs = ["cp_model_checker_test.cc"], + deps = [ + ":cp_model_cc_proto", + ":cp_model_checker", + "//ortools/base:gmock_main", + 
"//ortools/base:parse_test_proto", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "constraint_violation", srcs = ["constraint_violation.cc"], @@ -286,6 +300,24 @@ cc_library( ], ) +cc_test( + name = "linear_model_test", + size = "small", + srcs = ["linear_model_test.cc"], + deps = [ + ":cp_model_cc_proto", + ":cp_model_presolve", + ":linear_model", + ":model", + ":presolve_context", + ":sat_parameters_cc_proto", + "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", + "//ortools/util:logging", + "@com_google_absl//absl/log", + ], +) + cc_library( name = "parameters_validation", srcs = ["parameters_validation.cc"], @@ -337,6 +369,21 @@ cc_library( ], ) +cc_test( + name = "cp_model_search_test", + size = "small", + srcs = ["cp_model_search_test.cc"], + deps = [ + ":cp_model_cc_proto", + ":cp_model_search", + ":cp_model_solver", + ":model", + ":sat_parameters_cc_proto", + "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", + ], +) + cc_library( name = "cp_model_solver_helpers", srcs = ["cp_model_solver_helpers.cc"], @@ -651,6 +698,27 @@ cc_library( ], ) +cc_test( + name = "presolve_util_test", + size = "small", + srcs = ["presolve_util_test.cc"], + deps = [ + ":cp_model", + ":cp_model_cc_proto", + ":cp_model_solver", + ":cp_model_utils", + ":presolve_util", + ":sat_parameters_cc_proto", + "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", + "//ortools/util:sorted_interval_list", + "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/log", + "@com_google_absl//absl/random", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "presolve_context", srcs = ["presolve_context.cc"], @@ -691,6 +759,25 @@ cc_library( ], ) +cc_test( + name = "presolve_context_test", + size = "small", + srcs = ["presolve_context_test.cc"], + deps = [ + ":cp_model_cc_proto", + ":cp_model_utils", + ":model", + ":presolve_context", + "//ortools/base:gmock_main", 
+ "//ortools/base:parse_test_proto", + "//ortools/base:types", + "//ortools/util:affine_relation", + "//ortools/util:sorted_interval_list", + "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "cp_model_presolve", srcs = [ @@ -774,6 +861,20 @@ cc_library( ], ) +cc_test( + name = "cp_model_postsolve_test", + size = "small", + srcs = ["cp_model_postsolve_test.cc"], + deps = [ + ":cp_model_cc_proto", + ":cp_model_postsolve", + "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", + "//ortools/util:logging", + "//ortools/util:sorted_interval_list", + ], +) + cc_library( name = "cp_model_expand", srcs = ["cp_model_expand.cc"], @@ -802,6 +903,29 @@ cc_library( ], ) +cc_test( + name = "cp_model_expand_test", + size = "small", + srcs = ["cp_model_expand_test.cc"], + deps = [ + ":cp_model_cc_proto", + ":cp_model_checker", + ":cp_model_expand", + ":cp_model_solver", + ":cp_model_utils", + ":model", + ":presolve_context", + ":sat_parameters_cc_proto", + "//ortools/base", + "//ortools/base:container_logging", + "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", + "//ortools/util:sorted_interval_list", + "@com_google_absl//absl/container:btree", + "@com_google_absl//absl/strings", + ], +) + cc_library( name = "sat_base", hdrs = ["sat_base.h"], @@ -1209,6 +1333,22 @@ cc_library( ], ) +cc_test( + name = "var_domination_test", + size = "small", + srcs = ["var_domination_test.cc"], + deps = [ + ":cp_model_cc_proto", + ":integer", + ":model", + ":presolve_context", + ":var_domination", + "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", + "//ortools/util:sorted_interval_list", + ], +) + cc_library( name = "integer", srcs = ["integer.cc"], @@ -1491,6 +1631,38 @@ cc_library( ], ) +cc_test( + name = "integer_expr_test", + size = "medium", + srcs = ["integer_expr_test.cc"], + deps = [ + "cp_model_checker", + "cp_model_solver", + ":cp_model_cc_proto", + ":cp_model_utils", + 
":integer", + ":integer_expr", + ":linear_constraint", + ":model", + ":sat_base", + ":sat_parameters_cc_proto", + ":sat_solver", + "//ortools/base", + "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", + "//ortools/port:proto_utils", + "//ortools/util:saturated_arithmetic", + "//ortools/util:sorted_interval_list", + "//ortools/util:strong_integers", + "@com_google_absl//absl/container:btree", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "linear_propagation", srcs = ["linear_propagation.cc"], @@ -1886,6 +2058,27 @@ cc_library( ], ) +cc_test( + name = "linear_relaxation_test", + srcs = ["linear_relaxation_test.cc"], + deps = [ + ":cp_model_cc_proto", + ":cp_model_loader", + ":cp_model_mapping", + ":integer", + ":intervals", + ":linear_constraint", + ":linear_relaxation", + ":model", + ":sat_base", + "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", + "//ortools/util:sorted_interval_list", + "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "linear_constraint", srcs = ["linear_constraint.cc"], @@ -2437,6 +2630,28 @@ cc_library( ], ) +cc_test( + name = "table_test", + srcs = ["table_test.cc"], + deps = [ + ":cp_model", + ":cp_model_cc_proto", + ":cp_model_solver", + ":integer", + ":model", + ":sat_base", + ":sat_parameters_cc_proto", + ":sat_solver", + ":table", + "//ortools/base", + "//ortools/base:container_logging", + "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", + "@com_google_absl//absl/container:btree", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "cp_constraints", srcs = ["cp_constraints.cc"], @@ -2527,6 +2742,25 @@ cc_library( ], ) +cc_test( + name = "2d_packing_brute_force_test", + srcs = ["2d_packing_brute_force_test.cc"], + deps = [ + 
":2d_packing_brute_force", + ":cp_model", + ":cp_model_solver", + ":diffn_util", + ":integer", + "//ortools/base:gmock_main", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random", + "@com_google_absl//absl/random:bit_gen_ref", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/types:span", + "@com_google_benchmark//:benchmark", + ], +) + cc_library( name = "2d_rectangle_presolve", srcs = ["2d_rectangle_presolve.cc"], @@ -2772,6 +3006,7 @@ cc_library( ":cp_model_presolve", ":cp_model_solver_helpers", ":cp_model_utils", + ":diffn_util", ":integer", ":linear_constraint_manager", ":linear_programming_constraint", @@ -2857,6 +3092,23 @@ cc_library( ], ) +cc_test( + name = "rins_test", + size = "small", + srcs = ["rins_test.cc"], + deps = [ + ":cp_model_cc_proto", + ":cp_model_loader", + ":model", + ":rins", + ":synchronization", + "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", + "//ortools/util:random_engine", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "subsolver", srcs = ["subsolver.cc"], @@ -3164,3 +3416,16 @@ cc_test( "@com_google_absl//absl/types:span", ], ) + +cc_test( + name = "flaky_models_test", + size = "small", + srcs = ["flaky_models_test.cc"], + deps = [ + ":cp_model_cc_proto", + ":cp_model_solver", + ":sat_parameters_cc_proto", + "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", + ], +) diff --git a/ortools/sat/cp_constraints.h b/ortools/sat/cp_constraints.h index 67a3a5fad6..3090489d1c 100644 --- a/ortools/sat/cp_constraints.h +++ b/ortools/sat/cp_constraints.h @@ -112,7 +112,7 @@ class GreaterThanAtLeastOneOfPropagator : public PropagatorInterface, // ============================================================================ inline std::vector ToIntegerValueVector( - const std::vector& input) { + absl::Span input) { std::vector result(input.size()); for (int i = 0; i < input.size(); ++i) { result[i] = IntegerValue(input[i]); diff --git 
a/ortools/sat/cp_model_checker_test.cc b/ortools/sat/cp_model_checker_test.cc new file mode 100644 index 0000000000..ccc1f3f887 --- /dev/null +++ b/ortools/sat/cp_model_checker_test.cc @@ -0,0 +1,666 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/cp_model_checker.h" + +#include +#include +#include + +#include "absl/log/check.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/parse_test_proto.h" +#include "ortools/sat/cp_model.pb.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::google::protobuf::contrib::parse_proto::ParseTestProto; +using ::testing::HasSubstr; + +// This just checks that the code is at least properly executed. 
+TEST(SolutionIsFeasibleTest, BasicExample) { + const CpModelProto model = ParseTestProto(R"pb( + variables { name: 'x' domain: 0 domain: 10 } + variables { name: 'y' domain: 0 domain: 10 } + constraints { + linear { vars: 0 coeffs: 1 vars: 1 coeffs: 1 domain: 0 domain: 10 } + } + )pb"); + EXPECT_FALSE(SolutionIsFeasible(model, {8, 8})); + EXPECT_FALSE(SolutionIsFeasible(model, {11, -1})); + EXPECT_TRUE(SolutionIsFeasible(model, {5, 5})); +} + +TEST(SolutionIsFeasibleTest, LinMax) { + const CpModelProto model = ParseTestProto(R"pb( + variables { name: 'x' domain: 0 domain: 10 } + variables { name: 'y' domain: 0 domain: 30 } + constraints { + lin_max { + target { vars: 1 coeffs: 1 offset: 0 } + exprs { vars: 0 coeffs: 2 offset: 1 } + exprs { vars: 0 coeffs: 3 offset: -5 } + } + } + )pb"); + EXPECT_FALSE(SolutionIsFeasible(model, {2, 4})); + EXPECT_FALSE(SolutionIsFeasible(model, {11, -1})); + EXPECT_TRUE(SolutionIsFeasible(model, {2, 5})); + EXPECT_TRUE(SolutionIsFeasible(model, {8, 19})); +} + +TEST(SolutionIsFeasibleTest, OrToolsIssue3769) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 1, 2 ] } + variables { domain: [ 0, 1 ] } + constraints { + no_overlap_2d { + x_intervals: [ 1, 2 ] + y_intervals: [ 3, 4 ] + } + } + constraints { + interval { + start { offset: 2 } + end { + vars: [ 1 ] + coeffs: [ 1 ] + offset: 2 + } + size { + vars: [ 1 ] + coeffs: [ 1 ] + } + } + } + constraints { + interval { + start { offset: 1 } + end { offset: 3 } + size { offset: 2 } + } + } + constraints { + interval { + start { offset: 1 } + end { + vars: [ 0 ] + coeffs: [ 1 ] + offset: 1 + } + size { + vars: [ 0 ] + coeffs: [ 1 ] + } + } + } + constraints { + interval { + start { offset: 2 } + end { offset: 2 } + size {} + } + } + )pb"); + EXPECT_TRUE(SolutionIsFeasible(model, {1, 0})); + EXPECT_TRUE(SolutionIsFeasible(model, {1, 1})); + EXPECT_FALSE(SolutionIsFeasible(model, {2, 0})); +} + +TEST(SolutionIsFeasibleTest, Reservoir) { + const CpModelProto 
model = ParseTestProto(R"pb( + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 1, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + reservoir { + time_exprs: { vars: 0 coeffs: 1 } + time_exprs: { vars: 1 coeffs: 1 } + level_changes: { offset: -1 } + level_changes: { offset: 1 } + active_literals: [ 2, 3 ] + min_level: 0 + max_level: 2 + } + } + )pb"); + EXPECT_FALSE(SolutionIsFeasible(model, {0, 0, 1, 0})); + EXPECT_TRUE(SolutionIsFeasible(model, {0, 0, 1, 1})); + EXPECT_TRUE(SolutionIsFeasible(model, {1, 0, 1, 1})); + EXPECT_FALSE(SolutionIsFeasible(model, {0, 1, 1, 1})); + EXPECT_FALSE(SolutionIsFeasible(model, {0, 0, 1, 0})); +} + +TEST(SolutionIsFeasibleTest, ReservoirWithNegativeTime) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ -2, 2 ] } + variables { domain: [ -2, 2 ] } + constraints { + reservoir { + time_exprs: { vars: 0 coeffs: 1 } + time_exprs: { vars: 1 coeffs: 1 } + level_changes: { offset: 2 } + level_changes: { offset: -2 } + min_level: 0 + max_level: 2 + } + } + )pb"); + EXPECT_TRUE(SolutionIsFeasible(model, {1, 1})); + EXPECT_TRUE(SolutionIsFeasible(model, {0, 0})); + EXPECT_FALSE(SolutionIsFeasible(model, {1, 0})); + EXPECT_TRUE(SolutionIsFeasible(model, {0, 1})); + EXPECT_TRUE(SolutionIsFeasible(model, {-2, 2})); +} + +TEST(SolutionIsFeasibleTest, SelfArcAreOk) { + // The literal -1 is the negation of the first variable. 
+ const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + circuit { + literals: [ -1, 1, 2, 3, 0 ] + tails: [ 0, 1, 2, 3, 0 ] + heads: [ 0, 2, 3, 1, 2 ] + } + } + )pb"); + EXPECT_TRUE(SolutionIsFeasible(model, {0, 1, 1, 1})); + EXPECT_FALSE(SolutionIsFeasible(model, {1, 1, 1, 1})); +} + +TEST(SolutionIsFeasibleTest, SparseCircuit) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + circuit { + literals: [ 0, 1, 2, 3 ] + tails: [ -10, 10, 9, 1000 ] + heads: [ 10, 9, 1000, -10 ] + } + } + )pb"); + EXPECT_TRUE(SolutionIsFeasible(model, {1, 1, 1, 1})); + EXPECT_FALSE(SolutionIsFeasible(model, {1, 0, 1, 1})); +} + +TEST(SolutionIsFeasibleTest, BoolXor) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { bool_xor { literals: [ 0, 1, 2, 3 ] } } + )pb"); + EXPECT_TRUE(SolutionIsFeasible(model, {1, 0, 0, 0})); + EXPECT_TRUE(SolutionIsFeasible(model, {1, 1, 1, 0})); + EXPECT_FALSE(SolutionIsFeasible(model, {1, 1, 1, 1})); + EXPECT_FALSE(SolutionIsFeasible(model, {1, 0, 1, 0})); +} + +TEST(SolutionIsFeasibleTest, WithEnforcement) { + const CpModelProto model = ParseTestProto(R"pb( + variables { name: 'a' domain: 0 domain: 1 } + variables { name: 'b' domain: 0 domain: 1 } + variables { name: 'y' domain: 0 domain: 10 } + constraints { + enforcement_literal: [ 0, 1 ] + linear { vars: 2 coeffs: 1 domain: 7 domain: 7 } + } + )pb"); + EXPECT_TRUE(SolutionIsFeasible(model, {0, 0, 5})); + EXPECT_TRUE(SolutionIsFeasible(model, {0, 1, 5})); + EXPECT_TRUE(SolutionIsFeasible(model, {1, 0, 5})); + EXPECT_FALSE(SolutionIsFeasible(model, {1, 1, 5})); + 
EXPECT_TRUE(SolutionIsFeasible(model, {1, 1, 7})); +} + +TEST(SolutionIsFeasibleTest, ObjectiveDomain) { + const CpModelProto model = ParseTestProto(R"pb( + variables { name: 'x' domain: 0 domain: 10 } + variables { name: 'y' domain: 0 domain: 10 } + objective { + vars: [ 0, 1 ] + coeffs: [ 1, 1 ] + domain: [ 5, 15 ] + } + )pb"); + EXPECT_FALSE(SolutionIsFeasible(model, {8, 8})); + EXPECT_TRUE(SolutionIsFeasible(model, {5, 5})); + EXPECT_FALSE(SolutionIsFeasible(model, {0, 0})); +} + +TEST(ValidateCpModelTest, BadVariableDomain1) { + const CpModelProto model = ParseTestProto(R"pb( + variables { name: 'a' domain: 0 domain: 1 domain: 3 } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("odd domain")); +} + +TEST(ValidateCpModelTest, VariableUpperBoundTooLarge) { + const CpModelProto model = ParseTestProto(R"pb( + variables { + name: 'a' + domain: [ 0, 9223372036854775807 ] + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("do not fall in")); +} + +TEST(ValidateCpModelTest, VariableLowerBoundTooLarge1) { + const CpModelProto model = ParseTestProto(R"pb( + variables { + name: 'a' + domain: [ -9223372036854775807, 0 ] + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("do not fall in")); +} + +TEST(ValidateCpModelTest, VariableLowerBoundTooLarge2) { + const CpModelProto model = ParseTestProto(R"pb( + variables { + name: 'a' + domain: [ -9223372036854775808, 0 ] + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("do not fall in")); +} + +TEST(ValidateCpModelTest, VariableDomainOverflow) { + CHECK_EQ(std::numeric_limits::max() - 1, + int64_t{9223372036854775806}); + + const CpModelProto model_ok = ParseTestProto(R"pb( + variables { name: 'a' domain: 0 domain: 9223372036854775806 } + )pb"); + EXPECT_TRUE(ValidateCpModel(model_ok).empty()); + + const CpModelProto model_bad0 = ParseTestProto(R"pb( + variables { name: 'a' domain: -1 domain: 9223372036854775806 } + )pb"); + EXPECT_THAT(ValidateCpModel(model_bad0), HasSubstr("overflow")); 
+ + const CpModelProto model_bad1 = ParseTestProto(R"pb( + variables { name: 'a' domain: -2 domain: 9223372036854775806 } + )pb"); + EXPECT_THAT(ValidateCpModel(model_bad1), HasSubstr("overflow")); + + CHECK_EQ(std::numeric_limits::min() + 2, + int64_t{-9223372036854775806}); + const CpModelProto model_bad2 = ParseTestProto(R"pb( + variables { name: 'a' domain: -9223372036854775806 domain: 2 } + )pb"); + EXPECT_THAT(ValidateCpModel(model_bad2), HasSubstr("overflow")); +} + +TEST(ValidateCpModelTest, ObjectiveOverflow) { + CHECK_EQ(std::numeric_limits::max() / 4, + int64_t{2305843009213693951}); + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ -2305843009213693951, 2305843009213693951 ] } + variables { domain: [ -2305843009213693951, 2305843009213693951 ] } + variables { domain: [ -2305843009213693951, 2305843009213693951 ] } + objective { + vars: [ 0, 1, 2 ] + coeffs: [ 1, 1, 1 ] + } + )pb"); + + // The min/max sum do not overflow, but their difference do. + EXPECT_THAT(ValidateCpModel(model), HasSubstr("overflow")); +} + +TEST(ValidateCpModelTest, ValidSolutionHint) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + solution_hint { + vars: [ 0, 1 ] + values: [ 1, 2 ] + } + )pb"); + EXPECT_TRUE(ValidateCpModel(model).empty()); +} + +TEST(ValidateCpModelTest, SolutionHint1) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + solution_hint { + vars: [ 0, 1, 2 ] + values: [ 1, 2, 3, 4 ] + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("same size")); +} + +TEST(ValidateCpModelTest, SolutionHint2) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + solution_hint { + vars: [ 0, 10, 2 ] + values: [ 1, 2, 3 ] + } + )pb"); + 
EXPECT_THAT(ValidateCpModel(model), HasSubstr("Invalid variable")); +} + +TEST(ValidateCpModelTest, SolutionHint3) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + solution_hint { + vars: [ 0, 2, 0 ] + values: [ 1, 2, 3 ] + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("duplicate")); +} + +TEST(ValidateCpModelTest, Assumptions) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + assumptions: [ 0, 1, 4 ] + )pb"); + EXPECT_THAT(ValidateCpModel(model), + "Invalid literal reference 4 in the 'assumptions' field."); +} + +TEST(ValidateCpModelTest, NegativeValueInIntervalSizeDomain) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 0 ] } + variables { domain: [ -7, -7, 0, 0 ] } + constraints { + interval { + start { vars: 0 coeffs: 1 } + end { vars: 1 coeffs: 1 } + size { vars: 1 coeffs: 1 } + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), + HasSubstr("The size of a performed interval must be >= 0")); +} + +TEST(ValidateCpModelTest, ParallelVectorMustHaveTheSameSize) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: 0 domain: 4503599627370529 } + constraints { + interval { + start { offset: 1 } + size { offset: 2 } + end { offset: 3 } + } + } + constraints { + no_overlap_2d { x_intervals: 0 y_intervals: 0 y_intervals: 0 } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("must have the same size")); +} + +TEST(ValidateCpModelTest, InvalidDomainInLinear) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: -288230376151711744 domain: 262144 } + variables { domain: 0 domain: 5 } + constraints { + linear { + vars: [ 1, 0 ] + coeffs: [ 1, 2 ] + domain: [ 1, 3, 5 ] + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("Invalid domain")); +} + 
+TEST(ValidateCpModelTest, InvalidDomainInLinear2) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: -288230376151711744 domain: 262144 } + variables { domain: 0 domain: 5 } + constraints { + name: "T" + linear { + vars: [ 1, 0 ] + coeffs: [ 1, 2 ] + domain: [ 3, 0 ] + } + } + )pb"); + + EXPECT_THAT(ValidateCpModel(model), HasSubstr("Invalid domain")); +} + +TEST(ValidateCpModelTest, NegatedReferenceInLinear) { + const CpModelProto model = ParseTestProto(R"pb( + variables { name: "c" domain: 1 domain: 1 } + variables { domain: 0 domain: 1 } + constraints { + int_div { + target {} + exprs {} + exprs { vars: -2 coeffs: 792633495762501632 } + } + } + )pb"); + + EXPECT_THAT(ValidateCpModel(model), HasSubstr("Invalid negated variable")); +} + +TEST(ValidateCpModelTest, ArityOneInIntProd) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + int_prod { + target { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + EXPECT_TRUE(ValidateCpModel(model).empty()); +} + +TEST(ValidateCpModelTest, ArityThreeInIntProd) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + int_prod { + target { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + EXPECT_TRUE(ValidateCpModel(model).empty()); +} + +TEST(ValidateCpModelTest, WrongArityInIntDiv) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + int_div { + target { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("have exactly 2 terms")); +} + +TEST(ValidateCpModelTest, DivisorDomainContainsZero) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 10 ] } + variables { domain: [ 
0, 10 ] } + variables { domain: [ -3, 3 ] } + constraints { + int_div { + target { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + exprs { vars: 2 coeffs: 1 } + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), + HasSubstr("The domain of the divisor cannot contain 0")); +} + +TEST(ValidateCpModelTest, DivisorSpanningAcrossZero) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ -3, 3 ] } + constraints { + int_div { + target { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + exprs { vars: 2 coeffs: 2 offset: -3 } + } + } + )pb"); + EXPECT_TRUE(ValidateCpModel(model).empty()); +} + +TEST(ValidateCpModelTest, DivisorIsZero) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + int_div { + target { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + exprs {} + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("Division by 0")); +} + +TEST(ValidateCpModelTest, WrongArityInIntMod) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + int_mod { + target { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("have exactly 2 terms")); +} + +TEST(ValidateCpModelTest, NegativeModulo) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ -3, 3 ] } + constraints { + int_mod { + target { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + exprs { vars: 2 coeffs: 1 } + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), + HasSubstr("strictly positive modulo argument")); +} + +TEST(ValidateCpModelTest, IncompatibleAutomatonTransitions) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: 0 domain: 1 } + constraints { + automaton { 
+ final_states: 0 + transition_tail: 0 + transition_tail: 0 + transition_head: 0 + transition_head: 1 + transition_label: 0 + transition_label: 0 + vars: 0 + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), + HasSubstr("automaton: incompatible transitions")); +} + +TEST(ValidateCpModelTest, DuplicateAutomatonTransitions) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: 0 domain: 1 } + constraints { + automaton { + final_states: 0 + transition_tail: 0 + transition_tail: 0 + transition_head: 0 + transition_head: 0 + transition_label: 0 + transition_label: 0 + vars: 0 + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), + HasSubstr("automaton: duplicate transition")); +} + +TEST(ValidateCpModelTest, IntervalMustAppearBeforeTheyAreUsed) { + const CpModelProto model = ParseTestProto(R"pb( + constraints { no_overlap { intervals: [ 1, 2 ] } } + constraints { + interval { + start { offset: 0 } + end { offset: 4 } + size { offset: 4 } + } + } + constraints { + interval { + start { offset: 4 } + end { offset: 5 } + size { offset: 1 } + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model, /*after_presolve=*/true), + HasSubstr("must appear before")); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/cp_model_expand_test.cc b/ortools/sat/cp_model_expand_test.cc new file mode 100644 index 0000000000..b3b12dfad9 --- /dev/null +++ b/ortools/sat/cp_model_expand_test.cc @@ -0,0 +1,1793 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/cp_model_expand.h" + +#include +#include +#include + +#include "absl/container/btree_set.h" +#include "absl/strings/string_view.h" +#include "gtest/gtest.h" +#include "ortools/base/container_logging.h" +#include "ortools/base/gmock.h" +#include "ortools/base/logging.h" +#include "ortools/base/parse_test_proto.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_checker.h" +#include "ortools/sat/cp_model_solver.h" +#include "ortools/sat/cp_model_utils.h" +#include "ortools/sat/model.h" +#include "ortools/sat/presolve_context.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/util/sorted_interval_list.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::google::protobuf::contrib::parse_proto::ParseTestProto; + +CpSolverResponse SolveAndCheck( + const CpModelProto& initial_model, absl::string_view extra_parameters = "", + absl::btree_set>* solutions = nullptr) { + SatParameters params; + params.set_enumerate_all_solutions(true); + if (!extra_parameters.empty()) { + params.MergeFromString(extra_parameters); + } + auto observer = [&](const CpSolverResponse& response) { + VLOG(1) << response; + EXPECT_TRUE(SolutionIsFeasible( + initial_model, std::vector(response.solution().begin(), + response.solution().end()))); + if (solutions != nullptr) { + std::vector solution; + for (int var = 0; var < initial_model.variables_size(); ++var) { + solution.push_back(response.solution(var)); + } + solutions->insert(solution); + } + }; + Model model; + model.Add(NewSatParameters(params)); + model.Add(NewFeasibleSolutionObserver(observer)); + return SolveCpModel(initial_model, &model); +} + +TEST(ReservoirExpandTest, NoOptionalAndInitiallyFeasible) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'x' domain: 0 domain: 2 } + variables { name: 'y' domain: 0 domain: 2 } 
+ variables { name: 'z' domain: 0 domain: 2 } + constraints { + reservoir { + time_exprs { vars: 0 coeffs: 1 } + time_exprs { vars: 1 coeffs: 1 } + time_exprs { vars: 2 coeffs: 1 } + level_changes: { offset: 1 } + level_changes: { offset: 1 } + level_changes: { offset: 2 } + min_level: 0 + max_level: 4 + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + EXPECT_EQ(27, solutions.size()); +} + +TEST(ReservoirExpandTest, GizaReport) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: 0 domain: 10 } + variables { domain: 0 domain: 10 } + variables { domain: 0 domain: 10 } + constraints { + reservoir { + time_exprs { vars: 0 coeffs: 1 } + time_exprs { vars: 1 coeffs: 1 } + time_exprs { vars: 2 coeffs: 1 } + level_changes: { offset: 10 } + level_changes: { offset: 1 } + level_changes: { offset: -1 } + min_level: 0 + max_level: 10 + } + } + constraints { + linear { + vars: 0 + coeffs: 1 + domain: [ 0, 0 ] + } + } + constraints { + linear { + vars: 1 + coeffs: 1 + domain: [ 1, 1 ] + } + } + constraints { + linear { + vars: 2 + coeffs: 1 + domain: [ 1, 1 ] + } + } + )pb"); + SatParameters params; + params.set_cp_model_presolve(false); + const CpSolverResponse response = SolveWithParameters(initial_model, params); + EXPECT_EQ(OPTIMAL, response.status()); +} + +TEST(ReservoirExpandTest, GizaReportReverse) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: 0 domain: 10 } + variables { domain: 0 domain: 10 } + variables { domain: 0 domain: 10 } + constraints { + reservoir { + time_exprs { vars: 0 coeffs: 1 } + time_exprs { vars: 1 coeffs: 1 } + time_exprs { vars: 2 coeffs: 1 } + level_changes: { offset: 10 } + level_changes: { offset: 1 } + level_changes: { offset: -1 } + min_level: 0 + max_level: 10 + } + } + constraints { + linear { + vars: 0 + coeffs: 1 + domain: [ 0, 0 ] + } + } + constraints { 
+ linear { + vars: 1 + coeffs: 1 + domain: [ 1, 1 ] + } + } + constraints { + linear { + vars: 2 + coeffs: 1 + domain: [ 1, 1 ] + } + } + )pb"); + SatParameters params; + params.set_cp_model_presolve(false); + const CpSolverResponse response = SolveWithParameters(initial_model, params); + EXPECT_EQ(OPTIMAL, response.status()); +} + +TEST(ReservoirExpandTest, RepeatedTimesWithDifferentActivationVariables) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: 0 domain: 2 } + variables { domain: 0 domain: 2 } + variables { domain: 1 domain: 1 } + variables { domain: 0 domain: 1 } + constraints { + reservoir { + time_exprs { vars: 0 coeffs: 1 } + time_exprs { vars: 1 coeffs: 1 } + time_exprs { vars: 1 coeffs: 1 } + level_changes: { offset: 1 } + level_changes: { offset: 1 } + level_changes: { offset: -10 } + active_literals: [ 2, 2, 3 ] + min_level: 0 + max_level: 2 + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + // First two time variables should be unconstrained giving us 3x3 solutions. 
+ EXPECT_EQ(9, solutions.size()); +} + +TEST(ReservoirExpandTest, NoOptionalAndInitiallyFeasibleWithConsumption) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'x' domain: 0 domain: 2 } + variables { name: 'y' domain: 0 domain: 2 } + variables { name: 'z' domain: 0 domain: 2 } + constraints { + reservoir { + time_exprs { vars: 0 coeffs: 1 } + time_exprs { vars: 1 coeffs: 1 } + time_exprs { vars: 2 coeffs: 1 } + level_changes: { offset: -1 } + level_changes: { offset: -1 } + level_changes: { offset: 2 } + min_level: 0 + max_level: 2 + } + } + constraints { + all_diff { + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + exprs { vars: 2 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + EXPECT_EQ(2, solutions.size()); +} + +TEST(ReservoirExpandTest, NoOptionalAndInitiallyFeasibleAndOverloaded) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'x' domain: 0 domain: 2 } + variables { name: 'y' domain: 0 domain: 2 } + variables { name: 'z' domain: 0 domain: 2 } + constraints { + reservoir { + time_exprs { vars: 0 coeffs: 1 } + time_exprs { vars: 1 coeffs: 1 } + time_exprs { vars: 2 coeffs: 1 } + level_changes: { offset: 1 } + level_changes: { offset: 1 } + level_changes: { offset: 2 } + min_level: 0 + max_level: 2 + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(INFEASIBLE, response.status()); + EXPECT_EQ(0, solutions.size()); +} + +TEST(ReservoirExpandTest, OneUnschedulableOptionalAndInitiallyFeasible) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'true' domain: 1 domain: 1 } + variables { name: 'x' domain: 0 domain: 2 } + variables { name: 'presence_y' domain: 0 domain: 1 } + variables { name: 'y' domain: 0 domain: 2 } + constraints { + 
reservoir { + time_exprs { vars: 1 coeffs: 1 } + time_exprs { vars: 3 coeffs: 1 } + level_changes: { offset: 1 } + level_changes: { offset: 2 } + active_literals: 0 + active_literals: 2 + min_level: 0 + max_level: 2 + } + } + constraints { + enforcement_literal: -3 + linear { vars: 3 coeffs: 1 domain: 0 domain: 0 } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + EXPECT_EQ(3, solutions.size()); +} + +TEST(ReservoirExpandTest, OptionalWithConsumption) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'presence_x' domain: 0 domain: 1 } + variables { name: 'x' domain: 0 domain: 1 } + variables { name: 'presence_y' domain: 0 domain: 1 } + variables { name: 'y' domain: 0 domain: 1 } + constraints { + reservoir { + time_exprs { vars: 1 coeffs: 1 } + time_exprs { vars: 3 coeffs: 1 } + level_changes: { offset: 1 } + level_changes: { offset: -1 } + active_literals: 0 + active_literals: 2 + min_level: 0 + max_level: 2 + } + } + constraints { + enforcement_literal: -1 + linear { vars: 1 coeffs: 1 domain: 0 domain: 0 } + } + constraints { + enforcement_literal: -3 + linear { vars: 3 coeffs: 1 domain: 0 domain: 0 } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + EXPECT_EQ(6, solutions.size()); +} + +TEST(ReservoirExpandTest, FalseActive) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: "x12" domain: 0 domain: 1 } + variables { name: "start" domain: 0 domain: 0 } + variables { name: "fill_time_2" domain: 2 domain: 2 } + variables { name: "empty_time_8" domain: 12 domain: 12 } + variables { domain: 1 domain: 1 } + constraints { + reservoir { + max_level: 20 + time_exprs { vars: 1 coeffs: 1 } + time_exprs { vars: 2 coeffs: 1 } + time_exprs { vars: 3 coeffs: 1 } + level_changes: { 
offset: 10 } + level_changes: { offset: 5 } + level_changes: { offset: -3 } + active_literals: 4 + active_literals: 1 + active_literals: 0 + } + } + )pb"); + const CpSolverResponse response = Solve(initial_model); + EXPECT_EQ(OPTIMAL, response.status()); +} + +TEST(IntModExpandTest, FzTest) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'x' domain: 50 domain: 60 } + variables { name: 'y' domain: 1 domain: 5 } + variables { name: 'mod' domain: 5 domain: 5 } + constraints { + int_mod { + target { vars: 1 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 2 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + EXPECT_EQ(8, solutions.size()); +} + +TEST(IntModExpandTest, FzTestVariableMod) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'x' domain: 50 domain: 60 } + variables { name: 'y' domain: 1 domain: 5 } + variables { name: 'mod' domain: 4 domain: 5 } + constraints { + int_mod { + target { vars: 1 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 2 coeffs: 1 } + } + } + )pb"); + absl::btree_set> found_solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &found_solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{ + {50, 2, 4}, {51, 1, 5}, {51, 3, 4}, {52, 2, 5}, {53, 1, 4}, {53, 3, 5}, + {54, 2, 4}, {54, 4, 5}, {55, 3, 4}, {56, 1, 5}, {57, 1, 4}, {57, 2, 5}, + {58, 2, 4}, {58, 3, 5}, {59, 3, 4}, {59, 4, 5}}; + EXPECT_EQ(found_solutions, expected); + EXPECT_EQ(16, found_solutions.size()); +} + +TEST(IntModExpandTest, Issue2420) { + CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: "b" domain: 0 domain: 65535 } + variables { domain: 192 domain: 192 } + variables { name: "x" domain: 127 domain: 137 } + constraints { + int_mod { + target { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 
1 } + exprs { vars: 2 coeffs: 1 } + } + } + )pb"); + absl::btree_set> found_solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &found_solutions); + EXPECT_EQ(CpSolverStatus::OPTIMAL, response.status()); + absl::btree_set> expected{ + {55, 192, 137}, {56, 192, 136}, {57, 192, 135}, {58, 192, 134}, + {59, 192, 133}, {60, 192, 132}, {61, 192, 131}, {62, 192, 130}, + {63, 192, 129}, {64, 192, 128}, {65, 192, 127}}; + EXPECT_EQ(found_solutions, expected); + EXPECT_EQ(11, found_solutions.size()); +} + +TEST(IntModExpandTest, VariableMod) { + CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: 0 domain: 10 } + variables { domain: 3 domain: 10 } + variables { domain: 1 domain: 4 } + constraints { + int_mod { + target { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + exprs { vars: 2 coeffs: 1 } + } + } + )pb"); + absl::btree_set> found_solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &found_solutions); + EXPECT_EQ(CpSolverStatus::OPTIMAL, response.status()); + absl::btree_set> expected{ + {0, 3, 1}, {0, 3, 3}, {0, 4, 1}, {0, 4, 2}, {0, 4, 4}, {0, 5, 1}, + {0, 6, 1}, {0, 6, 2}, {0, 6, 3}, {0, 7, 1}, {0, 8, 1}, {0, 8, 2}, + {0, 8, 4}, {0, 9, 1}, {0, 9, 3}, {0, 10, 1}, {0, 10, 2}, {1, 3, 2}, + {1, 4, 3}, {1, 5, 2}, {1, 5, 4}, {1, 7, 2}, {1, 7, 3}, {1, 9, 2}, + {1, 9, 4}, {1, 10, 3}, {2, 5, 3}, {2, 6, 4}, {2, 8, 3}, {2, 10, 4}, + {3, 3, 4}, {3, 7, 4}}; + EXPECT_EQ(found_solutions, expected); + EXPECT_EQ(32, found_solutions.size()); +} + +TEST(IntProdExpandTest, LeftCase) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'x' domain: -50 domain: -40 domain: 10 domain: 20 } + variables { name: 'y' domain: 0 domain: 1 } + variables { name: 'p' domain: -100 domain: 100 } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse 
response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + EXPECT_EQ(44, solutions.size()); +} + +TEST(IntProdExpandTest, RightCase) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'x' domain: -50 domain: -40 domain: 10 domain: 20 } + variables { name: 'y' domain: 0 domain: 1 } + variables { name: 'p' domain: -100 domain: 100 } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + EXPECT_EQ(44, solutions.size()); +} + +TEST(IntProdExpandTest, LeftAcrossZero) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'x' domain: -6 domain: 6 } + variables { name: 'y' domain: 2 domain: 4 } + variables { name: 'p' domain: -30 domain: 30 } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{ + {-6, 2, -12}, {-6, 3, -18}, {-6, 4, -24}, {-5, 2, -10}, {-5, 3, -15}, + {-5, 4, -20}, {-4, 2, -8}, {-4, 3, -12}, {-4, 4, -16}, {-3, 2, -6}, + {-3, 3, -9}, {-3, 4, -12}, {-2, 2, -4}, {-2, 3, -6}, {-2, 4, -8}, + {-1, 2, -2}, {-1, 3, -3}, {-1, 4, -4}, {0, 2, 0}, {0, 3, 0}, + {0, 4, 0}, {1, 2, 2}, {1, 3, 3}, {1, 4, 4}, {2, 2, 4}, + {2, 3, 6}, {2, 4, 8}, {3, 2, 6}, {3, 3, 9}, {3, 4, 12}, + {4, 2, 8}, {4, 3, 12}, {4, 4, 16}, {5, 2, 10}, {5, 3, 15}, + {5, 4, 20}, {6, 2, 12}, {6, 3, 18}, {6, 4, 24}, + }; + EXPECT_EQ(solutions.size(), 13 * 3); + EXPECT_EQ(solutions, expected); +} + +TEST(IntProdExpandTest, TestLargerArity) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { 
name: 'x' domain: -6 domain: 6 } + variables { name: 'y' domain: 2 domain: 4 } + variables { name: 'z' domain: 1 domain: 2 } + variables { name: 'p' domain: -30 domain: 30 } + constraints { + int_prod { + target { vars: 3 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + exprs { vars: 2 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + const Domain dx = ReadDomainFromProto(initial_model.variables(0)); + const Domain dy = ReadDomainFromProto(initial_model.variables(1)); + const Domain dz = ReadDomainFromProto(initial_model.variables(2)); + const Domain dp = ReadDomainFromProto(initial_model.variables(3)); + + absl::btree_set> expected; + for (const int vx : dx.Values()) { + for (const int vy : dy.Values()) { + for (const int vz : dz.Values()) { + if (dp.Contains(vx * vy * vz)) { + expected.insert(std::vector{vx, vy, vz, vx * vy * vz}); + } + } + } + } + + EXPECT_EQ(solutions, expected); +} + +TEST(IntProdExpandTest, TestLargerAffineProd) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'x' domain: -6 domain: 6 } + variables { name: 'y' domain: 2 domain: 4 } + variables { name: 'z' domain: 1 domain: 2 } + variables { name: 'p' domain: -30 domain: 30 } + constraints { + int_prod { + target { vars: 3 coeffs: 1 } + exprs { vars: 0 coeffs: 1 offset: 2 } + exprs { vars: 0 coeffs: 3 offset: 1 } + exprs { vars: 1 coeffs: 1 } + exprs { vars: 1 coeffs: 2 offset: -1 } + exprs { vars: 1 coeffs: 3 } + exprs { vars: 2 coeffs: 1 offset: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + const Domain dx = ReadDomainFromProto(initial_model.variables(0)); + const Domain dy = ReadDomainFromProto(initial_model.variables(1)); + const Domain dz = 
ReadDomainFromProto(initial_model.variables(2)); + const Domain dp = ReadDomainFromProto(initial_model.variables(3)); + + absl::btree_set> expected; + for (const int vx : dx.Values()) { + for (const int vy : dy.Values()) { + for (const int vz : dz.Values()) { + const int p = (vx + 2) * (2 * vx + 1) * vy * (2 * vy - 1) * (3 * vy) * + (2 * vz + 1); + if (dp.Contains(p)) { + expected.insert(std::vector{vx, vy, vz, p}); + } + } + } + } + + EXPECT_EQ(solutions, expected); +} + +TEST(ElementExpandTest, ConstantArray) { + CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ -1, 5 ] } + variables { domain: [ 1, 1 ] } + variables { domain: [ 3, 3 ] } + variables { domain: [ 4, 4 ] } + variables { domain: [ 5, 5 ] } + variables { domain: [ 0, 7 ] } + constraints { + element { + index: 0, + vars: [ 1, 2, 3, 4, 1 ], + target: 5 + } + } + )pb"); + + absl::btree_set> found_solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &found_solutions); + absl::btree_set> expected{ + {0, 1, 3, 4, 5, 1}, {1, 1, 3, 4, 5, 3}, {2, 1, 3, 4, 5, 4}, + {3, 1, 3, 4, 5, 5}, {4, 1, 3, 4, 5, 1}, + }; + EXPECT_EQ(found_solutions, expected); +} + +TEST(AutomatonExpandTest, NonogramRule) { + // Accept sequences with 3 '1', then 2 '1', then 1 '1', separated by at least + // one '0'. 
+ CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + automaton { + starting_state: 1, + final_states: [ 9 ], + transition_tail: [ 1, 1, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9 ], + transition_head: [ 1, 2, 3, 4, 5, 5, 6, 7, 8, 8, 9, 9 ], + transition_label: [ 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0 ], + vars: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], + } + } + )pb"); + absl::btree_set> found_solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &found_solutions); + absl::btree_set> expected{ + {0, 0, 1, 1, 1, 0, 1, 1, 0, 1}, {0, 1, 1, 1, 0, 0, 1, 1, 0, 1}, + {0, 1, 1, 1, 0, 1, 1, 0, 0, 1}, {0, 1, 1, 1, 0, 1, 1, 0, 1, 0}, + {1, 1, 1, 0, 0, 0, 1, 1, 0, 1}, {1, 1, 1, 0, 0, 1, 1, 0, 0, 1}, + {1, 1, 1, 0, 0, 1, 1, 0, 1, 0}, {1, 1, 1, 0, 1, 1, 0, 0, 0, 1}, + {1, 1, 1, 0, 1, 1, 0, 0, 1, 0}, {1, 1, 1, 0, 1, 1, 0, 1, 0, 0}}; + EXPECT_EQ(found_solutions, expected); +} + +TEST(AutomatonExpandTest, Bug1753_1) { + CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: "0" domain: 0 domain: 2 } + variables { name: "1" domain: 0 domain: 2 } + variables { name: "2" domain: 0 domain: 2 } + constraints { + automaton { + starting_state: 1 + final_states: 1 + final_states: 2 + transition_tail: 1 + transition_tail: 2 + transition_head: 2 + transition_head: 1 + transition_label: 1 + transition_label: 2 + vars: 0 + vars: 1 + vars: 2 + } + } + )pb"); + absl::btree_set> found_solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &found_solutions); + absl::btree_set> expected{{1, 2, 1}}; + EXPECT_EQ(found_solutions, expected); +} + +TEST(AutomatonExpandTest, Bug1753_2) { + CpModelProto initial_model = 
ParseTestProto(R"pb( + variables { name: "0" domain: 0 domain: 2 } + variables { name: "1" domain: 0 domain: 2 } + variables { name: "2" domain: 0 domain: 2 } + constraints { linear { vars: 2 coeffs: 1 domain: 1 domain: 1 } } + constraints { + automaton { + starting_state: 1 + final_states: 1 + final_states: 2 + transition_tail: 1 + transition_tail: 0 + transition_tail: 1 + transition_tail: 2 + transition_tail: 0 + transition_tail: 2 + transition_head: 2 + transition_head: 2 + transition_head: 1 + transition_head: 1 + transition_head: 1 + transition_head: 2 + transition_label: 1 + transition_label: 1 + transition_label: 0 + transition_label: 2 + transition_label: 2 + transition_label: 0 + vars: 0 + vars: 1 + vars: 2 + } + } + )pb"); + absl::btree_set> found_solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &found_solutions); + absl::btree_set> expected{{0, 0, 1}, {1, 2, 1}}; + EXPECT_EQ(found_solutions, expected); +} + +TEST(AutomatonExpandTest, EverythingZero) { + CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + automaton { + starting_state: 1, + final_states: [ 1 ], + transition_tail: 1, + transition_head: 1, + transition_label: 0, + vars: [ 0, 1, 2, 3, 4, 5 ], + } + } + )pb"); + Model model; + PresolveContext context(&model, &initial_model, nullptr); + ExpandCpModel(&context); + + const CpModelProto expected_model = ParseTestProto(R"pb( + variables { domain: 0 domain: 0 } + variables { domain: 0 domain: 0 } + variables { domain: 0 domain: 0 } + variables { domain: 0 domain: 0 } + variables { domain: 0 domain: 0 } + variables { domain: 0 domain: 0 } + constraints {} + )pb"); + EXPECT_THAT(initial_model, testing::EqualsProto(expected_model)); +} + +TEST(AutomatonExpandTest, LoopingAutomatonMultipleFinalStates) { + // 
These tuples accept "0*(12)+0*". + CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + constraints { + automaton { + starting_state: 1, + final_states: [ 3, 4 ], + transition_tail: [ 1, 1, 2, 3, 3, 4 ], + transition_head: [ 1, 2, 3, 2, 4, 4 ], + transition_label: [ 0, 1, 2, 1, 0, 0 ], + vars: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], + } + } + )pb"); + absl::btree_set> found_solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &found_solutions); + EXPECT_EQ(CpSolverStatus::OPTIMAL, response.status()); + absl::btree_set> expected{ + {0, 0, 0, 0, 0, 0, 0, 0, 1, 2}, {0, 0, 0, 0, 0, 0, 0, 1, 2, 0}, + {0, 0, 0, 0, 0, 0, 1, 2, 0, 0}, {0, 0, 0, 0, 0, 0, 1, 2, 1, 2}, + {0, 0, 0, 0, 0, 1, 2, 0, 0, 0}, {0, 0, 0, 0, 0, 1, 2, 1, 2, 0}, + {0, 0, 0, 0, 1, 2, 0, 0, 0, 0}, {0, 0, 0, 0, 1, 2, 1, 2, 0, 0}, + {0, 0, 0, 0, 1, 2, 1, 2, 1, 2}, {0, 0, 0, 1, 2, 0, 0, 0, 0, 0}, + {0, 0, 0, 1, 2, 1, 2, 0, 0, 0}, {0, 0, 0, 1, 2, 1, 2, 1, 2, 0}, + {0, 0, 1, 2, 0, 0, 0, 0, 0, 0}, {0, 0, 1, 2, 1, 2, 0, 0, 0, 0}, + {0, 0, 1, 2, 1, 2, 1, 2, 0, 0}, {0, 0, 1, 2, 1, 2, 1, 2, 1, 2}, + {0, 1, 2, 0, 0, 0, 0, 0, 0, 0}, {0, 1, 2, 1, 2, 0, 0, 0, 0, 0}, + {0, 1, 2, 1, 2, 1, 2, 0, 0, 0}, {0, 1, 2, 1, 2, 1, 2, 1, 2, 0}, + {1, 2, 0, 0, 0, 0, 0, 0, 0, 0}, {1, 2, 1, 2, 0, 0, 0, 0, 0, 0}, + {1, 2, 1, 2, 1, 2, 0, 0, 0, 0}, {1, 2, 1, 2, 1, 2, 1, 2, 0, 0}, + {1, 2, 1, 2, 1, 2, 1, 2, 1, 2}}; + EXPECT_EQ(found_solutions, expected); + EXPECT_EQ(25, found_solutions.size()); +} + +TEST(AutomatonExpandTest, LoopingAutomatonMultipleFinalStatesNegatedVariables) { + // These automaton accept "0*(12)+0*". 
+ CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 2 ] } + variables { domain: [ -2, 0 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + constraints { + automaton { + starting_state: 1, + final_states: [ 3, 4 ], + transition_tail: [ 1, 1, 2, 3, 3, 4 ], + transition_head: [ 1, 2, 3, 2, 4, 4 ], + transition_label: [ 0, 1, 2, 1, 0, 0 ], + vars: [ 0, -2, 2, 3, 4, 5, 6, 7, 8, 9 ], + } + } + )pb"); + absl::btree_set> found_solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &found_solutions); + EXPECT_EQ(CpSolverStatus::OPTIMAL, response.status()); + + absl::btree_set> expected{ + {0, 0, 0, 0, 0, 0, 0, 0, 1, 2}, {0, 0, 0, 0, 0, 0, 0, 1, 2, 0}, + {0, 0, 0, 0, 0, 0, 1, 2, 0, 0}, {0, 0, 0, 0, 0, 0, 1, 2, 1, 2}, + {0, 0, 0, 0, 0, 1, 2, 0, 0, 0}, {0, 0, 0, 0, 0, 1, 2, 1, 2, 0}, + {0, 0, 0, 0, 1, 2, 0, 0, 0, 0}, {0, 0, 0, 0, 1, 2, 1, 2, 0, 0}, + {0, 0, 0, 0, 1, 2, 1, 2, 1, 2}, {0, 0, 0, 1, 2, 0, 0, 0, 0, 0}, + {0, 0, 0, 1, 2, 1, 2, 0, 0, 0}, {0, 0, 0, 1, 2, 1, 2, 1, 2, 0}, + {0, 0, 1, 2, 0, 0, 0, 0, 0, 0}, {0, 0, 1, 2, 1, 2, 0, 0, 0, 0}, + {0, 0, 1, 2, 1, 2, 1, 2, 0, 0}, {0, 0, 1, 2, 1, 2, 1, 2, 1, 2}, + {0, -1, 2, 0, 0, 0, 0, 0, 0, 0}, {0, -1, 2, 1, 2, 0, 0, 0, 0, 0}, + {0, -1, 2, 1, 2, 1, 2, 0, 0, 0}, {0, -1, 2, 1, 2, 1, 2, 1, 2, 0}, + {1, -2, 0, 0, 0, 0, 0, 0, 0, 0}, {1, -2, 1, 2, 0, 0, 0, 0, 0, 0}, + {1, -2, 1, 2, 1, 2, 0, 0, 0, 0}, {1, -2, 1, 2, 1, 2, 1, 2, 0, 0}, + {1, -2, 1, 2, 1, 2, 1, 2, 1, 2}}; + EXPECT_EQ(found_solutions, expected); +} + +TEST(AutomatonExpandTest, AnotherAutomaton) { + // This accept everything that does not contain 4 consecutives 1 or 4 + // consecutives 2. 
+ CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + constraints { + automaton { + starting_state: 1, + final_states: [ 1, 2, 3, 4, 5, 6, 7 ], + transition_tail: [ 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7 ], + transition_head: [ 2, 5, 3, 5, 4, 5, 0, 5, 2, 6, 2, 7, 2, 0 ], + transition_label: [ 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2 ], + vars: [ 0, 1, 2, 3, 4, 5, 6 ], + } + } + )pb"); + absl::btree_set> found_solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &found_solutions); + + EXPECT_EQ(CpSolverStatus::OPTIMAL, response.status()); + + // Out of the 2**7 tuples, the one that contains 4 consecutive 1 are: + // - 1111??? (8) + // - 21111?? (4) + // - ?21111? (4) + // - ??21111 (4) + EXPECT_EQ(128 - 2 * 20, found_solutions.size()); +} + +TEST(ExpandTableTest, EnumerationAndEncoding) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 4 ] } + variables { domain: [ 0, 4 ] } + variables { domain: [ 0, 4 ] } + variables { domain: [ 0, 4 ] } + constraints { table { vars: 0 vars: 2 values: 0 values: 1 } } + constraints { table { vars: 1 vars: 3 values: 4 values: 0 } } + constraints { table { vars: 2 vars: 1 values: 1 values: 4 } } + )pb"); + + Model model; + model.Add(NewSatParameters("enumerate_all_solutions:true")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + // There should be just one solution [0, 4, 1, 0], but the solver used to + // report more because of extra "free" variable used in the encoding. 
+ EXPECT_EQ(count, 1); +} + +TEST(ExpandTableTest, EnumerationAndEncodingTwoVars) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { + name: "X1" + domain: [ 0, 4 ] + } + variables { + name: "X3" + domain: [ 0, 4 ] + } + constraints { + table { + vars: [ 0, 1 ] + values: [ 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4 ] + } + } + )pb"); + + Model model; + model.Add(NewSatParameters("enumerate_all_solutions:true")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + EXPECT_EQ(count, 7); +} + +TEST(ExpandTableTest, EnumerationAndEncodingFullPrefix) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + constraints { + table { + vars: [ 0, 1, 2 ] + values: [ + 0, 0, 0, 0, 1, 1, 0, 2, 2, 1, 0, 1, 1, 1, + 2, 1, 2, 0, 2, 0, 2, 2, 1, 0, 2, 2, 1 + ] + } + } + )pb"); + + Model model; + model.Add(NewSatParameters("enumerate_all_solutions:true")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + EXPECT_EQ(count, 9); +} + +TEST(ExpandTableTest, EnumerationAndEncodingPartialPrefix) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + constraints { + table { + vars: [ 0, 1, 2 ] + values: [ + 0, 0, 0, 0, 2, 2, 1, 0, 1, 1, 1, 2, 1, 2, 0, 2, 0, 2, 2, 1, 0 + ] + } + } + )pb"); + + Model model; + 
model.Add(NewSatParameters("enumerate_all_solutions:true")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + EXPECT_EQ(count, 7); +} + +TEST(ExpandTableTest, EnumerationAndEncodingInvalidTuples) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + constraints { + table { + vars: [ 0, 1, 2 ] + values: [ + 0, 0, 4, 0, 2, 2, 1, 0, 1, 1, 1, 2, 1, 2, 0, 2, 0, 2, 2, 1, 4 + ] + } + } + )pb"); + + Model model; + model.Add( + NewSatParameters("enumerate_all_solutions:true,cp_model_presolve:false")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + // There should be exactly one solution per valid tuple. 
+ EXPECT_EQ(count, 5); +} + +TEST(ExpandTableTest, EnumerationAndEncodingOneTupleWithAny) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 3 ] } + variables { domain: [ 0, 3 ] } + variables { domain: [ 0, 3 ] } + constraints { + table { + vars: [ 0, 1, 2 ] + values: [ 1, 0, 2, 1, 1, 2, 1, 2, 2 ] + } + } + )pb"); + + Model model; + model.Add( + NewSatParameters("enumerate_all_solutions:true,cp_model_presolve:false")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + EXPECT_EQ(count, 3); +} + +TEST(ExpandTableTest, EnumerationAndEncodingPrefixWithLargeNegatedPart) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 5 ] } + variables { domain: [ 0, 5 ] } + variables { domain: [ 0, 5 ] } + constraints { + table { + vars: [ 0, 1, 2 ] + values: [ 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5 ] + } + } + )pb"); + + Model model; + model.Add( + NewSatParameters("enumerate_all_solutions:true,cp_model_presolve:false")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + EXPECT_EQ(count, 6); +} + +TEST(ExpandTableTest, EnforcedPositiveTable) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 1, 3 ] } + variables { domain: [ 1, 3 ] } + variables { domain: [ 1, 3 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: [ 3 ] + table { + vars: [ 0, 1, 2 ] + values: [ 1, 2, 3, 2, 2, 2, 3, 2, 1 ] + } + } + )pb"); + + Model model; + model.Add( + 
NewSatParameters("enumerate_all_solutions:true,cp_model_presolve:false")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + EXPECT_EQ(count, 30); +} + +TEST(ExpandTableTest, EnforcedPositiveEmptyTable) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 1, 3 ] } + variables { domain: [ 1, 3 ] } + variables { domain: [ 1, 3 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: [ 3 ] + table { + vars: [ 0, 1, 2 ] + values: [] + } + } + )pb"); + + Model model; + model.Add( + NewSatParameters("enumerate_all_solutions:true,cp_model_presolve:false")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + EXPECT_EQ(count, 27); +} + +TEST(ExpandTableTest, DualEnforcedPositiveTable) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 1, 3 ] } + variables { domain: [ 1, 3 ] } + variables { domain: [ 1, 3 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: [ 3, 4 ] + table { + vars: [ 0, 1, 2 ] + values: [ 1, 2, 3, 2, 2, 2, 3, 2, 1 ] + } + } + )pb"); + + Model model; + model.Add(NewSatParameters("enumerate_all_solutions:true")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + 
EXPECT_EQ(count, 84); +} + +TEST(ExpandTableTest, EnforcedNegativeTable) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 1, 3 ] } + variables { domain: [ 1, 3 ] } + variables { domain: [ 1, 3 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: [ 3 ] + table { + vars: [ 0, 1, 2 ] + values: [ 1, 2, 3, 2, 2, 2, 3, 2, 1 ] + negated: true + } + } + )pb"); + + Model model; + model.Add( + NewSatParameters("enumerate_all_solutions:true,cp_model_presolve:false")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + EXPECT_EQ(count, 51); +} + +TEST(ExpandTableTest, UnsatTable) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 4 ] } + variables { domain: [ 5, 9 ] } + constraints { table { vars: 0 vars: 1 values: 3 values: 3 } } + )pb"); + + Model model; + model.Add(NewSatParameters("cp_model_presolve:false")); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::INFEASIBLE); +} + +TEST(ExpandTableTest, UnsatNegatedTable) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + constraints { + table { + vars: 0 + values: [ 0, 1 ] + negated: true + } + } + )pb"); + + const CpSolverResponse response = Solve(model_proto); + EXPECT_EQ(response.status(), CpSolverStatus::INFEASIBLE); +} + +TEST(ExpandAllDiffTest, Permutation) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 1 ] } + constraints { + all_diff { + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + exprs { vars: 2 coeffs: 1 } + } + } + )pb"); + + absl::btree_set> 
found_solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "presolve_cp_model:false", &found_solutions); + absl::btree_set> expected{ + {0, 2, 1}, {2, 0, 1}, {1, 2, 0}, {2, 1, 0}}; + EXPECT_EQ(found_solutions, expected); +} + +TEST(ExpandInverseTest, CountInvolution) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + inverse { + f_direct: [ 0, 1, 2 ] + f_inverse: [ 0, 1, 2 ] + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + + // On 3 elements, we either have the identity or one of the 3 two cycle. + EXPECT_EQ(4, solutions.size()); +} + +TEST(ExpandInverseTest, DuplicateAtDifferentPosition) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + inverse { + f_direct: [ 0, 1, 2, 3 ] + f_inverse: [ 4, 5, 6, 0 ] + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + + // f(0) = 1 has 2 solutions, same with f(0) = 2. 
+ EXPECT_EQ(4, solutions.size()); +} + +TEST(ExpandSmallLinearTest, ReplaceNonEqual) { + CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 5 ] } + variables { domain: [ 0, 5 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + linear { + vars: [ 0, 1 ] + coeffs: [ 1, 1 ] + domain: [ 0, 4, 6, 10 ] + } + } + )pb"); + Model model; + PresolveContext context(&model, &initial_model, nullptr); + context.InitializeNewDomains(); + context.InsertVarValueEncoding(2, 0, 0); + context.InsertVarValueEncoding(3, 0, 1); + context.InsertVarValueEncoding(4, 0, 2); + context.InsertVarValueEncoding(5, 0, 3); + context.InsertVarValueEncoding(6, 0, 4); + context.InsertVarValueEncoding(7, 0, 5); + context.InsertVarValueEncoding(8, 1, 0); + context.InsertVarValueEncoding(9, 1, 1); + context.InsertVarValueEncoding(10, 1, 2); + context.InsertVarValueEncoding(11, 1, 3); + context.InsertVarValueEncoding(12, 1, 4); + context.InsertVarValueEncoding(13, 1, 5); + ExpandCpModel(&context); + + const CpModelProto expected_model = ParseTestProto(R"pb( + variables { domain: 0 domain: 5 } + variables { domain: 0 domain: 5 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + constraints {} + constraints { + enforcement_literal: 2 + linear { 
vars: 0 coeffs: 1 domain: 0 domain: 0 } + } + constraints { + enforcement_literal: -3 + linear { + vars: 0 + coeffs: 1 + domain: -9223372036854775808 + domain: -1 + domain: 1 + domain: 9223372036854775807 + } + } + constraints { + enforcement_literal: 3 + linear { vars: 0 coeffs: 1 domain: 1 domain: 1 } + } + constraints { + enforcement_literal: -4 + linear { + vars: 0 + coeffs: 1 + domain: -9223372036854775808 + domain: 0 + domain: 2 + domain: 9223372036854775807 + } + } + constraints { + enforcement_literal: 4 + linear { vars: 0 coeffs: 1 domain: 2 domain: 2 } + } + constraints { + enforcement_literal: -5 + linear { + vars: 0 + coeffs: 1 + domain: -9223372036854775808 + domain: 1 + domain: 3 + domain: 9223372036854775807 + } + } + constraints { + enforcement_literal: 5 + linear { vars: 0 coeffs: 1 domain: 3 domain: 3 } + } + constraints { + enforcement_literal: -6 + linear { + vars: 0 + coeffs: 1 + domain: -9223372036854775808 + domain: 2 + domain: 4 + domain: 9223372036854775807 + } + } + constraints { + enforcement_literal: 6 + linear { vars: 0 coeffs: 1 domain: 4 domain: 4 } + } + constraints { + enforcement_literal: -7 + linear { + vars: 0 + coeffs: 1 + domain: -9223372036854775808 + domain: 3 + domain: 5 + domain: 9223372036854775807 + } + } + constraints { + enforcement_literal: 7 + linear { vars: 0 coeffs: 1 domain: 5 domain: 5 } + } + constraints { + enforcement_literal: -8 + linear { + vars: 0 + coeffs: 1 + domain: -9223372036854775808 + domain: 4 + domain: 6 + domain: 9223372036854775807 + } + } + constraints { + enforcement_literal: 8 + linear { vars: 1 coeffs: 1 domain: 0 domain: 0 } + } + constraints { + enforcement_literal: -9 + linear { + vars: 1 + coeffs: 1 + domain: -9223372036854775808 + domain: -1 + domain: 1 + domain: 9223372036854775807 + } + } + constraints { + enforcement_literal: 9 + linear { vars: 1 coeffs: 1 domain: 1 domain: 1 } + } + constraints { + enforcement_literal: -10 + linear { + vars: 1 + coeffs: 1 + domain: 
-9223372036854775808 + domain: 0 + domain: 2 + domain: 9223372036854775807 + } + } + constraints { + enforcement_literal: 10 + linear { vars: 1 coeffs: 1 domain: 2 domain: 2 } + } + constraints { + enforcement_literal: -11 + linear { + vars: 1 + coeffs: 1 + domain: -9223372036854775808 + domain: 1 + domain: 3 + domain: 9223372036854775807 + } + } + constraints { + enforcement_literal: 11 + linear { vars: 1 coeffs: 1 domain: 3 domain: 3 } + } + constraints { + enforcement_literal: -12 + linear { + vars: 1 + coeffs: 1 + domain: -9223372036854775808 + domain: 2 + domain: 4 + domain: 9223372036854775807 + } + } + constraints { + enforcement_literal: 12 + linear { vars: 1 coeffs: 1 domain: 4 domain: 4 } + } + constraints { + enforcement_literal: -13 + linear { + vars: 1 + coeffs: 1 + domain: -9223372036854775808 + domain: 3 + domain: 5 + domain: 9223372036854775807 + } + } + constraints { + enforcement_literal: 13 + linear { vars: 1 coeffs: 1 domain: 5 domain: 5 } + } + constraints { + enforcement_literal: -14 + linear { + vars: 1 + coeffs: 1 + domain: -9223372036854775808 + domain: 4 + domain: 6 + domain: 9223372036854775807 + } + } + constraints { bool_or { literals: -3 literals: -14 } } + constraints { bool_or { literals: -4 literals: -13 } } + constraints { bool_or { literals: -5 literals: -12 } } + constraints { bool_or { literals: -6 literals: -11 } } + constraints { bool_or { literals: -7 literals: -10 } } + constraints { bool_or { literals: -8 literals: -9 } } + )pb"); + EXPECT_THAT(initial_model, testing::EqualsProto(expected_model)); +} + +TEST(TableExpandTest, UsedToFail) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 3 ] } + variables { domain: [ 0, 3 ] } + variables { domain: [ 0, 3 ] } + variables { domain: [ 0, 3 ] } + variables { domain: [ 0, 3 ] } + variables { domain: [ 0, 3 ] } + variables { domain: [ 0, 3 ] } + variables { domain: [ 0, 3 ] } + constraints { + table { + vars: [ 0, 4, 1, 5 ] + values: [ 0, 0, 2, 
2 ] + values: [ 1, 0, 3, 0 ] + values: [ 2, 2, 0, 0 ] + values: [ 3, 0, 1, 0 ] + } + } + constraints { + table { + vars: [ 1, 5, 3, 6 ] + values: [ 0, 0, 2, 2 ] + values: [ 1, 0, 3, 0 ] + values: [ 2, 2, 0, 0 ] + values: [ 3, 0, 1, 0 ] + } + } + constraints { + table { + vars: [ 2, 6, 3, 7 ] + values: [ 0, 0, 2, 2 ] + values: [ 1, 0, 3, 0 ] + values: [ 2, 2, 0, 0 ] + values: [ 3, 0, 1, 0 ] + } + } + constraints { + table { + vars: [ 3, 7, 0, 4 ] + values: [ 0, 0, 2, 2 ] + values: [ 1, 0, 3, 0 ] + values: [ 2, 2, 0, 0 ] + values: [ 3, 0, 1, 0 ] + } + } + )pb"); + + SatParameters params; + params.set_cp_model_presolve(false); + const CpSolverResponse response = SolveWithParameters(initial_model, params); + EXPECT_EQ(INFEASIBLE, response.status()); +} + +TEST(LinMaxExpansionTest, SimpleEnumeration) { + CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 5 ] } + variables { domain: [ 0, 5 ] } + variables { domain: [ 0, 6 ] } + constraints { + lin_max { + target { vars: 0 coeffs: 1 offset: 1 } + exprs { vars: 1 coeffs: 2 } + exprs: { vars: 2 coeffs: 1 offset: -3 } + } + } + )pb"); + absl::btree_set> found_solutions; + const CpSolverResponse response = SolveAndCheck( + initial_model, "max_lin_max_size_for_expansion:4", &found_solutions); + absl::btree_set> expected{ + {0, 0, 4}, {1, 0, 5}, {1, 1, 0}, {1, 1, 1}, {1, 1, 2}, {1, 1, 3}, + {1, 1, 4}, {1, 1, 5}, {2, 0, 6}, {2, 1, 6}, {3, 2, 0}, {3, 2, 1}, + {3, 2, 2}, {3, 2, 3}, {3, 2, 4}, {3, 2, 5}, {3, 2, 6}, {5, 3, 0}, + {5, 3, 1}, {5, 3, 2}, {5, 3, 3}, {5, 3, 4}, {5, 3, 5}, {5, 3, 6}}; + EXPECT_EQ(found_solutions, expected); +} + +TEST(LinMaxExpansionTest, GoldenTest) { + CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 5 ] } + variables { domain: [ 0, 5 ] } + variables { domain: [ 0, 6 ] } + constraints { + lin_max { + target { vars: 0 coeffs: 1 offset: 1 } + exprs { vars: 1 coeffs: 2 } + exprs: { vars: 2 coeffs: 1 offset: -3 } + } + } + )pb"); + Model model; + 
model.GetOrCreate()->set_max_lin_max_size_for_expansion(4); + PresolveContext context(&model, &initial_model, nullptr); + ExpandCpModel(&context); + + const CpModelProto expected_model = ParseTestProto(R"pb( + variables { domain: 0 domain: 5 } + variables { domain: 0 domain: 5 } + variables { domain: 0 domain: 6 } + variables { domain: 0 domain: 1 } + constraints {} + constraints { + linear { + vars: 0 + vars: 1 + coeffs: 1 + coeffs: -2 + domain: -1 + domain: 9223372036854775806 + } + } + constraints { + linear { + vars: 0 + vars: 2 + coeffs: 1 + coeffs: -1 + domain: -4 + domain: 9223372036854775803 + } + } + constraints { + enforcement_literal: 3 + linear { + vars: 0 + vars: 1 + coeffs: 1 + coeffs: -2 + domain: -9223372036854775808 + domain: -1 + } + } + constraints { + enforcement_literal: -4 + linear { + vars: 0 + vars: 2 + coeffs: 1 + coeffs: -1 + domain: -9223372036854775808 + domain: -4 + } + } + )pb"); + EXPECT_THAT(initial_model, testing::EqualsProto(expected_model)); +} + +TEST(FinalExpansionForLinearConstraintTest, ComplexLinearExpansion) { + CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + linear { + vars: [ 0, 1 ] + coeffs: [ 1, 1 ] + domain: [ 0, 2, 4, 6, 8, 10 ] + } + } + solution_hint { + vars: [ 0, 1 ] + values: [ 1, 5 ] + } + )pb"); + Model model; + PresolveContext context(&model, &initial_model, nullptr); + + context.InitializeNewDomains(); + context.LoadSolutionHint(); + + FinalExpansionForLinearConstraint(&context); + + const CpModelProto expected_model = ParseTestProto(R"pb( + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints {} + constraints { bool_or { literals: [ 2, 3, 4 ] } } + constraints { + enforcement_literal: 2 + linear { + vars: [ 0, 1 ] + coeffs: [ 1, 1 ] + domain: [ 0, 2 ] + } + } + constraints { + enforcement_literal: 3 
+ linear { + vars: [ 0, 1 ] + coeffs: [ 1, 1 ] + domain: [ 4, 6 ] + } + } + constraints { + enforcement_literal: 4 + linear { + vars: [ 0, 1 ] + coeffs: [ 1, 1 ] + domain: [ 8, 10 ] + } + } + solution_hint { + vars: [ 0, 1 ] + values: [ 1, 5 ] + } + )pb"); + EXPECT_THAT(initial_model, testing::EqualsProto(expected_model)); + + // We should properly complete the hint and choose the bucket [4, 6]. + EXPECT_THAT(context.SolutionHint(), ::testing::ElementsAre(1, 5, 0, 1, 0)); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/cp_model_lns.cc b/ortools/sat/cp_model_lns.cc index 5e8104a47f..a3ccabe003 100644 --- a/ortools/sat/cp_model_lns.cc +++ b/ortools/sat/cp_model_lns.cc @@ -46,6 +46,7 @@ #include "ortools/sat/cp_model_presolve.h" #include "ortools/sat/cp_model_solver_helpers.h" #include "ortools/sat/cp_model_utils.h" +#include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" #include "ortools/sat/linear_constraint_manager.h" #include "ortools/sat/linear_programming_constraint.h" @@ -806,33 +807,31 @@ void InsertCumulativePrecedences( } } -struct Rectangle { +struct IndexedRectangle { int interval_index; - int64_t x_start; - int64_t x_end; - int64_t y_start; - int64_t y_end; + Rectangle r; - bool operator<(const Rectangle& other) const { - return std::tie(x_start, x_end) < std::tie(other.x_start, other.x_end); + bool operator<(const IndexedRectangle& other) const { + return std::tie(r.x_min, r.x_max) < std::tie(other.r.x_min, other.r.x_max); } }; void InsertRectanglePredecences( - const std::vector& rectangles, + const std::vector& rectangles, absl::flat_hash_set>* precedences) { // TODO(user): Refine set of interesting points. 
- std::vector interesting_points; - for (const Rectangle& r : rectangles) { - interesting_points.push_back(r.y_end - 1); + std::vector interesting_points; + for (const IndexedRectangle& idx_r : rectangles) { + interesting_points.push_back(idx_r.r.y_max - 1); } gtl::STLSortAndRemoveDuplicates(&interesting_points); std::vector demands; - for (const int64_t t : interesting_points) { + for (const IntegerValue t : interesting_points) { demands.clear(); - for (const Rectangle& r : rectangles) { - if (r.y_start > t || r.y_end <= t) continue; - demands.push_back({r.interval_index, r.x_start, r.x_end, 1}); + for (const IndexedRectangle& idx_r : rectangles) { + if (idx_r.r.y_min > t || idx_r.r.y_max <= t) continue; + demands.push_back({idx_r.interval_index, idx_r.r.x_min.value(), + idx_r.r.x_max.value(), 1}); } std::sort(demands.begin(), demands.end()); InsertPrecedencesFromSortedListOfNonOverlapingIntervals(demands, @@ -848,8 +847,8 @@ void InsertNoOverlap2dPrecedences( std::vector demands; const NoOverlap2DConstraintProto& no_overlap_2d = model_proto.constraints(no_overlap_2d_index).no_overlap_2d(); - std::vector x_main; - std::vector y_main; + std::vector x_main; + std::vector y_main; for (int i = 0; i < no_overlap_2d.x_intervals_size(); ++i) { // Ignore unperformed rectangles. const int x_interval_index = no_overlap_2d.x_intervals(i); @@ -876,10 +875,16 @@ void InsertNoOverlap2dPrecedences( // Ignore rectangles with zero area. 
if (x_start_value == x_end_value || y_start_value == y_end_value) continue; - x_main.push_back({x_interval_index, x_start_value, x_end_value, - y_start_value, y_end_value}); - y_main.push_back({y_interval_index, y_start_value, y_end_value, - x_start_value, x_end_value}); + x_main.push_back({.interval_index = x_interval_index, + .r = {.x_min = x_start_value, + .x_max = x_end_value, + .y_min = y_start_value, + .y_max = y_end_value}}); + y_main.push_back({.interval_index = y_interval_index, + .r = {.x_min = y_start_value, + .x_max = y_end_value, + .y_min = x_start_value, + .y_max = x_end_value}}); } if (x_main.empty() || y_main.empty()) return; diff --git a/ortools/sat/cp_model_postsolve_test.cc b/ortools/sat/cp_model_postsolve_test.cc new file mode 100644 index 0000000000..dee1d25448 --- /dev/null +++ b/ortools/sat/cp_model_postsolve_test.cc @@ -0,0 +1,347 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/cp_model_postsolve.h" + +#include +#include + +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/parse_test_proto.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/util/logging.h" +#include "ortools/util/sorted_interval_list.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::google::protobuf::contrib::parse_proto::ParseTestProto; + +// Note that the postsolve is already tested in many of our solver tests or +// random presolve tests. 
We just have a small unit test here. +TEST(PostsolveResponseTest, BasicExample) { + // Fixing z will allow the postsolve code to reconstruct all values. + const CpModelProto mapping_proto = ParseTestProto(R"pb( + variables { name: 'x' domain: 0 domain: 10 } + variables { name: 'y' domain: 0 domain: 10 } + variables { name: 'z' domain: 0 domain: 10 } + constraints { + linear { + vars: [ 0, 1, 2 ] + coeffs: [ 1, 2, -3 ] + domain: [ 5, 5 ] + } + } + constraints { + linear { + vars: [ 1, 2 ] + coeffs: [ 3, -1 ] + domain: [ 5, 5 ] + } + } + )pb"); + + std::vector solution = {1}; + std::vector postsolve_mapping = {2}; // The solution fix z. + PostsolveResponse(/*num_variables_in_original_model=*/3, mapping_proto, + postsolve_mapping, &solution); + + // x + 2y - 3z = 5 + // 3y - z = 5 + // z = 1 + EXPECT_THAT(solution, ::testing::ElementsAre(4, 2, 1)); +} + +TEST(PostsolveResponseTest, ExactlyOneExample1) { + const CpModelProto mapping_proto = ParseTestProto(R"pb( + variables { name: 'x' domain: 0 domain: 1 } + variables { name: 'y' domain: 0 domain: 1 } + variables { name: 'z' domain: 0 domain: 1 } + constraints { exactly_one { literals: [ 0, 1, 2 ] } } + )pb"); + + std::vector solution = {1}; + std::vector postsolve_mapping = {2}; // The solution fix z. + PostsolveResponse(/*num_variables_in_original_model=*/3, mapping_proto, + postsolve_mapping, &solution); + EXPECT_THAT(solution, ::testing::ElementsAre(0, 0, 1)); +} + +TEST(PostsolveResponseTest, ExactlyOneExample2) { + const CpModelProto mapping_proto = ParseTestProto(R"pb( + variables { name: 'x' domain: 0 domain: 1 } + variables { name: 'y' domain: 0 domain: 1 } + variables { name: 'z' domain: 0 domain: 1 } + constraints { exactly_one { literals: [ 0, 1, 2 ] } } + )pb"); + + std::vector solution = {0}; + std::vector postsolve_mapping = {2}; // The solution fix z. + PostsolveResponse(/*num_variables_in_original_model=*/3, mapping_proto, + postsolve_mapping, &solution); + + // One variable is set to one. 
+ EXPECT_THAT(solution, ::testing::ElementsAre(0, 1, 0)); +} + +TEST(PostsolveResponseTest, Element) { + // Fixing z will allow the postsolve code to reconstruct all values. + const CpModelProto mapping_proto = ParseTestProto(R"pb( + variables { + name: 'index' + domain: [ 0, 1 ] + } + variables { + name: 'a' + domain: [ 1, 10 ] + } + variables { + name: 'b' + domain: [ 0, 10 ] + } + variables { + name: 'target' + domain: [ 0, 10 ] + } + constraints { + element { + index: 0 + vars: [ 1, 2 ] + target: 3 + } + } + )pb"); + + std::vector solution; + std::vector postsolve_mapping = {}; + PostsolveResponse(/*num_variables_in_original_model=*/4, mapping_proto, + postsolve_mapping, &solution); + EXPECT_THAT(solution, ::testing::ElementsAre(0, 1, 0, 1)); +} + +TEST(PostsolveResponseTest, VariableElement) { + const CpModelProto mapping_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 129 ] } + variables { domain: [ 1, 5 ] } + variables { domain: [ 0, 129 ] } + variables { domain: [ 2, 2 ] } + constraints { element { index: 3 target: 2 vars: 0 vars: 1 vars: 0 } } + )pb"); + + std::vector solution; + std::vector postsolve_mapping = {}; + PostsolveResponse(/*num_variables_in_original_model=*/4, mapping_proto, + postsolve_mapping, &solution); + EXPECT_THAT(solution, ::testing::ElementsAre(0, 1, 0, 2)); +} + +// Note that our postsolve code is "limited" when it comes to solving a single +// linear equation since we should only encounter "simple" cases. +TEST(PostsolveResponseTest, TrickyLinearCase) { + // The equation is 2x + y = z + // + // It mostly works all the time, except if we decide to make z - y not a + // multiple of two. This is not necessarily detected by our presolve since + // 2 * [0, 124] is too complex to represent. Yet for any value of x and y + // there is a possible z, but the reverse is not true, since y = 1, z = 0 is + // not feasible. + // + // The presolve should deal with that by putting z first so that the + // postsolve code does not fail. 
+ const CpModelProto mapping_proto = ParseTestProto(R"pb( + variables { + name: 'x' + domain: [ 0, 124 ] + } + variables { + name: 'y' + domain: [ 0, 1 ] + } + variables { + name: 'z' + domain: [ 0, 255 ] + } + constraints { + linear { + vars: [ 2, 0, 1 ] + coeffs: [ -1, 2, 1 ] + domain: [ 0, 0 ] + } + } + )pb"); + + // The likely response (there are many possible). + std::vector solution; + CpSolverResponse response; + response.set_status(OPTIMAL); + std::vector postsolve_mapping; + PostsolveResponse(/*num_variables_in_original_model=*/3, mapping_proto, + postsolve_mapping, &solution); + EXPECT_THAT(solution, ::testing::ElementsAre(0, 0, 0)); +} + +// This used to fail because we were computing the EXACT domain attainable +// by the sum of discrete domains, which have a lot of disjoint parts. +// +// But our presolve was fine, because adding each of them to the loose rhs +// domain just results in a domain with a small complexity. +TEST(PostsolveResponseTest, ComplexityIssue) { + CpModelProto mapping_proto; + + // N variables such that their sum can be an even number. If we try to + // compute the exact domains of their sum, we are quadratic in complexity. + const int num_variables = 30; + for (int i = 0; i < num_variables; ++i) { + IntegerVariableProto* var = mapping_proto.add_variables(); + var->add_domain(0); + var->add_domain(0); + const int value = 1 << (1 + i); + var->add_domain(value); + var->add_domain(value); + } + + // A linear constraint sum variable in [0, 1e9]. + ConstraintProto* ct = mapping_proto.add_constraints(); + ct->mutable_linear()->add_domain(0); + ct->mutable_linear()->add_domain(1e9); + for (int i = 0; i < num_variables; ++i) { + ct->mutable_linear()->add_vars(i); + ct->mutable_linear()->add_coeffs(1); + } + + // The likely response (there are many possible). 
+ std::vector solution; + std::vector postsolve_mapping; + PostsolveResponse(num_variables, mapping_proto, postsolve_mapping, &solution); + ASSERT_EQ(solution.size(), num_variables); +} + +TEST(FillTightenedDomainInResponseTest, BasicBehavior) { + // Original model. + const CpModelProto original_model = ParseTestProto(R"pb( + variables { + name: 'x' + domain: [ 0, 124 ] + } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 255 ] } + )pb"); + + // We might have more variables there. + // Also the domains might be tighter. + const CpModelProto mapping_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 100 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 255 ] } + variables { domain: [ 0, 17 ] } + variables { domain: [ 0, 18 ] } + )pb"); + + // Let's assume the presolved model contains 3 variables, 2 in common. + std::vector postsolve_mapping{0, 2, 4}; + std::vector search_bounds{Domain(0, 100), Domain(0, 0), Domain(3, 7)}; + + // Call the postsolving. + SolverLogger logger; + CpSolverResponse response; + FillTightenedDomainInResponse(original_model, mapping_proto, + postsolve_mapping, search_bounds, &response, + &logger); + + // Let's test by constructing a model for easy comparison. + CpModelProto returned_model; + for (const IntegerVariableProto& var : response.tightened_variables()) { + *returned_model.add_variables() = var; + } + + const CpModelProto expected_model = ParseTestProto(R"pb( + variables { + name: 'x' + domain: [ 0, 100 ] + } # presolve reduced the domain. + variables { domain: [ 0, 1 ] } # no info. + variables { domain: [ 0, 0 ] } # was fixed by search. + )pb"); + EXPECT_THAT(returned_model, testing::EqualsProto(expected_model)); +} + +TEST(FillTightenedDomainInResponseTest, WithAffine) { + // Original model. + const CpModelProto original_model = ParseTestProto(R"pb( + variables { domain: [ 0, 124 ] } + variables { domain: [ 0, 50 ] } + variables { domain: [ 0, 255 ] } + )pb"); + + // We might have more variables there. 
+ // Also the domains might be tighter. + const CpModelProto mapping_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 100 ] } + variables { domain: [ 0, 50 ] } + variables { domain: [ 0, 100 ] } + variables { domain: [ 0, 17 ] } + variables { domain: [ 0, 18 ] } + variables { domain: [ 0, 19 ] } + constraints { + linear { + vars: [ 0, 3 ] + coeffs: [ 2, 1 ] + domain: [ 10, 10 ] + } + } + constraints { + linear { + vars: [ 1, 4 ] + coeffs: [ 1, 1 ] + domain: [ 10, 10 ] + } + } + constraints { + linear { + vars: [ 5, 2 ] + coeffs: [ 2, 1 ] + domain: [ 10, 10 ] + } + } + )pb"); + + std::vector postsolve_mapping{3, 4, 5}; + std::vector search_bounds{Domain(0, 20), Domain(0, 20), Domain(3, 5)}; + + // Call the postsolving. + SolverLogger logger; + logger.EnableLogging(true); + CpSolverResponse response; + FillTightenedDomainInResponse(original_model, mapping_proto, + postsolve_mapping, search_bounds, &response, + &logger); + + // Lets test by constructing a model for easy comparison. + CpModelProto returned_model; + for (const IntegerVariableProto& var : response.tightened_variables()) { + *returned_model.add_variables() = var; + } + + const CpModelProto expected_model = ParseTestProto(R"pb( + variables { domain: [ 0, 5 ] } # 2 * v = 10 - [0, 17] + variables { domain: [ 0, 10 ] } # v = 10 - [0, 18] + variables { domain: [ 0, 4 ] } # v = 10 - 2 * [3, 5] + )pb"); + EXPECT_THAT(returned_model, testing::EqualsProto(expected_model)); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/cp_model_search_test.cc b/ortools/sat/cp_model_search_test.cc new file mode 100644 index 0000000000..c2961a523d --- /dev/null +++ b/ortools/sat/cp_model_search_test.cc @@ -0,0 +1,297 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/cp_model_search.h" + +#include +#include + +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/parse_test_proto.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_solver.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_parameters.pb.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::google::protobuf::contrib::parse_proto::ParseTestProto; + +CpModelProto CreateExactlyOneTrueBooleanCpModel(int size) { + CpModelProto model_proto; + auto* exactly_one_constraint = + model_proto.add_constraints()->mutable_exactly_one(); + DecisionStrategyProto* const search_strategy = + model_proto.add_search_strategy(); + + for (int i = 0; i < size; ++i) { + IntegerVariableProto* const var = model_proto.add_variables(); + var->add_domain(0); + var->add_domain(1); + exactly_one_constraint->add_literals(i); + search_strategy->add_variables(i); + } + return model_proto; +} + +TEST(RandomSearchTest, CheckDistribution) { + const int kSize = 50; + std::vector winners(kSize, 0); + const int kLoops = 100; + for (int l = 0; l < kLoops; ++l) { + const CpModelProto model_proto = CreateExactlyOneTrueBooleanCpModel(kSize); + Model model; + SatParameters parameters; + parameters.set_search_random_variable_pool_size(10); + parameters.set_cp_model_presolve(false); + parameters.set_search_branching(SatParameters::FIXED_SEARCH); + parameters.set_random_seed(l); + parameters.set_num_workers(1); + model.Add(NewSatParameters(parameters)); + const CpSolverResponse response = 
SolveCpModel(model_proto, &model); + for (int i = 0; i < kSize; ++i) { + if (response.solution(i)) { + winners[i]++; + } + } + } + for (int i = 0; i < kSize; ++i) { + EXPECT_LE(winners[i], kLoops / 10); + } +} + +TEST(RandomSearchTest, CheckSeed) { + const int kSeeds = 10; + for (int seed = 0; seed < kSeeds; ++seed) { + const int kSize = 20; + std::vector winners(kSize, 0); + const int kLoops = 50; + for (int l = 0; l < kLoops; ++l) { + const CpModelProto model_proto = + CreateExactlyOneTrueBooleanCpModel(kSize); + + SatParameters params; + params.set_randomize_search(true); + params.set_cp_model_presolve(false); + params.set_search_branching(SatParameters::FIXED_SEARCH); + params.set_use_absl_random(false); // Otherwise, each solve changes. + params.set_random_seed(0); + const CpSolverResponse response = + SolveWithParameters(model_proto, params); + for (int i = 0; i < kSize; ++i) { + if (response.solution(i)) { + winners[i]++; + } + } + } + for (int i = 0; i < kSize; ++i) { + EXPECT_TRUE(winners[i] == 0 || winners[i] == kLoops) << winners[i]; + } + } +} + +TEST(BasicFixedSearchBehaviorTest, Default) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 4, 50 ] } + variables { domain: [ 3, 7 ] } + variables { domain: [ 0, 7 ] } + variables { domain: [ 4, 5 ] } + variables { domain: [ 3, 9 ] } + constraints { + all_diff { + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + exprs { vars: 2 coeffs: 1 } + exprs { vars: 3 coeffs: 1 } + exprs { vars: 4 coeffs: 1 } + } + } + )pb"); + Model model; + model.Add(NewSatParameters( + "cp_model_presolve:false,search_branching:FIXED_SEARCH")); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + EXPECT_THAT(response.solution(), testing::ElementsAre(4, 3, 0, 5, 6)); +} + +TEST(BasicFixedSearchBehaviorTest, ReverseOrder) { + // Note that SELECT_LOWER_HALF or SELECT_MIN_VALUE result in the same + // solution. 
+ const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 4, 50 ] } + variables { domain: [ 3, 7 ] } + variables { domain: [ 0, 7 ] } + variables { domain: [ 4, 5 ] } + variables { domain: [ 3, 9 ] } + constraints { + all_diff { + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + exprs { vars: 2 coeffs: 1 } + exprs { vars: 3 coeffs: 1 } + exprs { vars: 4 coeffs: 1 } + } + } + search_strategy { + variables: [ 4, 3, 2, 1, 0 ] + variable_selection_strategy: CHOOSE_FIRST + domain_reduction_strategy: SELECT_LOWER_HALF + } + )pb"); + Model model; + model.Add(NewSatParameters( + "cp_model_presolve:false,search_branching:FIXED_SEARCH")); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + EXPECT_THAT(response.solution(), testing::ElementsAre(6, 5, 0, 4, 3)); +} + +// The strategies that sort variables according to their domain do not have +// a fixed solution depending on the propagation strength... 
+TEST(BasicFixedSearchBehaviorTest, MinDomainSize) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 4, 10 ] } + variables { domain: [ 3, 7 ] } + variables { domain: [ 0, 7 ] } + variables { domain: [ 4, 5 ] } + variables { domain: [ 3, 9 ] } + constraints { + all_diff { + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + exprs { vars: 2 coeffs: 1 } + exprs { vars: 3 coeffs: 1 } + exprs { vars: 4 coeffs: 1 } + } + } + search_strategy { + variables: [ 0, 1, 2, 3, 4 ] + variable_selection_strategy: CHOOSE_MIN_DOMAIN_SIZE + domain_reduction_strategy: SELECT_MAX_VALUE + } + )pb"); + Model model; + model.Add(NewSatParameters( + "cp_model_presolve:false,search_branching:FIXED_SEARCH")); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + EXPECT_THAT(response.solution(), testing::ElementsAre(10, 7, 6, 5, 9)); +} + +TEST(BasicFixedSearchBehaviorTest, WithTransformation1) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 3, 10 ] } + variables { domain: [ 3, 7 ] } + constraints { + all_diff { + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + search_strategy { + exprs { vars: 0 coeffs: 1 offset: 4 } + exprs { vars: 1 coeffs: 4 } + variable_selection_strategy: CHOOSE_LOWEST_MIN + domain_reduction_strategy: SELECT_MIN_VALUE + } + )pb"); + Model model; + model.Add(NewSatParameters( + "cp_model_presolve:false,search_branching:FIXED_SEARCH")); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + EXPECT_THAT(response.solution(), testing::ElementsAre(3, 4)); +} + +TEST(BasicFixedSearchBehaviorTest, WithTransformation2) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 3, 7 ] } + variables { domain: [ 3, 7 ] } + constraints { + all_diff { + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + 
search_strategy { + exprs { vars: 0 coeffs: -1 offset: 4 } + exprs { vars: 1 coeffs: -4 } + variable_selection_strategy: CHOOSE_LOWEST_MIN + domain_reduction_strategy: SELECT_MIN_VALUE + } + )pb"); + Model model; + model.Add(NewSatParameters( + "cp_model_presolve:false,search_branching:FIXED_SEARCH")); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + EXPECT_THAT(response.solution(), testing::ElementsAre(6, 7)); +} + +TEST(BasicFixedSearchBehaviorTest, MedianTest) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 8 ] } + variables { domain: [ 0, 8 ] } + constraints { + linear { + vars: [ 0, 1 ] + coeffs: [ 1, 1 ] + domain: [ 8, 100 ] + } + } + search_strategy { + variables: [ 0, 1 ] + variable_selection_strategy: CHOOSE_FIRST + domain_reduction_strategy: SELECT_MEDIAN_VALUE + } + )pb"); + SatParameters params; + params.set_keep_all_feasible_solutions_in_presolve(true); + params.set_search_branching(SatParameters::FIXED_SEARCH); + const CpSolverResponse response = SolveWithParameters(model_proto, params); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + EXPECT_THAT(response.solution(), testing::ElementsAre(4, 6)); +} + +TEST(BasicFixedSearchBehaviorTest, MedianTest2) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 20 ] } + variables { domain: [ 6, 12 ] } + constraints { + all_diff { + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + search_strategy { + variables: [ 0, 1 ] + variable_selection_strategy: CHOOSE_MAX_DOMAIN_SIZE + domain_reduction_strategy: SELECT_MEDIAN_VALUE + } + )pb"); + SatParameters params; + params.set_keep_all_feasible_solutions_in_presolve(true); + params.set_search_branching(SatParameters::FIXED_SEARCH); + const CpSolverResponse response = SolveWithParameters(model_proto, params); + + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + 
EXPECT_THAT(response.solution(), testing::ElementsAre(10, 8)); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index f47ec0aa4d..728524ed36 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -1172,7 +1172,7 @@ class LnsSolver : public SubSolver { static_cast(generator_->num_fully_solved_calls()) / static_cast(num_calls); const std::string lns_info = absl::StrFormat( - "%s (d=%0.2f s=%i t=%0.2f p=%0.2f stall=%d h=%s)", source_info, + "%s (d=%0.3f s=%i t=%0.2f p=%0.2f stall=%d h=%s)", source_info, data.difficulty, task_id, data.deterministic_limit, fully_solved_proportion, stall, search_info); diff --git a/ortools/sat/cuts.cc b/ortools/sat/cuts.cc index e65af77517..af4aec65de 100644 --- a/ortools/sat/cuts.cc +++ b/ortools/sat/cuts.cc @@ -1085,6 +1085,17 @@ struct LargeContribFirst { } }; +struct LargeLpValueFirst { + bool operator()(const CutTerm& a, const CutTerm& b) const { + if (a.lp_value == b.lp_value) { + // Prefer high coefficients if the distance is the same. + // We have more chance to get a cover this way. + return a.coeff > b.coeff; + } + return a.lp_value > b.lp_value; + } +}; + // When minimizing a cover we want to remove bad score (large dist) divided by // item size. Note that here we assume item are "boolean" fully taken or not. 
// for general int we use (lp_dist / bound_diff) / (coeff * bound_diff) which @@ -1117,14 +1128,15 @@ struct KnapsackRemove { template int CoverCutHelper::MinimizeCover(int cover_size, absl::int128 slack) { CHECK_GT(slack, 0); - std::sort(cut_.terms.begin(), cut_.terms.begin() + cover_size, Compare()); + absl::Span terms = absl::MakeSpan(cut_.terms); + std::sort(terms.begin(), terms.begin() + cover_size, Compare()); for (int i = 0; i < cover_size;) { - const CutTerm& t = cut_.terms[i]; + const CutTerm& t = terms[i]; const absl::int128 contrib = absl::int128(t.bound_diff.value()) * absl::int128(t.coeff.value()); if (contrib < slack) { slack -= contrib; - std::swap(cut_.terms[i], cut_.terms[--cover_size]); + std::swap(terms[i], terms[--cover_size]); } else { ++i; } @@ -1136,16 +1148,17 @@ int CoverCutHelper::MinimizeCover(int cover_size, absl::int128 slack) { template int CoverCutHelper::GetCoverSize(int relevant_size) { if (relevant_size == 0) return 0; + absl::Span terms = absl::MakeSpan(cut_.terms); // Take first all at variable at upper bound, and ignore the one at lower // bound. int part1 = 0; for (int i = 0; i < relevant_size;) { - CutTerm& term = cut_.terms[i]; + CutTerm& term = terms[i]; const double dist = term.LpDistToMaxValue(); if (dist < 1e-6) { // Move to part 1. - std::swap(term, cut_.terms[part1]); + std::swap(term, terms[part1]); ++i; ++part1; } else if (term.lp_value > 1e-6) { @@ -1154,30 +1167,27 @@ int CoverCutHelper::GetCoverSize(int relevant_size) { } else { // Exclude entirely (part 3). --relevant_size; - std::swap(term, cut_.terms[relevant_size]); + std::swap(term, terms[relevant_size]); } } - std::sort(cut_.terms.begin() + part1, cut_.terms.begin() + relevant_size, - CompareAdd()); + std::sort(terms.begin() + part1, terms.begin() + relevant_size, CompareAdd()); // We substract the initial rhs to avoid overflow. 
- CHECK_GE(cut_.rhs, 0); + DCHECK_GE(cut_.rhs, 0); absl::int128 max_shifted_activity = -cut_.rhs; absl::int128 shifted_round_up = -cut_.rhs; int cover_size = 0; - double dist = 0.0; for (; cover_size < relevant_size; ++cover_size) { if (max_shifted_activity > 0) break; - const CutTerm& term = cut_.terms[cover_size]; + const CutTerm& term = terms[cover_size]; max_shifted_activity += absl::int128(term.coeff.value()) * absl::int128(term.bound_diff.value()); shifted_round_up += absl::int128(term.coeff.value()) * std::min(absl::int128(term.bound_diff.value()), absl::int128(std::ceil(term.lp_value - 1e-6))); - dist += term.LpDistToMaxValue(); } - CHECK_GE(cover_size, 0); + DCHECK_GE(cover_size, 0); if (shifted_round_up <= 0) { return 0; } @@ -1187,51 +1197,60 @@ int CoverCutHelper::GetCoverSize(int relevant_size) { // Try a simple cover heuristic. // Look for violated CUT of the form: sum (UB - X) or (X - LB) >= 1. int CoverCutHelper::GetCoverSizeForBooleans() { + absl::Span terms = absl::MakeSpan(cut_.terms); + // Sorting can be slow, so we start by splitting the vector in 3 parts - // [can always be in cover, candidates, can never be in cover]. + // - Can always be in cover + // - Candidates that needs sorting + // - At most one can be in cover (we keep the max). int part1 = 0; - int relevant_size = cut_.terms.size(); - const double threshold = 1.0 - 1.0 / static_cast(relevant_size); + int relevant_size = terms.size(); + int best_in_part3 = -1; + const double threshold = 1.0 - 1.0 / static_cast(terms.size()); for (int i = 0; i < relevant_size;) { - const double lp_value = cut_.terms[i].lp_value; + const double lp_value = terms[i].lp_value; // Exclude non-Boolean. - if (cut_.terms[i].bound_diff > 1) { + if (terms[i].bound_diff > 1) { --relevant_size; - std::swap(cut_.terms[i], cut_.terms[relevant_size]); + std::swap(terms[i], terms[relevant_size]); continue; } if (lp_value >= threshold) { // Move to part 1. 
- std::swap(cut_.terms[i], cut_.terms[part1]); + std::swap(terms[i], terms[part1]); ++i; ++part1; - } else if (lp_value >= 0.001) { + } else if (lp_value > 0.5) { // Keep in part 2. ++i; } else { - // Exclude entirely (part 3). + // Only keep the max (part 3). --relevant_size; - std::swap(cut_.terms[i], cut_.terms[relevant_size]); + std::swap(terms[i], terms[relevant_size]); + + if (best_in_part3 == -1 || + LargeLpValueFirst()(terms[relevant_size], terms[best_in_part3])) { + best_in_part3 = relevant_size; + } } } + if (best_in_part3 != -1) { + std::swap(terms[relevant_size], terms[best_in_part3]); + ++relevant_size; + } + // Sort by decreasing Lp value. - std::sort(cut_.terms.begin() + part1, cut_.terms.begin() + relevant_size, - [](const CutTerm& a, const CutTerm& b) { - if (a.lp_value == b.lp_value) { - // Prefer low coefficients if the distance is the same. - return a.coeff < b.coeff; - } - return a.lp_value > b.lp_value; - }); + std::sort(terms.begin() + part1, terms.begin() + relevant_size, + LargeLpValueFirst()); double activity = 0.0; int cover_size = relevant_size; absl::int128 slack = -cut_.rhs; for (int i = 0; i < relevant_size; ++i) { - const CutTerm& term = cut_.terms[i]; + const CutTerm& term = terms[i]; activity += term.LpDistToMaxValue(); // As an heuristic we select all the term so that the sum of distance @@ -1259,7 +1278,9 @@ int CoverCutHelper::GetCoverSizeForBooleans() { // possible violation. Note also that we lift as much as possible, so we don't // necessarily optimize for the cut efficacity though. But we do get a // stronger cut. 
- if (slack <= 0) return 0; + if (slack <= 0) { + return 0; + } if (cover_size == 0) return 0; return MinimizeCover(cover_size, slack); } @@ -1315,11 +1336,12 @@ bool CoverCutHelper::TrySimpleKnapsack(const CutData& input_ct, : GetCoverSizeForBooleans(); if (!has_relevant_int && ib_processor == nullptr) { // If some implied bound substitution are possible, we do not cache anything - // currently because the logic is currently sighlty different betweent the + // currently because the logic is currently sighlty different between the // two code. Fix? has_bool_base_ct_ = true; - bool_base_ct_ = cut_; bool_cover_size_ = cover_size; + if (cover_size == 0) return false; + bool_base_ct_ = cut_; } if (cover_size == 0) return false; @@ -1502,8 +1524,9 @@ bool CoverCutHelper::TryWithLetchfordSouliLifting( // We already called GetCoverSizeForBooleans() and ib_processor was nullptr, // so reuse that info. CHECK(ib_processor == nullptr); - InitializeCut(bool_base_ct_); cover_size = bool_cover_size_; + if (cover_size == 0) return false; + InitializeCut(bool_base_ct_); } else { InitializeCut(input_ct); diff --git a/ortools/sat/flaky_models_test.cc b/ortools/sat/flaky_models_test.cc new file mode 100644 index 0000000000..ce388d8e37 --- /dev/null +++ b/ortools/sat/flaky_models_test.cc @@ -0,0 +1,101 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "gtest/gtest.h" +#include "ortools/base/parse_test_proto.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_solver.h" +#include "ortools/sat/sat_parameters.pb.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::google::protobuf::contrib::parse_proto::ParseTestProto; + +TEST(FlakyTest, Issue3108) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + constraints { + enforcement_literal: 2 + interval { + start { vars: 0 coeffs: 1 } + end { vars: 0 coeffs: 1 offset: 1 } + size { offset: 1 } + } + } + constraints { + enforcement_literal: 3 + interval { + start { vars: 1 coeffs: 1 } + end { vars: 1 coeffs: 1 offset: 1 } + size { offset: 1 } + } + } + constraints { + cumulative { + capacity { vars: 4 coeffs: 1 } + intervals: 0 + intervals: 1 + demands { offset: 1 } + demands { offset: 1 } + } + } + constraints { + enforcement_literal: 2 + linear { vars: 0 coeffs: 1 domain: 0 domain: 1 } + } + constraints { + enforcement_literal: -3 + linear { + vars: 0 + coeffs: 1 + domain: -9223372036854775808 + domain: -1 + domain: 2 + domain: 9223372036854775807 + } + } + constraints { + enforcement_literal: 3 + linear { vars: 1 coeffs: 1 domain: 0 domain: 1 } + } + constraints { + enforcement_literal: -4 + linear { + vars: 1 + coeffs: 1 + domain: -9223372036854775808 + domain: -1 + domain: 2 + domain: 9223372036854775807 + } + } + objective { vars: 4 coeffs: 1 } + )pb"); + SatParameters parameters; + parameters.set_log_search_progress(true); + parameters.set_cp_model_probing_level(0); + parameters.set_num_search_workers(1); + const CpSolverResponse response = + SolveWithParameters(model_proto, parameters); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff 
--git a/ortools/sat/integer_expr_test.cc b/ortools/sat/integer_expr_test.cc new file mode 100644 index 0000000000..93c02494e5 --- /dev/null +++ b/ortools/sat/integer_expr_test.cc @@ -0,0 +1,1644 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/integer_expr.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/container/btree_set.h" +#include "absl/log/check.h" +#include "absl/random/distributions.h" +#include "absl/random/random.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/logging.h" +#include "ortools/base/parse_test_proto.h" +#include "ortools/port/proto_utils.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_checker.h" +#include "ortools/sat/cp_model_solver.h" +#include "ortools/sat/cp_model_utils.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/linear_constraint.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/sat_solver.h" +#include "ortools/util/saturated_arithmetic.h" +#include "ortools/util/sorted_interval_list.h" +#include "ortools/util/strong_integers.h" + +namespace operations_research { +namespace sat { +namespace { + +// Weighted sum <= constant reified. 
+void AddWeightedSumLowerOrEqualReif(Literal is_le, + absl::Span vars, + absl::Span coefficients, + int64_t upper_bound, Model* model) { + AddWeightedSumLowerOrEqual({is_le}, vars, coefficients, upper_bound, model); + AddWeightedSumGreaterOrEqual({is_le.Negated()}, vars, coefficients, + upper_bound + 1, model); +} + +// Weighted sum >= constant reified. +void AddWeightedSumGreaterOrEqualReif(Literal is_ge, + absl::Span vars, + absl::Span coefficients, + int64_t lower_bound, Model* model) { + AddWeightedSumGreaterOrEqual({is_ge}, vars, coefficients, lower_bound, model); + AddWeightedSumLowerOrEqual({is_ge.Negated()}, vars, coefficients, + lower_bound - 1, model); +} + +// Weighted sum == constant reified. +// TODO(user): Simplify if the constant is at the edge of the possible values. +void AddFixedWeightedSumReif(Literal is_eq, + const std::vector& vars, + const std::vector& coefficients, + int64_t value, Model* model) { + // We creates two extra Boolean variables in this case. The alternative is + // to code a custom propagator for the direction equality => reified. 
+ const Literal is_le = Literal(model->Add(NewBooleanVariable()), true); + const Literal is_ge = Literal(model->Add(NewBooleanVariable()), true); + model->Add(ReifiedBoolAnd({is_le, is_ge}, is_eq)); + AddWeightedSumLowerOrEqualReif(is_le, vars, coefficients, value, model); + AddWeightedSumGreaterOrEqualReif(is_ge, vars, coefficients, value, model); +} + +using ::google::protobuf::contrib::parse_proto::ParseTestProto; + +CpSolverResponse SolveAndCheck( + const CpModelProto& initial_model, absl::string_view extra_parameters = "", + absl::btree_set>* solutions = nullptr) { + SatParameters params; + params.set_enumerate_all_solutions(true); + if (!extra_parameters.empty()) { + params.MergeFromString(extra_parameters); + } + auto observer = [&](const CpSolverResponse& response) { + VLOG(2) << response; + EXPECT_TRUE(SolutionIsFeasible( + initial_model, std::vector(response.solution().begin(), + response.solution().end()))); + if (solutions != nullptr) { + std::vector solution; + for (int var = 0; var < initial_model.variables_size(); ++var) { + solution.push_back(response.solution(var)); + } + solutions->insert(solution); + } + }; + Model model; + model.Add(NewSatParameters(params)); + model.Add(NewFeasibleSolutionObserver(observer)); + return SolveCpModel(initial_model, &model); +} + +// A simple macro to make the code more readable. 
+#define EXPECT_BOUNDS_EQ(var, lb, ub) \ + EXPECT_TRUE((model.Get(LowerBound(var)) == lb) && \ + (model.Get(UpperBound(var)) == ub)) + +TEST(WeightedSumTest, LevelZeroPropagation) { + Model model; + std::vector vars{model.Add(NewIntegerVariable(4, 9)), + model.Add(NewIntegerVariable(-7, -2)), + model.Add(NewIntegerVariable(3, 8))}; + + const IntegerVariable sum = + model.Add(NewWeightedSum(std::vector{1, -2, 3}, vars)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_EQ(model.Get(LowerBound(sum)), 4 + 2 * 2 + 3 * 3); + EXPECT_EQ(model.Get(UpperBound(sum)), 9 + 2 * 7 + 3 * 8); + + // Setting this leave only a slack of 2. + model.Add(LowerOrEqual(sum, 19)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(vars[0], 4, 6); // coeff = 1, slack = 2 + EXPECT_BOUNDS_EQ(vars[1], -3, -2); // coeff = 2, slack = 1 + EXPECT_BOUNDS_EQ(vars[2], 3, 3); // coeff = 3, slack = 0 +} + +TEST(WeightedSumLowerOrEqualTest, UnaryRounding) { + Model model; + IntegerVariable var = model.Add(NewIntegerVariable(0, 10)); + const std::vector coeffs = {-100}; + model.Add(WeightedSumLowerOrEqual({var}, coeffs, -320)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_EQ(model.Get(LowerBound(var)), 4); +} + +// This one used to fail before CL 139204507. +TEST(WeightedSumTest, LevelZeroPropagationWithNegativeNumbers) { + Model model; + std::vector vars{model.Add(NewIntegerVariable(-5, 0)), + model.Add(NewIntegerVariable(-6, 0)), + model.Add(NewIntegerVariable(-4, 0))}; + + const IntegerVariable sum = + model.Add(NewWeightedSum(std::vector{3, 3, 3}, vars)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_EQ(model.Get(LowerBound(sum)), -15 * 3); + EXPECT_EQ(model.Get(UpperBound(sum)), 0); + + // Setting this leave only a slack of 5 which is not an exact multiple of 3. 
+ model.Add(LowerOrEqual(sum, -40)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(vars[0], -5, -4); + EXPECT_BOUNDS_EQ(vars[1], -6, -5); + EXPECT_BOUNDS_EQ(vars[2], -4, -3); +} + +TEST(ReifiedWeightedSumLeTest, ReifToBoundPropagation) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(4, 9)); + AddWeightedSumLowerOrEqualReif(r, {var}, std::vector{1}, 6, &model); + EXPECT_EQ( + SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions({r})); + EXPECT_BOUNDS_EQ(var, 4, 6); + EXPECT_EQ(SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions( + {r.Negated()})); + EXPECT_BOUNDS_EQ(var, 7, 9); // The associated literal (x <= 6) is false. +} + +TEST(ReifiedWeightedSumLeTest, ReifToBoundPropagationWithNegatedCoeff) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(-9, 9)); + AddWeightedSumLowerOrEqualReif(r, {var}, std::vector{-3}, 7, &model); + EXPECT_EQ( + SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions({r})); + EXPECT_BOUNDS_EQ(var, -2, 9); + EXPECT_EQ(SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions( + {r.Negated()})); + EXPECT_BOUNDS_EQ(var, -9, -3); // The associated literal (x >= -2) is false. 
+} + +TEST(ReifiedWeightedSumGeTest, ReifToBoundPropagation) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(4, 9)); + AddWeightedSumGreaterOrEqualReif(r, {var}, std::vector{1}, 6, + &model); + EXPECT_EQ( + SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions({r})); + EXPECT_BOUNDS_EQ(var, 6, 9); + EXPECT_EQ(SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions( + {r.Negated()})); + EXPECT_BOUNDS_EQ(var, 4, 5); +} + +TEST(ReifiedFixedWeightedSumTest, ReifToBoundPropagation) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(4, 9)); + AddFixedWeightedSumReif(r, {var}, std::vector{1}, 6, &model); + EXPECT_EQ( + SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions({r})); + EXPECT_BOUNDS_EQ(var, 6, 6); + + // Because we introduced intermediate Boolean, we decide if var is < 6 or > 6. 
+ EXPECT_EQ(SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions( + {r.Negated()})); + if (model.Get(LowerBound(var)) == 4) { + EXPECT_BOUNDS_EQ(var, 4, 5); + } else { + EXPECT_BOUNDS_EQ(var, 7, 9); + } +} + +TEST(ReifiedWeightedSumTest, BoundToReifTrueLe) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(4, 9)); + AddWeightedSumLowerOrEqualReif(r, {var}, std::vector{1}, 9, &model); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_TRUE(model.Get(Value(r))); +} + +TEST(ReifiedWeightedSumTest, BoundToReifFalseLe) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(4, 9)); + AddWeightedSumLowerOrEqualReif(r, {var}, std::vector{1}, 3, &model); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_FALSE(model.Get(Value(r))); +} + +TEST(ReifiedWeightedSumTest, BoundToReifTrueEq) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(4, 4)); + AddFixedWeightedSumReif(r, {var}, std::vector{1}, 4, &model); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_TRUE(model.Get(Value(r))); +} + +TEST(ReifiedWeightedSumTest, BoundToReifFalseEq1) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(4, 6)); + AddFixedWeightedSumReif(r, {var}, std::vector{1}, 8, &model); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_FALSE(model.Get(Value(r))); +} + +TEST(ReifiedWeightedSumTest, BoundToReifFalseEq2) { + Model model; + const Literal r = Literal(model.Add(NewBooleanVariable()), true); + const IntegerVariable var = model.Add(NewIntegerVariable(4, 6)); + AddFixedWeightedSumReif(r, {var}, std::vector{1}, 3, &model); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + 
EXPECT_FALSE(model.Get(Value(r))); +} + +TEST(ConditionalLbTest, BasicPositiveCase) { + Model model; + const IntegerVariable var = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable obj = model.Add(NewIntegerVariable(-10, 10)); + + std::vector vars{var, obj}; + std::vector coeffs{6, -2}; + const IntegerValue rhs = 4; + IntegerSumLE constraint({}, vars, coeffs, rhs, &model); + + // We have 2 * obj >= 6 * var - 4. + const auto result = + constraint.ConditionalLb(IntegerLiteral::GreaterOrEqual(var, 1), obj); + EXPECT_EQ(result.first, -2); // When false. + EXPECT_EQ(result.second, 1); // When true. + + // We have 2 * obj >= 6 * var - 4. + const auto result2 = + constraint.ConditionalLb(IntegerLiteral::GreaterOrEqual(var, 3), obj); + EXPECT_EQ(result2.first, -2); // When false. + EXPECT_EQ(result2.second, 7); // When true. +} + +TEST(ConditionalLbTest, CornerCase) { + Model model; + const IntegerVariable var = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable obj = model.Add(NewIntegerVariable(-10, 10)); + + std::vector vars{var, obj}; + std::vector coeffs{6, -2}; + const IntegerValue rhs = 4; + IntegerSumLE constraint({}, vars, coeffs, rhs, &model); + + // Here we don't even look at the equation. + const auto result = + constraint.ConditionalLb(IntegerLiteral::GreaterOrEqual(obj, 2), obj); + EXPECT_EQ(result.first, kMinIntegerValue); // When false. + EXPECT_EQ(result.second, 2); // When true. + + const auto result2 = + constraint.ConditionalLb(IntegerLiteral::LowerOrEqual(obj, 3), obj); + EXPECT_EQ(result2.first, 4); // When false. + EXPECT_EQ(result2.second, kMinIntegerValue); // When true. 
+} + +TEST(ConditionalLbTest, BasicNegativeCase) { + Model model; + const IntegerVariable var = model.Add(NewIntegerVariable(0, 1)); + const IntegerVariable obj = model.Add(NewIntegerVariable(-10, 10)); + + std::vector vars{var, obj}; + std::vector coeffs{-6, -1}; + const IntegerValue rhs = -4; + IntegerSumLE constraint({}, vars, coeffs, rhs, &model); + + // We have obj >= 4 - 6 * var. + const auto result = + constraint.ConditionalLb(IntegerLiteral::LowerOrEqual(var, 0), obj); + EXPECT_EQ(result.first, -2); // false, var <= 1 + EXPECT_EQ(result.second, 4); // true, var <= 0. +} + +TEST(MinMaxTest, LevelZeroPropagation) { + Model model; + std::vector vars{model.Add(NewIntegerVariable(4, 9)), + model.Add(NewIntegerVariable(2, 7)), + model.Add(NewIntegerVariable(3, 8))}; + const IntegerVariable min = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable max = model.Add(NewIntegerVariable(0, 10)); + model.Add(IsEqualToMinOf(min, vars)); + model.Add(IsEqualToMaxOf(max, vars)); + + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(min, 2, 7); + EXPECT_BOUNDS_EQ(max, 4, 9); + + model.Add(LowerOrEqual(min, 5)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(min, 2, 5); + + model.Add(GreaterOrEqual(max, 7)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(max, 7, 9); + + // Test the propagation in the other direction (PrecedencesPropagator). 
+ model.Add(GreaterOrEqual(min, 5)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(vars[0], 5, 9); + EXPECT_BOUNDS_EQ(vars[1], 5, 7); + EXPECT_BOUNDS_EQ(vars[2], 5, 8); + + model.Add(LowerOrEqual(max, 8)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(vars[0], 5, 8); + EXPECT_BOUNDS_EQ(vars[1], 5, 7); + EXPECT_BOUNDS_EQ(vars[2], 5, 8); +} + +TEST(LinMinMaxTest, LevelZeroPropagation) { + Model model; + std::vector vars{model.Add(NewIntegerVariable(4, 9)), + model.Add(NewIntegerVariable(2, 7)), + model.Add(NewIntegerVariable(3, 8))}; + std::vector exprs; + for (const IntegerVariable var : vars) { + LinearExpression expr; + expr.vars.push_back(var); + expr.coeffs.push_back(1); + exprs.push_back(expr); + } + const IntegerVariable min = model.Add(NewIntegerVariable(-100, 100)); + LinearExpression min_expr; + min_expr.vars.push_back(min); + min_expr.coeffs.push_back(1); + model.Add(IsEqualToMinOf(min_expr, exprs)); + + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(min, 2, 7); + + model.Add(LowerOrEqual(min, 5)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(min, 2, 5); + + // Test the propagation in the other direction (PrecedencesPropagator). + model.Add(GreaterOrEqual(min, 5)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(vars[0], 5, 9); + EXPECT_BOUNDS_EQ(vars[1], 5, 7); + EXPECT_BOUNDS_EQ(vars[2], 5, 8); +} + +TEST(MinTest, OnlyOnePossibleCandidate) { + Model model; + std::vector vars{model.Add(NewIntegerVariable(4, 7)), + model.Add(NewIntegerVariable(2, 9)), + model.Add(NewIntegerVariable(5, 8))}; + const IntegerVariable min = model.Add(NewIntegerVariable(0, 10)); + model.Add(IsEqualToMinOf(min, vars)); + + // So far everything is normal. 
+ EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(min, 2, 7); + + // But now, if the min is known to be <= 3, the minimum variable is known! it + // has to be variable #1, so we can propagate its upper bound. + model.Add(LowerOrEqual(min, 3)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(min, 2, 3); + EXPECT_BOUNDS_EQ(vars[1], 2, 3); + + // Test infeasibility. + model.Add(LowerOrEqual(min, 1)); + EXPECT_EQ(SatSolver::INFEASIBLE, model.GetOrCreate()->Solve()); +} + +TEST(LinMinTest, OnlyOnePossibleCandidate) { + Model model; + std::vector vars{model.Add(NewIntegerVariable(4, 7)), + model.Add(NewIntegerVariable(2, 9)), + model.Add(NewIntegerVariable(5, 8))}; + std::vector exprs; + for (const IntegerVariable var : vars) { + LinearExpression expr; + expr.vars.push_back(var); + expr.coeffs.push_back(1); + exprs.push_back(expr); + } + const IntegerVariable min = model.Add(NewIntegerVariable(-100, 100)); + LinearExpression min_expr; + min_expr.vars.push_back(min); + min_expr.coeffs.push_back(1); + model.Add(IsEqualToMinOf(min_expr, exprs)); + + // So far everything is normal. + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(min, 2, 7); + + // But now, if the min is known to be <= 3, the minimum variable is known! it + // has to be variable #1, so we can propagate its upper bound. + model.Add(LowerOrEqual(min, 3)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(min, 2, 3); + EXPECT_BOUNDS_EQ(vars[1], 2, 3); + + // Test infeasibility. 
+ model.Add(LowerOrEqual(min, 1)); + EXPECT_EQ(SatSolver::INFEASIBLE, model.GetOrCreate()->Solve()); +} + +TEST(LinMinTest, OnlyOnePossibleExpr) { + Model model; + std::vector vars{model.Add(NewIntegerVariable(1, 2)), + model.Add(NewIntegerVariable(0, 3)), + model.Add(NewIntegerVariable(-2, 4))}; + std::vector exprs; + IntegerTrail* integer_trail = model.GetOrCreate(); + LinearExpression expr1; // 2x0 + 3x1 - 5 + expr1.vars = {vars[0], vars[1]}; + expr1.coeffs = {2, 3}; + expr1.offset = -5; + expr1 = CanonicalizeExpr(expr1); + EXPECT_EQ(-3, expr1.Min(*integer_trail)); + EXPECT_EQ(8, expr1.Max(*integer_trail)); + + LinearExpression expr2; // 2x1 - 5x2 + 6 + expr2.vars = {vars[1], vars[2]}; + expr2.coeffs = {2, -5}; + expr2.offset = 6; + expr2 = CanonicalizeExpr(expr2); + EXPECT_EQ(-14, expr2.Min(*integer_trail)); + EXPECT_EQ(22, expr2.Max(*integer_trail)); + + LinearExpression expr3; // 2x0 + 3x2 + expr3.vars = {vars[0], vars[2]}; + expr3.coeffs = {2, 3}; + expr3 = CanonicalizeExpr(expr3); + EXPECT_EQ(-4, expr3.Min(*integer_trail)); + EXPECT_EQ(16, expr3.Max(*integer_trail)); + + exprs.push_back(expr1); + exprs.push_back(expr2); + exprs.push_back(expr3); + IntegerVariable min = model.Add(NewIntegerVariable(-100, 100)); + LinearExpression min_expr; + min_expr.vars.push_back(min); + min_expr.coeffs.push_back(1); + model.Add(IsEqualToMinOf(min_expr, exprs)); + + // So far everything is normal. + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(min, -14, 8); + + // But now, if the min is known to be <= -5, the minimum expression has to be + // expr 2, so we can propagate its upper bound. 
+ model.Add(LowerOrEqual(min, -5)); + EXPECT_EQ(SatSolver::FEASIBLE, model.GetOrCreate()->Solve()); + EXPECT_BOUNDS_EQ(min, -14, -5); + EXPECT_BOUNDS_EQ(vars[0], 1, 2); + EXPECT_BOUNDS_EQ(vars[1], 0, 3); + EXPECT_BOUNDS_EQ(vars[2], 3, 4); + // NOTE: The expression bound is not as tight because the underlying variable + // bounds can't be propagated enough without throwing away valid solutions. + EXPECT_LE(expr2.Max(*integer_trail), -3); + + // Test infeasibility. + model.Add(LowerOrEqual(min, -15)); + EXPECT_EQ(SatSolver::INFEASIBLE, model.GetOrCreate()->Solve()); +} + +TEST(OneOfTest, BasicPropagation) { + Model model; + + IntegerVariable var = model.Add(NewIntegerVariable(0, 10)); + std::vector selectors; + for (int i = 0; i < 5; ++i) { + selectors.push_back(Literal(model.Add(NewBooleanVariable()), true)); + } + std::vector values{5, 0, 3, 3, 9}; + + model.Add(IsOneOf(var, selectors, values)); + + // We start with nothing fixed and then start fixing variables. + SatSolver* solver = model.GetOrCreate(); + EXPECT_TRUE(solver->Propagate()); + EXPECT_BOUNDS_EQ(var, 0, 9); + EXPECT_TRUE(solver->EnqueueDecisionIfNotConflicting(selectors[1].Negated())); + EXPECT_BOUNDS_EQ(var, 3, 9); + EXPECT_TRUE(solver->EnqueueDecisionIfNotConflicting(selectors[4].Negated())); + EXPECT_BOUNDS_EQ(var, 3, 5); + EXPECT_TRUE(solver->EnqueueDecisionIfNotConflicting(selectors[2].Negated())); + EXPECT_BOUNDS_EQ(var, 3, 5); + EXPECT_TRUE(solver->EnqueueDecisionIfNotConflicting(selectors[3].Negated())); + EXPECT_BOUNDS_EQ(var, 5, 5); + + // Now we restrict the possible values by changing the bound. + solver->Backtrack(0); + model.Add(LowerOrEqual(var, 3)); + EXPECT_EQ( + SatSolver::FEASIBLE, + model.GetOrCreate()->ResetAndSolveWithGivenAssumptions({})); + EXPECT_FALSE(model.Get(Value(selectors[0]))); + EXPECT_FALSE(model.Get(Value(selectors[4]))); +} + +// Propagates a * b = p by hand. Return false if the domains are empty, +// otherwise returns true and the expected domains value. 
This is slow and
+// works in O(domain(a).size() * domain(b).size())!
+bool TestProductPropagation(const IntegerTrail& trail,
+                            std::vector<IntegerVariable> vars,
+                            std::vector<IntegerValue>* expected_mins,
+                            std::vector<IntegerValue>* expected_maxs) {
+  const IntegerValue min_a = trail.LowerBound(vars[0]);
+  const IntegerValue max_a = trail.UpperBound(vars[0]);
+  const IntegerValue min_b = trail.LowerBound(vars[1]);
+  const IntegerValue max_b = trail.UpperBound(vars[1]);
+  const IntegerValue min_p = trail.LowerBound(vars[2]);
+  const IntegerValue max_p = trail.UpperBound(vars[2]);
+
+  std::vector<absl::btree_set<IntegerValue>> new_values(3);
+  for (IntegerValue va(min_a); va <= max_a; ++va) {
+    for (IntegerValue vb(min_b); vb <= max_b; ++vb) {
+      const IntegerValue vp = va * vb;
+      if (vp >= min_p && vp <= max_p) {
+        new_values[0].insert(va);
+        new_values[1].insert(vb);
+        new_values[2].insert(vp);
+      }
+    }
+  }
+  if (new_values[0].empty() || new_values[1].empty() || new_values[2].empty()) {
+    return false;
+  }
+
+  expected_mins->clear();
+  expected_maxs->clear();
+  for (int i = 0; i < 3; ++i) {
+    std::vector<IntegerValue> sorted(new_values[i].begin(),
+                                     new_values[i].end());
+    expected_mins->push_back(sorted.front());
+    expected_maxs->push_back(sorted.back());
+  }
+  return true;
+}
+
+TEST(ProductConstraintTest, RandomCases) {
+  absl::BitGen random;
+
+  int num_non_perfect = 0;
+  const int num_tests = 1000;
+  for (int i = 0; i < num_tests; ++i) {
+    Model model;
+    IntegerTrail* integer_trail = model.GetOrCreate<IntegerTrail>();
+    std::vector<IntegerVariable> vars;
+    std::string input_string;
+    for (int v = 0; v < 3; ++v) {
+      const int limit = v < 2 ? 20 : 200;
+      int64_t min = absl::Uniform(random, -limit, limit);
+      int64_t max = absl::Uniform(random, -limit, limit);
+      if (min > max) std::swap(min, max);
+      absl::StrAppend(&input_string,
+                      (v == 1   ? " * "
+                       : v == 2 ? " = "
+                                : ""),
+                      "[", min, ", ", max, "]");
+      vars.push_back(model.Add(NewIntegerVariable(min, max)));
+    }
+
+    // Start by computing the expected result.
+ std::vector expected_mins; + std::vector expected_maxs; + const bool expected_result = TestProductPropagation( + *integer_trail, vars, &expected_mins, &expected_maxs); + + bool perfect_propagation = true; + bool ok_propagation = true; + model.Add(ProductConstraint(vars[0], vars[1], vars[2])); + const bool result = model.GetOrCreate()->Propagate(); + if (expected_result != result) { + if (expected_result) { + ok_propagation = false; + } else { + // If the exact result is UNSAT, we might not have seen that. + perfect_propagation = false; + } + } + std::string expected_string; + std::string result_string; + for (int i = 0; i < 3; ++i) { + const int64_t lb = integer_trail->LowerBound(vars[i]).value(); + const int64_t ub = integer_trail->UpperBound(vars[i]).value(); + if (expected_result) { + if (expected_mins[i] != lb) perfect_propagation = false; + if (expected_mins[i] < lb) ok_propagation = false; + if (expected_maxs[i] != ub) perfect_propagation = false; + if (expected_maxs[i] > ub) ok_propagation = false; + + // We should always be exact on the domain of a and b. 
+ if (i < 2 && !perfect_propagation) { + ok_propagation = false; + } + absl::StrAppend(&expected_string, "[", expected_mins[i].value(), ", ", + expected_maxs[i].value(), "] "); + } + + if (result) { + absl::StrAppend(&result_string, "[", lb, ", ", ub, "] "); + } + } + if (!perfect_propagation || !ok_propagation) { + VLOG(1) << "Imperfect on input: " << input_string; + if (expected_result) { + VLOG(1) << "Expected: " << expected_string; + if (result) { + VLOG(1) << "Result: " << result_string; + } else { + VLOG(1) << "UNSAT was received."; + } + } else { + VLOG(1) << "Result: " << result_string; + VLOG(1) << "UNSAT was expected."; + } + ++num_non_perfect; + } + ASSERT_TRUE(ok_propagation); + } + + // Unfortunately our TestProductPropagation() is too good and in some corner + // cases like when the product is [18, 19] it can detect stuff like the + // product 19 (which is prime) can't be reached by any product a * b, + // whereas our propagator doesn't see that! + LOG(INFO) << "Num imperfect: " << num_non_perfect << " / " << num_tests; + EXPECT_LT(num_non_perfect, num_tests / 2); +} + +TEST(ProductConstraintTest, RestrictedProductDomainPosPos) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'y' domain: 0 domain: 3 } + variables { name: 'x' domain: 0 domain: 2 } + variables { name: 'p' domain: 0 domain: 4 } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{ + {0, 0, 0}, {0, 1, 0}, {0, 2, 0}, {1, 0, 0}, {1, 1, 1}, {1, 2, 2}, + {2, 0, 0}, {2, 1, 2}, {2, 2, 4}, {3, 0, 0}, {3, 1, 3}, + }; + EXPECT_EQ(solutions, expected); +} + +TEST(ProductConstraintTest, RestrictedProductDomainPosNeg) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'y' domain: 
0 domain: 3 } + variables { name: 'x' domain: -2 domain: 0 } + variables { name: 'p' domain: -4 domain: 0 } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{ + {0, 0, 0}, {0, -1, 0}, {0, -2, 0}, {1, 0, 0}, {1, -1, -1}, {1, -2, -2}, + {2, 0, 0}, {2, -1, -2}, {2, -2, -4}, {3, 0, 0}, {3, -1, -3}, + }; + EXPECT_EQ(solutions, expected); +} + +TEST(ProductConstraintTest, RestrictedProductDomainNegPos) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'y' domain: -3 domain: 0 } + variables { name: 'x' domain: 0 domain: 2 } + variables { name: 'p' domain: -4 domain: 0 } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{ + {0, 0, 0}, {0, 1, 0}, {0, 2, 0}, {-1, 0, 0}, + {-1, 1, -1}, {-1, 2, -2}, {-2, 0, 0}, {-2, 1, -2}, + {-2, 2, -4}, {-3, 0, 0}, {-3, 1, -3}, + }; + EXPECT_EQ(solutions, expected); +} + +TEST(ProductConstraintTest, RestrictedProductDomainNegNeg) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'y' domain: -3 domain: 0 } + variables { name: 'x' domain: -2 domain: 0 } + variables { name: 'p' domain: 0 domain: 4 } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{ + {0, 0, 0}, {0, -1, 0}, {0, -2, 0}, {-1, 
0, 0}, + {-1, -1, 1}, {-1, -2, 2}, {-2, 0, 0}, {-2, -1, 2}, + {-2, -2, 4}, {-3, 0, 0}, {-3, -1, 3}, + }; + EXPECT_EQ(solutions, expected); +} + +TEST(ProductConstraintTest, ProductIsNull) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'y' domain: 0 domain: 3 } + variables { name: 'x' domain: 0 domain: 2 } + variables { name: 'p' domain: 0 domain: 6 } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + constraints { linear { vars: 2 coeffs: 1 domain: 0 domain: 0 } } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{{0, 0, 0}, {0, 1, 0}, {0, 2, 0}, + {1, 0, 0}, {2, 0, 0}, {3, 0, 0}}; + EXPECT_EQ(solutions, expected); +} + +TEST(ProductConstraintTest, CheckAllSolutionsRandomProblem) { + absl::BitGen random; + const int kMaxValue = 50; + const int kNumLoops = DEBUG_MODE ? 50 : 100; + + for (int loop = 0; loop < kNumLoops; ++loop) { + CpModelProto cp_model; + int x_min = absl::Uniform(random, -kMaxValue, kMaxValue); + int x_max = absl::Uniform(random, -kMaxValue, kMaxValue); + if (x_min > x_max) std::swap(x_min, x_max); + IntegerVariableProto* x = cp_model.add_variables(); + x->add_domain(x_min); + x->add_domain(x_max); + + int y_min = absl::Uniform(random, -kMaxValue, kMaxValue); + int y_max = absl::Uniform(random, -kMaxValue, kMaxValue); + if (y_min > y_max) std::swap(y_min, y_max); + IntegerVariableProto* y = cp_model.add_variables(); + y->add_domain(y_min); + y->add_domain(y_max); + + int z_min = absl::Uniform(random, -kMaxValue, kMaxValue); + int z_max = absl::Uniform(random, -kMaxValue, kMaxValue); + if (z_min > z_max) std::swap(z_min, z_max); + IntegerVariableProto* z = cp_model.add_variables(); + z->add_domain(z_min); + z->add_domain(z_max); + + // z == x * y. 
+ LinearArgumentProto* prod = cp_model.add_constraints()->mutable_int_prod(); + prod->add_exprs()->add_vars(0); // x. + prod->mutable_exprs(0)->add_coeffs(1); + prod->add_exprs()->add_vars(1); // y + prod->mutable_exprs(1)->add_coeffs(1); + prod->mutable_target()->add_vars(2); // z + prod->mutable_target()->add_coeffs(1); + + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(cp_model, "linearization_level:0", &solutions); + + // Loop through the domains of x and y, and collect valid solutions. + absl::btree_set> expected; + for (int i = x_min; i <= x_max; ++i) { + for (int j = y_min; j <= y_max; ++j) { + const int k = i * j; + if (k < z_min || k > z_max) continue; + expected.insert({i, j, k}); + } + } + + // Checks that we get the same solution set through the two methods. + EXPECT_EQ(solutions, expected); + } +} + +TEST(ProductPropagationTest, RightAcrossZero) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'y' domain: 2 domain: 4 } + variables { name: 'x' domain: -6 domain: 6 } + variables { name: 'p' domain: -30 domain: 30 } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{ + {2, -6, -12}, {3, -6, -18}, {4, -6, -24}, {2, -5, -10}, {3, -5, -15}, + {4, -5, -20}, {2, -4, -8}, {3, -4, -12}, {4, -4, -16}, {2, -3, -6}, + {3, -3, -9}, {4, -3, -12}, {2, -2, -4}, {3, -2, -6}, {4, -2, -8}, + {2, -1, -2}, {3, -1, -3}, {4, -1, -4}, {2, 0, 0}, {3, 0, 0}, + {4, 0, 0}, {2, 1, 2}, {3, 1, 3}, {4, 1, 4}, {2, 2, 4}, + {3, 2, 6}, {4, 2, 8}, {2, 3, 6}, {3, 3, 9}, {4, 3, 12}, + {2, 4, 8}, {3, 4, 12}, {4, 4, 16}, {2, 5, 10}, {3, 5, 15}, + {4, 5, 20}, {2, 6, 12}, {3, 6, 18}, {4, 6, 24}, + }; + EXPECT_EQ(solutions.size(), 3 * 13); + EXPECT_EQ(solutions, 
expected); +} + +TEST(ProductPropagationTest, BothAcrossZero) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'y' domain: -2 domain: 3 } + variables { name: 'x' domain: -3 domain: 2 } + variables { name: 'p' domain: -10 domain: 10 } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{ + {-2, -3, 6}, {-2, -2, 4}, {-2, -1, 2}, {-2, 0, 0}, {-2, 1, -2}, + {-2, 2, -4}, {-1, -3, 3}, {-1, -2, 2}, {-1, -1, 1}, {-1, 0, 0}, + {-1, 1, -1}, {-1, 2, -2}, {0, -3, 0}, {0, -2, 0}, {0, -1, 0}, + {0, 0, 0}, {0, 1, 0}, {0, 2, 0}, {1, -3, -3}, {1, -2, -2}, + {1, -1, -1}, {1, 0, 0}, {1, 1, 1}, {1, 2, 2}, {2, -3, -6}, + {2, -2, -4}, {2, -1, -2}, {2, 0, 0}, {2, 1, 2}, {2, 2, 4}, + {3, -3, -9}, {3, -2, -6}, {3, -1, -3}, {3, 0, 0}, {3, 1, 3}, + {3, 2, 6}}; + EXPECT_EQ(solutions.size(), 6 * 6); + EXPECT_EQ(solutions, expected); +} + +TEST(ProductPropagationTest, BothAcrossZeroWithRangeRestriction) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { name: 'y' domain: -2 domain: 3 } + variables { name: 'x' domain: -3 domain: 2 } + variables { name: 'p' domain: -3 domain: 4 } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{ + {-2, -2, 4}, {-2, -1, 2}, {-2, 0, 0}, {-2, 1, -2}, {-1, -3, 3}, + {-1, -2, 2}, {-1, -1, 1}, {-1, 0, 0}, {-1, 1, -1}, {-1, 2, -2}, + {0, -3, 0}, {0, -2, 0}, {0, -1, 0}, {0, 0, 0}, {0, 1, 0}, + {0, 2, 0}, {1, -3, -3}, {1, -2, -2}, {1, -1, -1}, {1, 0, 0}, + {1, 1, 1}, {1, 2, 2}, {2, -1, -2}, 
{2, 0, 0}, {2, 1, 2}, + {2, 2, 4}, {3, -1, -3}, {3, 0, 0}, {3, 1, 3}, + }; + EXPECT_EQ(solutions, expected); +} + +TEST(ProductPropagationTest, BothAcrossZeroWithPositiveTarget) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ -2, 6 ] } + variables { domain: [ -2, 6 ] } + variables { domain: [ 12, 12 ] } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{ + {2, 6, 12}, {3, 4, 12}, {4, 3, 12}, {6, 2, 12}}; + EXPECT_EQ(solutions, expected); +} + +TEST(ProductPropagationTest, BothAcrossZeroWithFarPositiveTarget) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ -2, 6 ] } + variables { domain: [ -2, 6 ] } + variables { domain: [ 15, 15 ] } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{{3, 5, 15}, {5, 3, 15}}; + EXPECT_EQ(solutions, expected); +} + +TEST(ProductPropagationTest, BothAcrossZeroWithNegativeTarget) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ -2, 6 ] } + variables { domain: [ -2, 6 ] } + variables { domain: [ -12, -12 ] } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{{-2, 6, -12}, {6, -2, -12}}; + EXPECT_EQ(solutions, expected); +} + 
+TEST(ProductPropagationTest, LargePositiveDomain) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: 0 domain: 3000000000 } + variables { domain: 0 domain: 3000000000 } + variables { domain: [ -30, -15, 15, 30 ] } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + const Domain dp = ReadDomainFromProto(initial_model.variables(2)); + absl::btree_set> expected; + for (int vx = 0; vx <= 30; ++vx) { + for (int vy = 0; vy <= 30; ++vy) { + if (dp.Contains(vx * vy)) { + expected.insert(std::vector{vx, vy, vx * vy}); + } + } + } + EXPECT_EQ(solutions, expected); +} + +TEST(ProductPropagationTest, LargeDomain) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: -30 domain: 3000000000 } + variables { domain: -30 domain: 3000000000 } + variables { domain: [ -30, -15, 15, 30 ] } + constraints { + int_prod { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + const Domain dp = ReadDomainFromProto(initial_model.variables(2)); + absl::btree_set> expected; + for (int vx = -30; vx <= 30; ++vx) { + for (int vy = -30; vy <= 30; ++vy) { + if (dp.Contains(vx * vy)) { + expected.insert(std::vector{vx, vy, vx * vy}); + } + } + } + EXPECT_EQ(solutions, expected); +} + +TEST(DivisionConstraintTest, CheckAllSolutions) { + absl::BitGen random; + const int kMaxValue = 100; + const int kShift = 10; + const int kNumLoops = DEBUG_MODE ? 100 : 1000; + + for (int loop = 0; loop < kNumLoops; ++loop) { + // Generate domains for x, y, and z. + // z is meant to be roughly compatible with x / y. 
There can still be no + // feasible solutions. + CpModelProto cp_model; + const int x_min = absl::Uniform(random, -kMaxValue, kMaxValue); + const int x_max = absl::Uniform(random, x_min, kMaxValue); + IntegerVariableProto* x = cp_model.add_variables(); + x->add_domain(x_min); + x->add_domain(x_max); + + const int y_min = absl::Uniform(random, 1, kMaxValue); + const int y_max = absl::Uniform(random, y_min, kMaxValue); + IntegerVariableProto* y = cp_model.add_variables(); + y->add_domain(y_min); + y->add_domain(y_max); + + const int z_min = std::max( + x_min / y_max + absl::Uniform(random, -kShift, kShift), 0); + const int z_max = std::max( + z_min, x_max / y_min + absl::Uniform(random, -kShift, kShift)); + IntegerVariableProto* z = cp_model.add_variables(); + z->add_domain(z_min); + z->add_domain(z_max); + + // z == x / y. + LinearArgumentProto* div = cp_model.add_constraints()->mutable_int_div(); + div->add_exprs()->add_vars(0); // x. + div->mutable_exprs(0)->add_coeffs(1); + div->add_exprs()->add_vars(1); // y + div->mutable_exprs(1)->add_coeffs(1); + div->mutable_target()->add_vars(2); // z + div->mutable_target()->add_coeffs(1); + + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(cp_model, "linearization_level:0", &solutions); + + // Loop through the domains of x and y, and collect valid solutions. + absl::btree_set> expected; + for (int i = x_min; i <= x_max; ++i) { + for (int j = y_min; j <= y_max; ++j) { + const int k = i / j; + if (k < z_min || k > z_max) continue; + expected.insert({i, j, k}); + } + } + + // Checks that we get the same solution set through the two methods. + EXPECT_EQ(solutions, expected) + << "x = [" << x_min << ".." << x_max << "], y = [" << y_min << ".." + << y_max << "], z = [" << z_min << ".." 
<< z_max << "]\n---------\n" + << ProtobufDebugString(cp_model) << "---------\n"; + } +} + +TEST(DivisionConstraintTest, NumeratorAcrossZeroPositiveDenom) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ -2, 6 ] } + variables { domain: [ 2, 4 ] } + variables { domain: [ -1, 3 ] } + constraints { + int_div { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "linearization_level:0", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{ + {-2, 2, -1}, {-2, 3, 0}, {-2, 4, 0}, {-1, 2, 0}, {-1, 3, 0}, {-1, 4, 0}, + {0, 2, 0}, {0, 3, 0}, {0, 4, 0}, {1, 2, 0}, {1, 3, 0}, {1, 4, 0}, + {2, 2, 1}, {2, 3, 0}, {2, 4, 0}, {3, 2, 1}, {3, 3, 1}, {3, 4, 0}, + {4, 2, 2}, {4, 3, 1}, {4, 4, 1}, {5, 2, 2}, {5, 3, 1}, {5, 4, 1}, + {6, 2, 3}, {6, 3, 2}, {6, 4, 1}}; + EXPECT_EQ(solutions, expected); +} + +TEST(DivisionConstraintTest, NumeratorAcrossZeroNegativeDenom) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ -2, 6 ] } + variables { domain: [ -4, -2 ] } + variables { domain: [ -3, 1 ] } + constraints { + int_div { + target { vars: 2 coeffs: 1 } + exprs { vars: 0 coeffs: 1 } + exprs { vars: 1 coeffs: 1 } + } + } + )pb"); + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "linearization_level:0", &solutions); + EXPECT_EQ(OPTIMAL, response.status()); + absl::btree_set> expected{ + {-2, -4, 0}, {-2, -3, 0}, {-2, -2, 1}, {-1, -4, 0}, {-1, -3, 0}, + {-1, -2, 0}, {0, -4, 0}, {0, -3, 0}, {0, -2, 0}, {1, -4, 0}, + {1, -3, 0}, {1, -2, 0}, {2, -4, 0}, {2, -3, 0}, {2, -2, -1}, + {3, -4, 0}, {3, -3, -1}, {3, -2, -1}, {4, -4, -1}, {4, -3, -1}, + {4, -2, -2}, {5, -4, -1}, {5, -3, -1}, {5, -2, -2}, {6, -4, -1}, + {6, -3, -2}, {6, -2, -3}}; + EXPECT_EQ(solutions, expected); +} + 
+TEST(DivisionConstraintTest, CheckAllPropagationsRandomProblem) { + absl::BitGen random; + const int kMaxValue = 50; + const int kMaxDenom = 10; + const int kNumLoops = DEBUG_MODE ? 5000 : 100000; + + for (int loop = 0; loop < kNumLoops; ++loop) { + // Generate domains for x, y, and z. + int x_min = absl::Uniform(random, -kMaxValue, kMaxValue); + int x_max = absl::Uniform(random, -kMaxValue, kMaxValue); + if (x_min > x_max) std::swap(x_min, x_max); + int y_min = absl::Uniform(random, 1, kMaxDenom); + int y_max = absl::Uniform(random, 1, kMaxDenom); + if (y_min > y_max) std::swap(y_min, y_max); + int z_min = absl::Uniform(random, -kMaxValue, kMaxValue); + int z_max = absl::Uniform(random, -kMaxValue, kMaxValue); + if (z_min > z_max) std::swap(z_min, z_max); + + // Loop through the domains of x and y, and collect valid bounds. + int expected_x_min = std::numeric_limits::max(); + int expected_x_max = std::numeric_limits::min(); + int expected_y_min = std::numeric_limits::max(); + int expected_y_max = std::numeric_limits::min(); + int expected_z_min = std::numeric_limits::max(); + int expected_z_max = std::numeric_limits::min(); + for (int i = x_min; i <= x_max; ++i) { + for (int j = y_min; j <= y_max; ++j) { + const int k = i / j; + if (k < z_min || k > z_max) continue; + expected_x_min = std::min(expected_x_min, i); + expected_x_max = std::max(expected_x_max, i); + expected_y_min = std::min(expected_y_min, j); + expected_y_max = std::max(expected_y_max, j); + expected_z_min = std::min(expected_z_min, k); + expected_z_max = std::max(expected_z_max, k); + } + } + + Model model; + const IntegerVariable var_x = model.Add(NewIntegerVariable(x_min, x_max)); + const IntegerVariable var_y = model.Add(NewIntegerVariable(y_min, y_max)); + const IntegerVariable var_z = model.Add(NewIntegerVariable(z_min, z_max)); + model.Add(DivisionConstraint(var_x, var_y, var_z)); + const bool result = model.GetOrCreate()->Propagate(); + if (result) { + EXPECT_BOUNDS_EQ(var_x, 
expected_x_min, expected_x_max); + EXPECT_BOUNDS_EQ(var_y, expected_y_min, expected_y_max); + EXPECT_BOUNDS_EQ(var_z, expected_z_min, expected_z_max); + } else { + EXPECT_EQ(expected_x_max, std::numeric_limits::min()); + } + } +} + +TEST(DivisionConstraintTest, CheckAllSolutionsOnExprs) { + absl::BitGen random; + const int kMaxValue = 30; + const int kMaxCoeff = 5; + const int kMaxOffset = 10; + const int kNumLoops = DEBUG_MODE ? 100 : 10000; + + for (int loop = 0; loop < kNumLoops; ++loop) { + CpModelProto initial_model; + + // Create the numerator. + int num_var_min = absl::Uniform(random, -kMaxValue, kMaxValue); + int num_var_max = absl::Uniform(random, -kMaxValue, kMaxValue); + if (num_var_min > num_var_max) std::swap(num_var_min, num_var_max); + IntegerVariableProto* num_var_proto = initial_model.add_variables(); + num_var_proto->add_domain(num_var_min); + num_var_proto->add_domain(num_var_max); + const int64_t num_coeff = absl::Uniform(random, 1, kMaxCoeff) * + (absl::Bernoulli(random, 0.5) ? 1 : -1); + const int64_t num_offset = absl::Uniform(random, -kMaxOffset, kMaxOffset); + + // Create the denominator. Make sure 0 is not accessible. + int denom_var_min = absl::Uniform(random, -kMaxValue, kMaxValue); + int denom_var_max = absl::Uniform(random, -kMaxValue, kMaxValue); + if (denom_var_min > denom_var_max) std::swap(denom_var_min, denom_var_max); + const int64_t denom_coeff = absl::Uniform(random, 1, kMaxCoeff) * + (absl::Bernoulli(random, 0.5) ? 
1 : -1); + const int64_t denom_offset = absl::Uniform(random, -kMaxOffset, kMaxOffset); + if (denom_coeff == 0) continue; + Domain denom_var_domain = {denom_var_min, denom_var_max}; + const int64_t bad_value = -denom_offset / denom_coeff; + if (denom_var_domain.Contains(bad_value) && + bad_value * denom_coeff == -denom_offset) { + denom_var_domain = + denom_var_domain.IntersectionWith(Domain(bad_value).Complement()); + } + IntegerVariableProto* denom_var_proto = initial_model.add_variables(); + FillDomainInProto(denom_var_domain, denom_var_proto); + + int target_var_min = absl::Uniform(random, -kMaxValue, kMaxValue); + int target_var_max = absl::Uniform(random, -kMaxValue, kMaxValue); + if (target_var_min > target_var_max) + std::swap(target_var_min, target_var_max); + IntegerVariableProto* target_var_proto = initial_model.add_variables(); + target_var_proto->add_domain(target_var_min); + target_var_proto->add_domain(target_var_max); + const int64_t target_coeff = absl::Uniform(random, 1, kMaxCoeff) * + (absl::Bernoulli(random, 0.5) ? 1 : -1); + const int64_t target_offset = + absl::Uniform(random, -kMaxOffset, kMaxOffset); + + // target = num / denom. + LinearArgumentProto* div = + initial_model.add_constraints()->mutable_int_div(); + div->add_exprs()->add_vars(0); // num + div->mutable_exprs(0)->add_coeffs(num_coeff); + div->mutable_exprs(0)->set_offset(num_offset); + div->add_exprs()->add_vars(1); // denom + div->mutable_exprs(1)->add_coeffs(denom_coeff); + div->mutable_exprs(1)->set_offset(denom_offset); + div->mutable_target()->add_vars(2); // target + div->mutable_target()->add_coeffs(target_coeff); + div->mutable_target()->set_offset(target_offset); + + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "linearization_level:0", &solutions); + + // Loop through the domains of var and target, and collect valid solutions. 
+ absl::btree_set> expected; + for (int i = num_var_min; i <= num_var_max; ++i) { + const int num_value = num_coeff * i + num_offset; + for (const int j : denom_var_domain.Values()) { + const int denom_value = denom_coeff * j + denom_offset; + if (denom_value == 0) continue; + const int target_expr_value = num_value / denom_value; + const int target_var_value = + (target_expr_value - target_offset) / target_coeff; + if (target_var_value >= target_var_min && + target_var_value <= target_var_max && + target_var_value * target_coeff + target_offset == + target_expr_value) { + expected.insert({i, j, target_var_value}); + } + } + } + + // Checks that we get we get the same solution set through the two methods. + EXPECT_EQ(solutions, expected) + << "\n---------\n" + << ProtobufDebugString(initial_model) << "---------\n"; + } +} + +void TestAllDivisionValues(int64_t min_a, int64_t max_a, int64_t b, + int64_t min_c, int64_t max_c) { + int64_t true_min_a = std::numeric_limits::max(); + int64_t true_max_a = std::numeric_limits::min(); + int64_t true_min_c = std::numeric_limits::max(); + int64_t true_max_c = std::numeric_limits::min(); + for (int64_t a = min_a; a <= max_a; ++a) { + for (int64_t c = min_c; c <= max_c; ++c) { + if (a / b == c) { + true_min_a = std::min(true_min_a, a); + true_max_a = std::max(true_max_a, a); + true_min_c = std::min(true_min_c, c); + true_max_c = std::max(true_max_c, c); + } + } + } + Model model; + const AffineExpression var_a = + min_a == max_a + ? AffineExpression(IntegerValue(min_a)) + : AffineExpression(model.Add(NewIntegerVariable(min_a, max_a))); + const AffineExpression var_c = + min_c == max_c + ? 
AffineExpression(IntegerValue(min_c)) + : AffineExpression(model.Add(NewIntegerVariable(min_c, max_c))); + model.Add(FixedDivisionConstraint(var_a, IntegerValue(b), var_c)); + const bool result = model.GetOrCreate()->Propagate(); + IntegerTrail* integer_trail = model.GetOrCreate(); + if (result) { + EXPECT_EQ(integer_trail->LowerBound(var_a), true_min_a); + EXPECT_EQ(integer_trail->UpperBound(var_a), true_max_a); + EXPECT_EQ(integer_trail->LowerBound(var_c), true_min_c); + EXPECT_EQ(integer_trail->UpperBound(var_c), true_max_c); + } else { + EXPECT_EQ(true_min_a, std::numeric_limits::max()); // No solution. + } +} + +TEST(FixedDivisionConstraintTest, AllSmallValues) { + for (int b = 1; b < 7; ++b) { + for (int min_a = -10; min_a <= 10; ++min_a) { + for (int max_a = min_a; max_a <= 10; ++max_a) { + TestAllDivisionValues(min_a, max_a, b, -20, 20); + } + } + for (int min_c = -10; min_c <= 10; ++min_c) { + for (int max_c = min_c; max_c <= 10; ++max_c) { + TestAllDivisionValues(-100, 100, b, min_c, max_c); + } + } + } +} + +bool PropagateFixedDivision(int64_t a, int64_t max_a, int64_t b, int64_t c, + int64_t max_c, int64_t new_a, int64_t new_max_a, + int64_t new_c, int64_t new_max_c) { + Model model; + const IntegerVariable var_a = model.Add(NewIntegerVariable(a, max_a)); + const IntegerVariable var_c = model.Add(NewIntegerVariable(c, max_c)); + model.Add(FixedDivisionConstraint(var_a, IntegerValue(b), var_c)); + const bool result = model.GetOrCreate()->Propagate(); + if (result) { + EXPECT_BOUNDS_EQ(var_a, new_a, new_max_a); + EXPECT_BOUNDS_EQ(var_c, new_c, new_max_c); + } + return result; +} + +TEST(FixedDivisionConstraintTest, ExpectedPropagation) { + // Propagate from a to c. 
+ EXPECT_TRUE(PropagateFixedDivision(/*a=*/2, 21, /*b=*/3, /*c=*/-5, 10, + /*new_a=*/2, 21, /*new_c=*/0, 7)); + EXPECT_TRUE(PropagateFixedDivision(/*a=*/4, 20, /*b=*/3, /*c=*/0, 10, + /*new_a=*/4, 20, /*new_c=*/1, 6)); + EXPECT_TRUE(PropagateFixedDivision(/*a=*/-4, 20, /*b=*/3, /*c=*/-5, 10, + /*new_a=*/-4, 20, /*new_c=*/-1, 6)); + EXPECT_TRUE(PropagateFixedDivision(/*a=*/-15, -5, /*b=*/3, /*c=*/-10, 10, + /*new_a=*/-15, -5, /*new_c=*/-5, -1)); + // Propagate from c to a. + EXPECT_TRUE(PropagateFixedDivision(/*a=*/-10, 10, /*b=*/3, /*c=*/-2, 2, + /*new_a=*/-8, 8, /*new_c=*/-2, 2)); + EXPECT_TRUE(PropagateFixedDivision(/*a=*/-10, 10, /*b=*/3, /*c=*/1, 2, + /*new_a=*/3, 8, /*new_c=*/1, 2)); + EXPECT_TRUE(PropagateFixedDivision(/*a=*/-10, 10, /*b=*/3, /*c=*/0, 2, + /*new_a=*/-2, 8, /*new_c=*/0, 2)); + EXPECT_TRUE(PropagateFixedDivision(/*a=*/-10, 10, /*b=*/3, /*c=*/-2, -1, + /*new_a=*/-8, -3, /*new_c=*/-2, -1)); + EXPECT_TRUE(PropagateFixedDivision(/*a=*/-10, 10, /*b=*/3, /*c=*/-2, 0, + /*new_a=*/-8, 2, /*new_c=*/-2, 0)); + // Check large domains. + EXPECT_TRUE(PropagateFixedDivision( + /*a=*/0, std::numeric_limits::max() / 2, + /*b=*/5, /*c=*/3, std::numeric_limits::max() - 3, + /*new_a=*/15, std::numeric_limits::max() / 2, + /*new_c=*/3, std::numeric_limits::max() / 10)); + EXPECT_TRUE(PropagateFixedDivision( + /*a=*/0, std::numeric_limits::max() / 2, + /*b=*/5, /*c=*/3, std::numeric_limits::max() - 3, + /*new_a=*/15, std::numeric_limits::max() / 2, + /*new_c=*/3, std::numeric_limits::max() / 10)); +} + +TEST(ModuloConstraintTest, CheckAllSolutions) { + absl::BitGen random; + const int kMaxValue = 50; + const int kMaxModulo = 10; + const int kNumLoops = DEBUG_MODE ? 
200 : 2000; + + for (int loop = 0; loop < kNumLoops; ++loop) { + CpModelProto initial_model; + int var_min = absl::Uniform(random, -kMaxValue, kMaxValue); + int var_max = absl::Uniform(random, -kMaxValue, kMaxValue); + if (var_min > var_max) std::swap(var_min, var_max); + IntegerVariableProto* var = initial_model.add_variables(); + var->add_domain(var_min); + var->add_domain(var_max); + + const int mod = absl::Uniform(random, 1, kMaxModulo); + IntegerVariableProto* mod_var = initial_model.add_variables(); + mod_var->add_domain(mod); + mod_var->add_domain(mod); + + IntegerVariableProto* target = initial_model.add_variables(); + int target_min = + absl::Uniform(random, -2 * kMaxModulo, 2 * kMaxModulo); + int target_max = + absl::Uniform(random, -2 * kMaxModulo, 2 * kMaxModulo); + if (target_min > target_max) std::swap(target_min, target_max); + target->add_domain(target_min); + target->add_domain(target_max); + + // target = var % mod. + LinearArgumentProto* modulo = + initial_model.add_constraints()->mutable_int_mod(); + modulo->add_exprs()->add_vars(0); // var. + modulo->mutable_exprs(0)->add_coeffs(1); + modulo->add_exprs()->add_vars(1); // mod + modulo->mutable_exprs(1)->add_coeffs(1); + modulo->mutable_target()->add_vars(2); // target + modulo->mutable_target()->add_coeffs(1); + + absl::btree_set> solutions; + const CpSolverResponse response = + SolveAndCheck(initial_model, "linearization_level:0", &solutions); + + // Loop through the domains of var and target, and collect valid solutions. + absl::btree_set> expected; + for (int i = var_min; i <= var_max; ++i) { + const int k = i % mod; + if (k < target_min || k > target_max) continue; + expected.insert({i, mod, k}); + } + + // Checks that we get we get the same solution set through the two methods. 
+ EXPECT_EQ(solutions, expected) + << "\n---------\n" + << ProtobufDebugString(initial_model) << "---------\n"; + } +} + +TEST(ModuloConstraintTest, CheckAllPropagationsRandomProblem) { + absl::BitGen random; + const int kMaxValue = 50; + const int kMaxModulo = 10; + const int kNumLoops = DEBUG_MODE ? 5000 : 20000; + + for (int loop = 0; loop < kNumLoops; ++loop) { + // Generate domains for var and target. + int var_min = absl::Uniform(random, -kMaxValue, kMaxValue); + int var_max = absl::Uniform(random, -kMaxValue, kMaxValue); + if (var_min > var_max) std::swap(var_min, var_max); + int mod = absl::Uniform(random, 2, kMaxModulo); + int target_min = + absl::Uniform(random, -2 * kMaxModulo, 2 * kMaxModulo); + int target_max = + absl::Uniform(random, -2 * kMaxModulo, 2 * kMaxModulo); + if (target_min > target_max) std::swap(target_min, target_max); + + // Loop through the domains of var and target, and collect valid bounds. + int expected_var_min = std::numeric_limits::max(); + int expected_var_max = std::numeric_limits::min(); + int expected_target_min = std::numeric_limits::max(); + int expected_target_max = std::numeric_limits::min(); + for (int i = var_min; i <= var_max; ++i) { + const int k = i % mod; + if (k < target_min || k > target_max) continue; + expected_var_min = std::min(expected_var_min, i); + expected_var_max = std::max(expected_var_max, i); + expected_target_min = std::min(expected_target_min, k); + expected_target_max = std::max(expected_target_max, k); + } + + Model model; + const IntegerVariable var = model.Add(NewIntegerVariable(var_min, var_max)); + const IntegerVariable target = + model.Add(NewIntegerVariable(target_min, target_max)); + model.Add(FixedModuloConstraint(var, IntegerValue(mod), target)); + const bool result = model.GetOrCreate()->Propagate(); + if (result) { + EXPECT_BOUNDS_EQ(var, expected_var_min, expected_var_max); + EXPECT_BOUNDS_EQ(target, expected_target_min, expected_target_max) + << "var = [" << var_min << ".." 
<< var_max << "], mod = " << mod + << ", target = [" << target_min << ".." << target_max + << "], expected_target = [" << expected_target_min << ".." + << expected_target_max << "], propagated target = [" + << model.Get(LowerBound(target)) << ".." + << model.Get(UpperBound(target)) << "]"; + } else { + EXPECT_EQ(expected_var_max, std::numeric_limits::min()); + } + } +} + +bool TestSquarePropagation(std::pair initial_domain_x, + std::pair initial_domain_s, + std::pair expected_domain_x, + std::pair expected_domain_s) { + Model model; + IntegerVariable x = model.Add( + NewIntegerVariable(initial_domain_x.first, initial_domain_x.second)); + IntegerVariable s = model.Add( + NewIntegerVariable(initial_domain_s.first, initial_domain_s.second)); + model.Add(ProductConstraint(x, x, s)); + const bool result = model.GetOrCreate()->Propagate(); + if (result) { + EXPECT_BOUNDS_EQ(x, expected_domain_x.first, expected_domain_x.second); + EXPECT_BOUNDS_EQ(s, expected_domain_s.first, expected_domain_s.second); + } + return result; +} + +bool TestSquarePropagation(std::pair initial_domain_x, + std::pair initial_domain_s) { + return TestSquarePropagation(initial_domain_x, initial_domain_s, + initial_domain_x, initial_domain_s); +} + +TEST(SquareConstraintTest, SquareExpectedPropagation) { + // Propagate s -> x, then x -> s. + EXPECT_TRUE(TestSquarePropagation({0, 3}, {1, 7}, {1, 2}, {1, 4})); + // Same but negative. + EXPECT_TRUE(TestSquarePropagation({-3, 0}, {1, 7}, {-2, -1}, {1, 4})); + // No propagation. + EXPECT_TRUE(TestSquarePropagation({2, 5}, {4, 25})); + // Propagate x -> s. + EXPECT_TRUE(TestSquarePropagation({2, 3}, {1, 12}, {2, 3}, {4, 9})); + // Infeasible, s has no square in its domain. + EXPECT_FALSE(TestSquarePropagation({0, 5}, {17, 20})); + // Infeasible, s cannot be the square of x. + EXPECT_FALSE(TestSquarePropagation({3, 7}, {50, 100})); + // Propagate s -> x. 
+ EXPECT_TRUE(TestSquarePropagation({0, 10}, {16, 25}, {4, 5}, {16, 25})); +} + +TEST(SquareConstraintTest, LargestSquare) { + const int64_t max = kMaxIntegerValue.value(); + const int64_t square = + static_cast(std::floor(std::sqrt(static_cast(max)))); + CHECK_GE(CapProd(square + 1, square + 1), max); + EXPECT_TRUE(TestSquarePropagation({0, max}, {0, max}, {0, square}, + {0, square * square})); +} + +TEST(LevelZeroEqualityTest, BasicExample) { + Model model; + + const IntegerVariable obj = model.Add(NewIntegerVariable(1, 14)); + std::vector vars{model.Add(NewIntegerVariable(0, 1)), + model.Add(NewIntegerVariable(0, 1)), + model.Add(NewIntegerVariable(0, 1))}; + std::vector coeff{3, 4, 3}; + model.TakeOwnership(new LevelZeroEquality(obj, vars, coeff, &model)); + + // No propagations. + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_EQ(model.Get(LowerBound(obj)), 1); + EXPECT_EQ(model.Get(UpperBound(obj)), 14); + + // Fix vars[1], obj is detected to be 3*X + 4. + // + // Note that the LB is not 4 because we have just the LevelZeroEquality + // propagator which doesn't propagate bounds. + model.Add(GreaterOrEqual(vars[1], 1)); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_EQ(model.Get(LowerBound(obj)), 1); + EXPECT_EQ(model.Get(UpperBound(obj)), 13); + + // Still propagate when new bound changes. + model.Add(GreaterOrEqual(obj, 5)); + EXPECT_TRUE(model.GetOrCreate()->Propagate()); + EXPECT_EQ(model.Get(LowerBound(obj)), 7); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/linear_model_test.cc b/ortools/sat/linear_model_test.cc new file mode 100644 index 0000000000..143cbe8bc5 --- /dev/null +++ b/ortools/sat/linear_model_test.cc @@ -0,0 +1,144 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/linear_model.h" + +#include + +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/parse_test_proto.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_presolve.h" +#include "ortools/sat/model.h" +#include "ortools/sat/presolve_context.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/util/logging.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::google::protobuf::contrib::parse_proto::ParseTestProto; +using ::testing::EqualsProto; + +TEST(LinearModelTest, DetectFullEncoding) { + const CpModelProto cp_model = ParseTestProto(R"pb( + variables { domain: [ 0, 3 ] } + variables { domain: [ 0, 0, 5, 5, 10, 10 ] } + variables { domain: [ 0, 0 ] } + variables { domain: [ 5, 5 ] } + variables { domain: [ 10, 10 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: 6 + linear { vars: 5 coeffs: 1 domain: 0 domain: 0 } + } + constraints { + enforcement_literal: -7 + linear { vars: 5 coeffs: 1 domain: 1 domain: 2 } + } + constraints { + exactly_one { literals: 7 literals: 8 literals: 9 literals: 10 } + } + constraints { + enforcement_literal: 7 + linear { vars: 0 coeffs: 1 domain: 0 domain: 0 } + } + constraints { + enforcement_literal: -8 + linear { vars: 0 coeffs: 1 domain: 1 domain: 3 } + } + constraints { + enforcement_literal: 7 + linear { vars: 5 
coeffs: 1 domain: 1 domain: 1 } + } + constraints { + enforcement_literal: -8 + linear { vars: 5 coeffs: 1 domain: 0 domain: 0 domain: 2 domain: 2 } + } + constraints { + enforcement_literal: 8 + linear { vars: 0 coeffs: 1 domain: 1 domain: 1 } + } + constraints { + enforcement_literal: -9 + linear { vars: 0 coeffs: 1 domain: 0 domain: 0 domain: 2 domain: 3 } + } + constraints { + enforcement_literal: 9 + linear { vars: 0 coeffs: 1 domain: 2 domain: 2 } + } + constraints { + enforcement_literal: -10 + linear { vars: 0 coeffs: 1 domain: 0 domain: 1 domain: 3 domain: 3 } + } + constraints { + enforcement_literal: 9 + linear { vars: 5 coeffs: 1 domain: 2 domain: 2 } + } + constraints { + enforcement_literal: -10 + linear { vars: 5 coeffs: 1 domain: 0 domain: 1 } + } + constraints { + enforcement_literal: 10 + linear { vars: 0 coeffs: 1 domain: 3 domain: 3 } + } + constraints { + enforcement_literal: -11 + linear { vars: 0 coeffs: 1 domain: 0 domain: 2 } + } + constraints { exactly_one { literals: -7 literals: 8 literals: 10 } } + constraints { + linear { vars: 1 vars: 5 coeffs: 1 coeffs: -5 domain: 0 domain: 0 } + } + )pb"); + + LinearModel linear_model(cp_model); + int num_ignored = 0; + for (const bool is_ignored : linear_model.ignored_constraints()) { + if (is_ignored) num_ignored++; + } + EXPECT_EQ(num_ignored, 14); + ASSERT_EQ(linear_model.additional_constraints().size(), 3); + const ConstraintProto ct0 = ParseTestProto(R"pb( + linear { + vars: [ 0, 8, 9, 10 ] + coeffs: [ -1, 1, 2, 3 ] + domain: [ 0, 0 ] + } + )pb"); + const ConstraintProto ct1 = ParseTestProto(R"pb( + linear { + vars: [ 5, 7, 9 ] + coeffs: [ -1, 1, 2 ] + domain: [ 0, 0 ] + } + )pb"); + const ConstraintProto ct2 = ParseTestProto(R"pb( + exactly_one { literals: [ 6, 7, 9 ] } + )pb"); + EXPECT_THAT(ct0, EqualsProto(linear_model.additional_constraints()[0])); + EXPECT_THAT(ct1, EqualsProto(linear_model.additional_constraints()[1])); + EXPECT_THAT(ct2, 
EqualsProto(linear_model.additional_constraints()[2])); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/linear_relaxation_test.cc b/ortools/sat/linear_relaxation_test.cc new file mode 100644 index 0000000000..14c0682f28 --- /dev/null +++ b/ortools/sat/linear_relaxation_test.cc @@ -0,0 +1,1200 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/linear_relaxation.h" + +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/parse_test_proto.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_loader.h" +#include "ortools/sat/cp_model_mapping.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/intervals.h" +#include "ortools/sat/linear_constraint.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/util/sorted_interval_list.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::google::protobuf::contrib::parse_proto::ParseTestProto; + +TEST(AppendRelaxationForEqualityEncodingTest, DomainOfSize2) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = + model.Add(NewIntegerVariable(Domain::FromValues({4, 8}))); + encoder->FullyEncodeVariable(var); + + // Initially we don't have a view, so this should return false. 
+ LinearRelaxation relaxation; + int num_tight = 0; + int num_loose = 0; + AppendRelaxationForEqualityEncoding(var, model, &relaxation, &num_tight, + &num_loose); + EXPECT_EQ(num_tight, 0); + EXPECT_EQ(num_loose, 0); + + // Make sure all literals have a view. + for (const auto literal_value : encoder->FullDomainEncoding(var)) { + model.Add(NewIntegerVariableFromLiteral(literal_value.literal)); + } + AppendRelaxationForEqualityEncoding(var, model, &relaxation, &num_tight, + &num_loose); + EXPECT_EQ(num_tight, 1); + + // In this case, because there is just two value, we should get a literal + // and its negation, so just one constraint (the first one is empty). + EXPECT_EQ(relaxation.linear_constraints.size(), 2); + EXPECT_EQ(relaxation.linear_constraints[0].num_terms, 0); + + // The variable (0) is equal to 8 - 4 * [var == 4]. + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), + "8 <= 1*X0 4*X1 <= 8"); +} + +// Convert the at_most_one to a linear constraint and call DebugString(). +std::string AtMostOneAsString(absl::Span at_most_one, + Model* model) { + LinearConstraintBuilder lc(model, kMinIntegerValue, IntegerValue(1)); + for (const Literal literal : at_most_one) { + const bool unused ABSL_ATTRIBUTE_UNUSED = + lc.AddLiteralTerm(literal, IntegerValue(1)); + } + return lc.Build().DebugString(); +} + +TEST(AppendRelaxationForEqualityEncodingTest, DomainOfSize4) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = + model.Add(NewIntegerVariable(Domain::FromValues({1, 5, 8, 9}))); + encoder->FullyEncodeVariable(var); + + // Make sure all relevant literals have a view. 
+ for (const auto literal_value : encoder->FullDomainEncoding(var)) { + model.Add(NewIntegerVariableFromLiteral(literal_value.literal)); + } + + LinearRelaxation relaxation; + int num_tight = 0; + int num_loose = 0; + AppendRelaxationForEqualityEncoding(var, model, &relaxation, &num_tight, + &num_loose); + + EXPECT_EQ(relaxation.linear_constraints.size(), 2); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "1 <= 1*X1 1*X2 1*X3 1*X4"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), + "1 <= 1*X0 -4*X2 -7*X3 -8*X4 <= 1"); + + EXPECT_EQ(relaxation.at_most_ones.size(), 1); + EXPECT_EQ(AtMostOneAsString(relaxation.at_most_ones[0], &model), + "1*X1 1*X2 1*X3 1*X4 <= 1"); +} + +TEST(AppendRelaxationForEqualityEncodingTest, PartialEncoding) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add(NewIntegerVariable(0, 10)); + for (const int value : {1, 5}) { + encoder->AssociateToIntegerEqualValue( + Literal(model.Add(NewBooleanVariable()), true), var, + IntegerValue(value)); + } + + // Make sure all relevant literals have a view. + for (const auto literal_value : encoder->PartialDomainEncoding(var)) { + model.Add(NewIntegerVariableFromLiteral(literal_value.literal)); + } + + // The encoded values should be 0, 1 and 5, so the min/max not encoded should + // be 2 and 10. 
+ LinearRelaxation relaxation; + int num_tight = 0; + int num_loose = 0; + AppendRelaxationForEqualityEncoding(var, model, &relaxation, &num_tight, + &num_loose); + EXPECT_EQ(num_tight, 0); + EXPECT_EQ(num_loose, 2); + + EXPECT_EQ(relaxation.linear_constraints.size(), 2); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "2 <= 1*X0 2*X1 1*X2 -3*X3"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), + "1*X0 10*X1 9*X2 5*X3 <= 10"); + + EXPECT_EQ(relaxation.at_most_ones.size(), 1); + EXPECT_EQ(AtMostOneAsString(relaxation.at_most_ones[0], &model), + "1*X1 1*X2 1*X3 <= 1"); +} + +TEST(AppendPartialGreaterThanEncodingRelaxationTest, FullEncoding) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = + model.Add(NewIntegerVariable(Domain::FromValues({1, 5, 8, 9}))); + encoder->FullyEncodeVariable(var); + + // Make sure all >= literal have a view. + for (const auto value_literal : encoder->PartialGreaterThanEncoding(var)) { + model.Add(NewIntegerVariableFromLiteral(value_literal.literal)); + } + + LinearRelaxation relaxation; + AppendPartialGreaterThanEncodingRelaxation(var, model, &relaxation); + + // The implications. + EXPECT_EQ(relaxation.at_most_ones.size(), 2); + EXPECT_EQ(AtMostOneAsString(relaxation.at_most_ones[0], &model), + "-1*X1 1*X2 <= 0"); + EXPECT_EQ(AtMostOneAsString(relaxation.at_most_ones[1], &model), + "-1*X2 1*X3 <= 0"); + + // The "diffs" are 4,3,1. + // Because here we have a full encoding, we actually have == 1. 
+ EXPECT_EQ(relaxation.linear_constraints.size(), 2); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "1 <= 1*X0 -4*X1 -3*X2 -1*X3"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), + "-1 <= -1*X0 4*X1 3*X2 1*X3"); +} + +TEST(AppendPartialGreaterThanEncodingRelaxationTest, PartialEncoding) { + Model model; + IntegerEncoder* encoder = model.GetOrCreate(); + const IntegerVariable var = model.Add(NewIntegerVariable(0, 10)); + + // Create a literal for var >= 1, var >= 2 and var >= 6 + for (const int value : {1, 2, 6}) { + encoder->AssociateToIntegerLiteral( + Literal(model.Add(NewBooleanVariable()), true), + IntegerLiteral::GreaterOrEqual(var, IntegerValue(value))); + } + + // Make sure all >= literal have a view. + for (const auto value_literal : encoder->PartialGreaterThanEncoding(var)) { + model.Add(NewIntegerVariableFromLiteral(value_literal.literal)); + } + + LinearRelaxation relaxation; + AppendPartialGreaterThanEncodingRelaxation(var, model, &relaxation); + + // The implications. + EXPECT_EQ(relaxation.at_most_ones.size(), 2); + EXPECT_EQ(AtMostOneAsString(relaxation.at_most_ones[0], &model), + "-1*X1 1*X2 <= 0"); + EXPECT_EQ(AtMostOneAsString(relaxation.at_most_ones[1], &model), + "-1*X2 1*X3 <= 0"); + + // The first constraint is var >= 0 + (>=1) + (>=2) + 4*(>=6) + EXPECT_EQ(relaxation.linear_constraints.size(), 2); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "0 <= 1*X0 -1*X1 -1*X2 -4*X3"); + + // The second is var <= (>=1) + 4*(>=2) + 5*(>=6) which gives the bounds + // <=0,<=1,<=5 and <=10. 
+ EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), + "0 <= -1*X0 1*X1 4*X2 5*X3"); +} + +TEST(TryToLinearizeConstraint, BoolOr) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: 0 + bool_or { literals: [ -2, 2 ] } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + TryToLinearizeConstraint(initial_model, initial_model.constraints(0), + /*linearization_level=*/2, &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 1); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "-1 <= -1*X0 -1*X1 1*X2"); +} + +TEST(TryToLinearizeConstraint, BoolOrLevel1) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: 0 + bool_or { literals: [ -2, 2 ] } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + TryToLinearizeConstraint(initial_model, initial_model.constraints(0), + /*linearization_level=*/1, &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 0); + EXPECT_EQ(relaxation.at_most_ones.size(), 0); +} + +TEST(TryToLinearizeConstraint, BoolAndSingleEnforcement) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: 0 + bool_and { literals: [ -2, 2 ] } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + TryToLinearizeConstraint(initial_model, initial_model.constraints(0), + /*linearization_level=*/2, &model, &relaxation); + + EXPECT_EQ(relaxation.at_most_ones.size(), 2); + 
EXPECT_EQ(AtMostOneAsString(relaxation.at_most_ones[0], &model), + "1*X0 1*X1 <= 1"); + EXPECT_EQ(AtMostOneAsString(relaxation.at_most_ones[1], &model), + "1*X0 -1*X2 <= 0"); +} + +TEST(TryToLinearizeConstraint, BoolAndMultipleEnforcement) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: [ 0, 3 ] + bool_and { literals: [ -2, 2 ] } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + TryToLinearizeConstraint(initial_model, initial_model.constraints(0), + /*linearization_level=*/2, &model, &relaxation); + + // X0 & X3 => X2 ==1 & not(X1) == 1; + EXPECT_EQ(relaxation.linear_constraints.size(), 2); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "1*X0 1*X1 1*X3 <= 2"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), + "1*X0 -1*X2 1*X3 <= 1"); +} + +TEST(TryToLinearizeConstraint, BoolAndNoEnforcement) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { bool_and { literals: [ -2, 2 ] } } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + TryToLinearizeConstraint(initial_model, initial_model.constraints(0), + /*linearization_level=*/2, &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 0); + EXPECT_EQ(relaxation.at_most_ones.size(), 0); +} + +TEST(TryToLinearizeConstraint, BoolAndLevel1) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: [ 0, 3 ] + bool_and { literals: [ -2, 2 ] } + } + )pb"); + + Model 
model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + TryToLinearizeConstraint(initial_model, initial_model.constraints(0), + /*linearization_level=*/1, &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 0); + EXPECT_EQ(relaxation.at_most_ones.size(), 0); +} + +TEST(TryToLinearizeConstraint, LinMaxLevel1Bis) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 5 ] } + variables { domain: [ -1, 7 ] } + variables { domain: [ -2, 9 ] } + variables { domain: [ -5, 10 ] } + constraints { + lin_max { + target: { vars: 3 coeffs: 1 } + exprs: { vars: 0 coeffs: 1 } + exprs: { vars: 1 coeffs: 1 } + exprs: { vars: 2 coeffs: -1 } + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + TryToLinearizeConstraint(initial_model, initial_model.constraints(0), + /*linearization_level=*/1, &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 3); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), "1*X0 -1*X3 <= 0"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), "1*X1 -1*X3 <= 0"); + EXPECT_EQ(relaxation.linear_constraints[2].DebugString(), "-1*X2 -1*X3 <= 0"); +} + +TEST(TryToLinearizeConstraint, LinMaxSmall) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 5 ] } + variables { domain: [ -1, 7 ] } + variables { domain: [ -5, 10 ] } + constraints { + lin_max { + target: { vars: 2 coeffs: 1 } + exprs: { vars: 0 coeffs: 1 } + exprs: { vars: 1 coeffs: 1 } + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + TryToLinearizeConstraint(initial_model, initial_model.constraints(0), + /*linearization_level=*/2, &model, &relaxation); + + // Take into account the constraints added by the cut generator. 
+ EXPECT_GE(relaxation.linear_constraints.size(), 2); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), "1*X0 -1*X2 <= 0"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), "1*X1 -1*X2 <= 0"); +} + +TEST(TryToLinearizeConstraint, IntSquare) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 1, 10 ] } + variables { domain: [ 1, 100 ] } + constraints { + int_prod { + target: { vars: 1 coeffs: 1 } + exprs: { vars: 0 coeffs: 1 } + exprs: { vars: 0 coeffs: 1 } + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + TryToLinearizeConstraint(initial_model, initial_model.constraints(0), + /*linearization_level=*/1, &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 3); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "-11*X0 1*X1 <= -10"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), "-2 <= -3*X0 1*X1"); + EXPECT_EQ(relaxation.linear_constraints[2].DebugString(), + "-90 <= -19*X0 1*X1"); +} + +TEST(TryToLinearizeConstraint, IntAbs) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 100 ] } + variables { domain: [ -20, 30 ] } + constraints { + lin_max { + target: { vars: 0 coeffs: 1 } + exprs: { vars: 1 coeffs: 1 } + exprs: { vars: 1 coeffs: -1 } + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + TryToLinearizeConstraint(initial_model, initial_model.constraints(0), + /*linearization_level=*/1, &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 3); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), "-1*X0 1*X1 <= 0"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), "-1*X0 -1*X1 <= 0"); + EXPECT_EQ(relaxation.linear_constraints[2].DebugString(), + "50*X0 -10*X1 <= 1200"); +} + +TEST(TryToLinearizeConstraint, LinMaxLevel1) { + const CpModelProto initial_model = 
ParseTestProto(R"pb( + variables { domain: [ 0, 5 ] } + variables { domain: [ -1, 7 ] } + variables { domain: [ -2, 9 ] } + variables { domain: [ -5, 10 ] } + constraints { + lin_max { + target: { + vars: [ 0 ] + coeffs: [ 1 ] + offset: 3 + } + exprs: { + vars: [ 1 ] + coeffs: [ 2 ] + offset: 1 + } + exprs: { + vars: [ 2 ] + coeffs: [ -1 ] + offset: 2 + } + exprs: { + vars: [ 3 ] + coeffs: [ 3 ] + offset: 3 + } + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + TryToLinearizeConstraint(initial_model, initial_model.constraints(0), + /*linearization_level=*/1, &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 3); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), "-1*X0 2*X1 <= 2"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), "-1*X0 -1*X2 <= 1"); + EXPECT_EQ(relaxation.linear_constraints[2].DebugString(), "-1*X0 3*X3 <= 0"); +} + +TEST(AppendLinMaxRelaxation, BasicBehavior) { + Model model; + IntegerVariable x0 = model.Add(NewIntegerVariable(0, 5)); + IntegerVariable x1 = model.Add(NewIntegerVariable(-1, 7)); + IntegerVariable x2 = model.Add(NewIntegerVariable(-2, 9)); + IntegerVariable target = model.Add(NewIntegerVariable(-5, 10)); + LinearExpression e0; + e0.vars = {x0}; + e0.coeffs = {IntegerValue(1)}; + LinearExpression e1; + e1.vars = {x1}; + e1.coeffs = {IntegerValue(1)}; + LinearExpression e2; + e2.vars = {x2}; + e2.coeffs = {IntegerValue(-1)}; + + const std::vector exprs = {e0, e1, e2}; + + LinearRelaxation relaxation; + const std::vector literals = + CreateAlternativeLiteralsWithView(exprs.size(), &model, &relaxation); + AppendLinMaxRelaxationPart2(target, literals, exprs, &model, &relaxation); + + EXPECT_EQ(literals.size(), 3); + ASSERT_EQ(relaxation.linear_constraints.size(), 4); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "1 <= 1*X4 1*X5 1*X6 <= 1"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), + "-1*X0 1*X3 
-7*X5 -2*X6 <= 0"); + EXPECT_EQ(relaxation.linear_constraints[2].DebugString(), + "-1*X1 1*X3 -6*X4 -3*X6 <= 0"); + EXPECT_EQ(relaxation.linear_constraints[3].DebugString(), + "1*X2 1*X3 -14*X4 -16*X5 <= 0"); +} + +TEST(AppendLinMaxRelaxation, BasicBehaviorExprs) { + Model model; + IntegerVariable x0 = model.Add(NewIntegerVariable(-1, 1)); + IntegerVariable x1 = model.Add(NewIntegerVariable(-1, 1)); + IntegerVariable target = model.Add(NewIntegerVariable(-100, 100)); + LinearExpression e0; + e0.offset = IntegerValue(1); + LinearExpression e1; + e1.vars = {x0, x1}; + e1.coeffs = {IntegerValue(-1), IntegerValue(-2)}; + LinearExpression e2; + e2.vars = {x0, x1}; + e2.coeffs = {IntegerValue(-1), IntegerValue(1)}; + + const std::vector exprs = {e0, e1, e2}; + + LinearRelaxation relaxation; + const std::vector literals = + CreateAlternativeLiteralsWithView(exprs.size(), &model, &relaxation); + AppendLinMaxRelaxationPart2(target, literals, exprs, &model, &relaxation); + + EXPECT_EQ(literals.size(), 3); + ASSERT_EQ(relaxation.linear_constraints.size(), 4); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "1 <= 1*X3 1*X4 1*X5 <= 1"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), + "1*X2 -1*X3 -3*X4 -2*X5 <= 0"); + EXPECT_EQ(relaxation.linear_constraints[2].DebugString(), + "1*X0 2*X1 1*X2 -4*X3 -3*X5 <= 0"); + EXPECT_EQ(relaxation.linear_constraints[3].DebugString(), + "1*X0 -1*X1 1*X2 -3*X3 -3*X4 <= 0"); +} + +TEST(AppendLinMaxRelaxation, BasicBehaviorExprs2) { + Model model; + IntegerVariable x0 = model.Add(NewIntegerVariable(1, 2)); + IntegerVariable x1 = model.Add(NewIntegerVariable(0, 1)); + IntegerVariable x2 = model.Add(NewIntegerVariable(-2, -1)); + IntegerVariable target = model.Add(NewIntegerVariable(-3, 0)); + LinearExpression e0; + e0.vars = {x0, x1}; + e0.coeffs = {IntegerValue(-2), IntegerValue(-3)}; + e0.offset = IntegerValue(5); + LinearExpression e1; + e1.vars = {x1, x2}; + e1.coeffs = {IntegerValue(-2), IntegerValue(-5)}; + 
e1.offset = IntegerValue(-6); + LinearExpression e2; + e2.vars = {x0, x2}; + e2.coeffs = {IntegerValue(-2), IntegerValue(-3)}; + + const std::vector exprs = {e0, e1, e2}; + + LinearRelaxation relaxation; + const std::vector literals = + CreateAlternativeLiteralsWithView(exprs.size(), &model, &relaxation); + AppendLinMaxRelaxationPart2(NegationOf(target), literals, exprs, &model, + &relaxation); + + EXPECT_EQ(literals.size(), 3); + ASSERT_EQ(relaxation.linear_constraints.size(), 4); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "1 <= 1*X4 1*X5 1*X6 <= 1"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), + "2*X0 3*X1 -1*X3 -5*X4 -9*X5 -9*X6 <= 0"); + EXPECT_EQ(relaxation.linear_constraints[2].DebugString(), + "2*X1 5*X2 -1*X3 2*X4 6*X5 2*X6 <= 0"); + EXPECT_EQ(relaxation.linear_constraints[3].DebugString(), + "2*X0 3*X2 -1*X3 -2*X4 -2*X5 <= 0"); +} + +void AppendNoOverlapRelaxation(const ConstraintProto& ct, Model* model, + LinearRelaxation* relaxation) { + auto* mapping = model->GetOrCreate(); + std::vector intervals = + mapping->Intervals(ct.no_overlap().intervals()); + const IntegerValue one(1); + std::vector demands(intervals.size(), one); + IntervalsRepository* repository = model->GetOrCreate(); + SchedulingConstraintHelper* helper = repository->GetOrCreateHelper(intervals); + SchedulingDemandHelper* demands_helper = + new SchedulingDemandHelper(demands, helper, model); + model->TakeOwnership(demands_helper); + + AddCumulativeRelaxation(/*capacity=*/one, helper, demands_helper, + /*makespan=*/std::nullopt, model, relaxation); +} + +TEST(AppendNoOverlapRelaxation, IntersectingIntervals) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 5 ] } + variables { domain: [ 1, 7 ] } + variables { domain: [ 1, 12 ] } + variables { domain: [ 0, 5 ] } + variables { domain: [ 1, 7 ] } + variables { domain: [ 1, 12 ] } + constraints { no_overlap { intervals: [ 1, 2 ] } } + constraints { + interval { + start { 
vars: 0 coeffs: 1 } + size { vars: 1 coeffs: 1 } + end { vars: 2 coeffs: 1 } + } + } + constraints { + interval { + start { vars: 3 coeffs: 1 } + size { vars: 4 coeffs: 1 } + end { vars: 5 coeffs: 1 } + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendNoOverlapRelaxation(initial_model.constraints(0), &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 1); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), "1*X1 1*X4 <= 12"); +} + +TEST(AppendNoOverlapRelaxation, NoIntersection) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 1, 1 ] } + variables { domain: [ 1, 2 ] } + variables { domain: [ 2, 5 ] } + variables { domain: [ 1, 7 ] } + variables { domain: [ 1, 12 ] } + constraints { + interval { + start { vars: 0 coeffs: 1 } + size { vars: 1 coeffs: 1 } + end { vars: 2 coeffs: 1 } + } + } + constraints { + interval { + start { vars: 3 coeffs: 1 } + size { vars: 4 coeffs: 1 } + end { vars: 5 coeffs: 1 } + } + } + constraints { no_overlap { intervals: [ 0, 1 ] } } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendNoOverlapRelaxation(initial_model.constraints(2), &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 1); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), "1*X4 <= 11"); +} + +TEST(AppendNoOverlapRelaxation, IntervalWithEnforcement) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 2, 5 ] } + variables { domain: [ 1, 7 ] } + variables { domain: [ 1, 12 ] } + variables { domain: [ 2, 5 ] } + variables { domain: [ 1, 7 ] } + variables { domain: [ 1, 12 ] } + variables { domain: [ 0, 1 ] } + constraints { + interval { + start { vars: 0 coeffs: 1 } + size { vars: 1 coeffs: 1 } + end { vars: 2 coeffs: 1 } + } + } + constraints { + enforcement_literal: 6 + interval { + 
start { vars: 3 coeffs: 1 } + size { vars: 4 coeffs: 1 } + end { vars: 5 coeffs: 1 } + } + } + constraints { no_overlap { intervals: [ 0, 1 ] } } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendNoOverlapRelaxation(initial_model.constraints(2), &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 1); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), "1*X1 1*X6 <= 10"); +} + +TEST(AppendNoOverlapRelaxation, ZeroMinEnergy) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 1, 5 ] } + variables { domain: [ 0, 7 ] } + variables { domain: [ 1, 12 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 1, 5 ] } + variables { domain: [ 0, 7 ] } + variables { domain: [ 1, 12 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: 3 + interval { + start { vars: 0 coeffs: 1 } + size { vars: 1 coeffs: 1 } + end { vars: 2 coeffs: 1 } + } + } + constraints { + enforcement_literal: 7 + interval { + start { vars: 4 coeffs: 1 } + size { vars: 5 coeffs: 1 } + end { vars: 6 coeffs: 1 } + } + } + constraints { no_overlap { intervals: [ 0, 1 ] } } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendNoOverlapRelaxation(initial_model.constraints(2), &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 0); +} + +TEST(AppendNoOverlapRelaxation, OneInterval) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 1, 1 ] } + variables { domain: [ 1, 2 ] } + constraints { + interval { + start { vars: 0 coeffs: 1 } + size { vars: 1 coeffs: 1 } + end { vars: 2 coeffs: 1 } + } + } + constraints { no_overlap { intervals: 0 } } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendNoOverlapRelaxation(initial_model.constraints(1), &model, 
&relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 0); +} + +void AppendCumulativeRelaxation(const ConstraintProto& ct, Model* model, + LinearRelaxation* relaxation) { + auto* mapping = model->GetOrCreate(); + std::vector intervals = + mapping->Intervals(ct.cumulative().intervals()); + const std::vector demands = + mapping->Affines(ct.cumulative().demands()); + const AffineExpression capacity = mapping->Affine(ct.cumulative().capacity()); + IntervalsRepository* repository = model->GetOrCreate(); + SchedulingConstraintHelper* helper = repository->GetOrCreateHelper(intervals); + SchedulingDemandHelper* demands_helper = + new SchedulingDemandHelper(demands, helper, model); + model->TakeOwnership(demands_helper); + + AddCumulativeRelaxation(capacity, helper, demands_helper, + /*makespan=*/std::nullopt, model, relaxation); +} + +TEST(AppendCumulativeRelaxation, GcdOnFixedDemandsSizesAndCapacity) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 5 ] } + variables { domain: [ 1, 4 ] } + variables { domain: [ 0, 7 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + interval { + start { vars: 0 coeffs: 1 } + size { offset: 4 } + end { vars: 0 coeffs: 1 offset: 4 } + } + } + constraints { + enforcement_literal: 3 + interval { + start { vars: 1 coeffs: 1 } + size { offset: 4 } + end { vars: 1 coeffs: 1 offset: 4 } + } + } + constraints { + enforcement_literal: 4 + interval { + start { vars: 2 coeffs: 1 } + size { offset: 2 } + end { vars: 2 coeffs: 1 offset: 2 } + } + } + constraints { + cumulative { + intervals: [ 0, 1, 2 ] + demands { offset: 3 } + demands { offset: 6 } + demands { offset: 3 } + capacity { offset: 7 } + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendCumulativeRelaxation(initial_model.constraints(3), &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 1); + 
EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), "4*X3 1*X4 <= 6"); +} + +TEST(AppendCumulativeRelaxation, IgnoreZeroDemandOrSize) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 5 ] } + variables { domain: [ 1, 4 ] } + variables { domain: [ 0, 7 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + interval { + start { vars: 0 coeffs: 1 } + size { offset: 4 } + end { vars: 0 coeffs: 1 offset: 4 } + } + } + constraints { + enforcement_literal: 3 + interval { + start { vars: 1 coeffs: 1 } + size { offset: 4 } + end { vars: 1 coeffs: 1 offset: 4 } + } + } + constraints { + enforcement_literal: 4 + interval { + start { vars: 2 coeffs: 1 } + size { offset: 2 } + end { vars: 2 coeffs: 1 offset: 2 } + } + } + constraints { + enforcement_literal: 5 + interval { + start { vars: 2 coeffs: 1 } + size { offset: 0 } + end { vars: 2 coeffs: 1 } + } + } + constraints { + enforcement_literal: 6 + interval { + start { vars: 2 coeffs: 1 offset: 5 } + size { offset: 3 } + end { vars: 2 coeffs: 1 offset: 8 } + } + } + constraints { + cumulative { + intervals: [ 0, 1, 2, 3, 4 ] + demands { offset: 3 } + demands { offset: 6 } + demands { offset: 3 } + demands { offset: 3 } + demands { offset: 0 } + capacity { offset: 7 } + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendCumulativeRelaxation(initial_model.constraints(5), &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 1); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), "4*X3 1*X4 <= 6"); +} + +TEST(AppendLinearConstraintRelaxation, NoEnforcementLiteral) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 2 ] } + constraints { + linear { + vars: [ 0, 2 ] + coeffs: [ 2, 1 ] + domain: [ 
3, 4 ] + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendLinearConstraintRelaxation(initial_model.constraints(0), + /*linearize_enforced_constraints=*/true, + &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 1); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "3 <= 2*X0 1*X2 <= 4"); +} + +TEST(AppendLinearConstraintRelaxation, SmallLinearizationLevel) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 2 ] } + constraints { + enforcement_literal: 1 + linear { + vars: [ 0, 2 ] + coeffs: [ 2, 1 ] + domain: [ 3, 5 ] + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendLinearConstraintRelaxation(initial_model.constraints(0), + /*linearize_enforced_constraints=*/false, + &model, &relaxation); + EXPECT_EQ(relaxation.linear_constraints.size(), 0); +} + +TEST(AppendLinearConstraintRelaxation, PbConstraint) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + linear { + vars: [ 0, 1, 2 ] + coeffs: [ 2, 1, 3 ] + domain: [ 3, 5 ] + } + } + )pb"); + + Model model; + LoadVariables(initial_model, false, &model); + + LinearRelaxation relaxation; + AppendLinearConstraintRelaxation(initial_model.constraints(0), + /*linearize_enforced_constraints=*/false, + &model, &relaxation); + EXPECT_EQ(relaxation.linear_constraints.size(), 1); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "3 <= 2*X0 1*X1 3*X2 <= 5"); +} + +TEST(AppendLinearConstraintRelaxation, SmallConstraint) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: 1 + linear { + vars: 0 + coeffs: 2 + domain: 
[ 3, 5 ] + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendLinearConstraintRelaxation(initial_model.constraints(0), + /*linearize_enforced_constraints=*/true, + &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 0); +} + +TEST(AppendLinearConstraintRelaxation, SingleEnforcementLiteralLowerBound) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 2 ] } + constraints { + enforcement_literal: 1 + linear { + vars: [ 0, 2 ] + coeffs: [ 2, 1 ] + domain: [ 3, 9223372036854775807 ] + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendLinearConstraintRelaxation(initial_model.constraints(0), + /*linearize_enforced_constraints=*/true, + &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 1); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "0 <= 2*X0 -3*X1 1*X2"); +} + +TEST(AppendLinearConstraintRelaxation, SingleEnforcementLiteralUpperBound) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 2 ] } + constraints { + enforcement_literal: 1 + linear { + vars: [ 0, 2 ] + coeffs: [ 2, 1 ] + domain: [ -9223372036854775808, 3 ] + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendLinearConstraintRelaxation(initial_model.constraints(0), + /*linearize_enforced_constraints=*/true, + &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 1); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "2*X0 1*X1 1*X2 <= 4"); +} + +TEST(AppendLinearConstraintRelaxation, SingleEnforcementLiteralBothBounds) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + 
variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 2 ] } + constraints { + enforcement_literal: 1 + linear { + vars: [ 0, 2 ] + coeffs: [ 2, 1 ] + domain: [ 2, 3 ] + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendLinearConstraintRelaxation(initial_model.constraints(0), + /*linearize_enforced_constraints=*/true, + &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 2); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "0 <= 2*X0 -2*X1 1*X2"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), + "2*X0 1*X1 1*X2 <= 4"); +} + +TEST(AppendLinearConstraintRelaxation, MultipleEnforcementLiteral) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: [ 1, 3, 4 ] + linear { + vars: [ 0, 2 ] + coeffs: [ 2, 1 ] + domain: [ 2, 3 ] + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendLinearConstraintRelaxation(initial_model.constraints(0), + /*linearize_enforced_constraints=*/true, + &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 2); + EXPECT_EQ(relaxation.linear_constraints[0].DebugString(), + "-4 <= 2*X0 -2*X1 1*X2 -2*X3 -2*X4"); + EXPECT_EQ(relaxation.linear_constraints[1].DebugString(), + "2*X0 1*X1 1*X2 1*X3 1*X4 <= 6"); +} + +// This used to generate the completely wrong constraint: +// 1*X0 -8*X1 1*X2 -8*X3 <= -6 before. 
+TEST(AppendLinearConstraintRelaxation, BoundsNotTight) { + const CpModelProto initial_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + enforcement_literal: 1 + enforcement_literal: 3 + linear { + vars: [ 0, 2 ] + coeffs: [ 1, 1 ] + domain: [ 0, 10 ] # 10 > implied ub of 2. + } + } + )pb"); + + Model model; + LoadVariables(initial_model, true, &model); + + LinearRelaxation relaxation; + AppendLinearConstraintRelaxation(initial_model.constraints(0), + /*linearize_enforced_constraints=*/true, + &model, &relaxation); + + EXPECT_EQ(relaxation.linear_constraints.size(), 0); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/presolve_context_test.cc b/ortools/sat/presolve_context_test.cc new file mode 100644 index 0000000000..89c58a2972 --- /dev/null +++ b/ortools/sat/presolve_context_test.cc @@ -0,0 +1,1038 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/presolve_context.h" + +#include +#include + +#include "absl/container/flat_hash_set.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/parse_test_proto.h" +#include "ortools/base/types.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_utils.h" +#include "ortools/sat/model.h" +#include "ortools/util/affine_relation.h" +#include "ortools/util/sorted_interval_list.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::google::protobuf::contrib::parse_proto::ParseTestProto; + +TEST(PresolveContextTest, GetOrCreateEncodingOnIntVar) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + context.NewIntVar({1, 5}); + + EXPECT_EQ(1, context.GetOrCreateVarValueEncoding(0, 2)); + EXPECT_EQ(2, context.GetOrCreateVarValueEncoding(0, 4)); + EXPECT_EQ(1, context.GetOrCreateVarValueEncoding(0, 2)); + EXPECT_EQ(1, context.GetOrCreateVarValueEncoding(-1, -2)); +} + +TEST(PresolveContextTest, GetOrCreateEncodingOnBoolVar) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + context.NewBoolVar("test"); + + EXPECT_EQ(0, context.GetOrCreateVarValueEncoding(0, 1)); + EXPECT_EQ(-1, context.GetOrCreateVarValueEncoding(0, 0)); +} + +TEST(PresolveContextTest, GetOrCreateEncodingOnSize2Var) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + context.NewIntVar(Domain::FromValues({1, 4})); + + EXPECT_EQ(-2, context.GetOrCreateVarValueEncoding(0, 1)); + EXPECT_EQ(1, context.GetOrCreateVarValueEncoding(0, 4)); +} + +TEST(PresolveContextTest, GetOrCreateEncodingOnSize2VarBis) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + context.NewIntVar(Domain::FromValues({1, 4})); + + EXPECT_EQ(1, context.GetOrCreateVarValueEncoding(0, 4)); + 
EXPECT_EQ(-2, context.GetOrCreateVarValueEncoding(0, 1)); +} + +TEST(PresolveContextTest, InsertVarValueEncodingOnIntVar) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + context.NewIntVar({1, 5}); + context.NewBoolVar("test"); + + context.InsertVarValueEncoding(1, 0, 2); + EXPECT_EQ(1, context.GetOrCreateVarValueEncoding(0, 2)); + EXPECT_EQ(1, context.GetOrCreateVarValueEncoding(-1, -2)); +} + +TEST(PresolveContextTest, InsertVarValueEncodingOnSize2Var) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + context.NewIntVar(Domain::FromValues({1, 4})); + context.NewBoolVar("test"); + + context.InsertVarValueEncoding(1, 0, 1); + EXPECT_EQ(1, context.GetOrCreateVarValueEncoding(0, 1)); + EXPECT_EQ(-2, context.GetOrCreateVarValueEncoding(0, 4)); +} + +TEST(PresolveContextTest, InsertVarValueEncodingOnSize2VarBis) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + context.NewIntVar(Domain::FromValues({1, 4})); + context.NewBoolVar("test"); + + context.InsertVarValueEncoding(1, 0, 4); + EXPECT_EQ(1, context.GetOrCreateVarValueEncoding(0, 4)); + EXPECT_EQ(-2, context.GetOrCreateVarValueEncoding(0, 1)); +} + +TEST(PresolveContextTest, InsertVarValueEncodingOnPosLitMinLit) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + const int a = context.NewBoolVar("test"); + const int b = context.NewBoolVar("test"); + context.InsertVarValueEncoding(a, b, 0); + EXPECT_EQ(context.GetLiteralRepresentative(b), NegatedRef(a)); + EXPECT_TRUE(context.VarToConstraints(a).contains(kAffineRelationConstraint)); + EXPECT_TRUE(context.VarToConstraints(b).contains(kAffineRelationConstraint)); +} + +TEST(PresolveContextTest, InsertVarValueEncodingOnPosLitPosMaxLit) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, 
nullptr); + const int a = context.NewBoolVar("test"); + const int b = context.NewBoolVar("test"); + context.InsertVarValueEncoding(a, b, 1); + EXPECT_EQ(context.GetLiteralRepresentative(b), a); + EXPECT_TRUE(context.VarToConstraints(a).contains(kAffineRelationConstraint)); + EXPECT_TRUE(context.VarToConstraints(b).contains(kAffineRelationConstraint)); +} + +TEST(PresolveContextTest, InsertVarValueEncodingOnNegLitMinLit) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + const int a = context.NewBoolVar("test"); + const int b = context.NewBoolVar("test"); + context.InsertVarValueEncoding(NegatedRef(a), b, 0); + EXPECT_EQ(context.GetLiteralRepresentative(b), a); + EXPECT_TRUE(context.VarToConstraints(a).contains(kAffineRelationConstraint)); + EXPECT_TRUE(context.VarToConstraints(b).contains(kAffineRelationConstraint)); +} + +TEST(PresolveContextTest, InsertVarValueEncodingOnNegLitMaxLit) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + const int a = context.NewBoolVar("test"); + const int b = context.NewBoolVar("test"); + context.InsertVarValueEncoding(NegatedRef(a), b, 1); + EXPECT_EQ(context.GetLiteralRepresentative(b), NegatedRef(a)); + EXPECT_TRUE(context.VarToConstraints(a).contains(kAffineRelationConstraint)); + EXPECT_TRUE(context.VarToConstraints(b).contains(kAffineRelationConstraint)); +} + +TEST(PresolveContextTest, InsertVarValueEncodingOnPosLitMinVar) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + const int a = context.NewBoolVar("test"); + const int b = context.NewIntVar(Domain::FromValues({2, 5})); + context.InsertVarValueEncoding(a, b, 2); + + EXPECT_EQ(context.GetAffineRelation(b).representative, a); + EXPECT_EQ(context.GetAffineRelation(b).coeff, -3); + EXPECT_EQ(context.GetAffineRelation(b).offset, 5); + + 
EXPECT_TRUE(context.VarToConstraints(a).contains(kAffineRelationConstraint)); + EXPECT_TRUE(context.VarToConstraints(b).contains(kAffineRelationConstraint)); +} + +TEST(PresolveContextTest, InsertVarValueEncodingOnPosLitMaxVar) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + const int a = context.NewBoolVar("test"); + const int b = context.NewIntVar(Domain::FromValues({2, 5})); + context.InsertVarValueEncoding(a, b, 5); + + EXPECT_EQ(context.GetAffineRelation(b).representative, a); + EXPECT_EQ(context.GetAffineRelation(b).coeff, 3); + EXPECT_EQ(context.GetAffineRelation(b).offset, 2); + + EXPECT_TRUE(context.VarToConstraints(a).contains(kAffineRelationConstraint)); + EXPECT_TRUE(context.VarToConstraints(b).contains(kAffineRelationConstraint)); +} + +TEST(PresolveContextTest, InsertVarValueEncodingOnNegLitMinVar) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + const int a = context.NewBoolVar("test"); + const int b = context.NewIntVar(Domain::FromValues({2, 5})); + context.InsertVarValueEncoding(NegatedRef(a), b, 2); + + EXPECT_EQ(context.GetAffineRelation(b).representative, a); + EXPECT_EQ(context.GetAffineRelation(b).coeff, 3); + EXPECT_EQ(context.GetAffineRelation(b).offset, 2); + + EXPECT_TRUE(context.VarToConstraints(a).contains(kAffineRelationConstraint)); + EXPECT_TRUE(context.VarToConstraints(b).contains(kAffineRelationConstraint)); +} + +TEST(PresolveContextTest, InsertVarValueEncodingOnNegLitMaxVar) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + const int a = context.NewBoolVar("test"); + const int b = context.NewIntVar(Domain::FromValues({2, 5})); + context.InsertVarValueEncoding(NegatedRef(a), b, 5); + + EXPECT_EQ(context.GetAffineRelation(b).representative, a); + EXPECT_EQ(context.GetAffineRelation(b).coeff, -3); + EXPECT_EQ(context.GetAffineRelation(b).offset, 5); + + 
EXPECT_TRUE(context.VarToConstraints(a).contains(kAffineRelationConstraint)); + EXPECT_TRUE(context.VarToConstraints(b).contains(kAffineRelationConstraint)); +} + +TEST(PresolveContextTest, DomainContainsExpr) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + const int var = context.NewIntVar({1, 5}); + + LinearExpressionProto expr; + expr.add_vars(var); + expr.add_coeffs(3); + expr.set_offset(2); + + EXPECT_FALSE(context.DomainContains(expr, 2)); + EXPECT_FALSE(context.DomainContains(expr, 7)); + EXPECT_TRUE(context.DomainContains(expr, 11)); + + LinearExpressionProto fixed; + fixed.set_offset(-1); + EXPECT_FALSE(context.DomainContains(fixed, 2)); + EXPECT_TRUE(context.DomainContains(fixed, -1)); + + LinearExpressionProto coeff0; + coeff0.add_vars(var); + coeff0.add_coeffs(0); + coeff0.set_offset(5); + EXPECT_FALSE(context.DomainContains(coeff0, 2)); + EXPECT_TRUE(context.DomainContains(coeff0, 5)); +} + +TEST(PresolveContextTest, GetOrCreateEncodingOnAffine) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + const int var = context.NewIntVar({1, 5}); + + LinearExpressionProto expr; + expr.add_vars(var); + expr.add_coeffs(3); + expr.set_offset(2); + + const int zero = context.GetFalseLiteral(); + const int one = context.GetTrueLiteral(); + + EXPECT_EQ(zero, context.GetOrCreateAffineValueEncoding(expr, 2)); + EXPECT_EQ(zero, context.GetOrCreateAffineValueEncoding(expr, 7)); + EXPECT_EQ(context.GetOrCreateAffineValueEncoding(expr, 11), + context.GetOrCreateVarValueEncoding(var, 3)); + + LinearExpressionProto fixed; + fixed.set_offset(-1); + EXPECT_EQ(zero, context.GetOrCreateAffineValueEncoding(fixed, 2)); + EXPECT_EQ(one, context.GetOrCreateAffineValueEncoding(fixed, -1)); +} + +TEST(PresolveContextTest, LinearExpressionMinMax) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + 
context.NewIntVar(Domain(0, 1)); + context.NewIntVar(Domain(0, 1)); + const LinearExpressionProto expr = ParseTestProto(R"pb( + vars: [ 0, 1 ] + coeffs: [ 2, -3 ] + offset: 5 + )pb"); + + EXPECT_EQ(2, context.MinOf(expr)); + EXPECT_EQ(7, context.MaxOf(expr)); +} + +TEST(PresolveContextTest, ObjectiveReadCanonicalizeWrite) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 8 ] } + variables { domain: [ 3, 3 ] } + variables { domain: [ -2, 7 ] } + variables { domain: [ -2, -2 ] } + variables { domain: [ -4, 11 ] } + objective { + vars: [ 0, 4, 2, 3, 1 ] + coeffs: [ 2, 4, -2, -4, -2 ] + domain: [ 0, 1000 ] + offset: 3 + } + )pb"); + + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + EXPECT_TRUE(context.CanonicalizeObjective()); + context.WriteObjectiveToProto(); + + const CpModelProto expected = ParseTestProto(R"pb( + variables { domain: [ 0, 8 ] } + variables { domain: [ 3, 3 ] } + variables { domain: [ -2, 7 ] } + variables { domain: [ -2, -2 ] } + variables { domain: [ -4, 11 ] } + objective { + vars: [ 0, 2, 4 ] + coeffs: [ 1, -1, 2 ] + domain: [ -1, 32 ] + offset: 2.5 + scaling_factor: 2 + integer_before_offset: 1 + integer_scaling_factor: 2 + } + )pb"); + EXPECT_THAT(working_model, testing::EqualsProto(expected)); +} + +TEST(PresolveContextTest, ExploitAtMostOneInObjective) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + objective { + vars: [ 0, 1, 2, 3 ] + coeffs: [ 2, 3, 7, 4 ] + } + constraints { bool_or { literals: [ 0, 1, 2, 3 ] } } + )pb"); + + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + EXPECT_TRUE(context.CanonicalizeObjective()); + + // Do not crash if called with empty exactly one. 
The problem should be UNSAT + // in this case, but we might call this before reporting it. + EXPECT_FALSE(context.ExploitExactlyOneInObjective({})); + + EXPECT_TRUE(context.ExploitExactlyOneInObjective({0, 1, 2})); + EXPECT_TRUE(context.CanonicalizeObjective()); + context.WriteObjectiveToProto(); + + const CpModelProto expected = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + objective { + vars: [ 1, 2, 3 ] + coeffs: [ 1, 5, 4 ] + domain: [ 0, 10 ] + offset: 2 + scaling_factor: 1 + integer_before_offset: 2 + } + constraints { bool_or { literals: [ 0, 1, 2, 3 ] } } + )pb"); + EXPECT_THAT(working_model, testing::EqualsProto(expected)); +} + +TEST(PresolveContextTest, ExploitAtMostOneInObjectiveNegatedRef) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { bool_or { literals: [ 0, 1, 2, 3 ] } } + objective { + vars: [ 0, 1, 2, 3 ] + coeffs: [ 2, 3, 7, 4 ] + } + )pb"); + + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + EXPECT_TRUE(context.CanonicalizeObjective()); + EXPECT_TRUE(context.ExploitExactlyOneInObjective({0, NegatedRef(1), 2})); + EXPECT_TRUE(context.CanonicalizeObjective()); + context.WriteObjectiveToProto(); + + // The objective is 2X + 3(1 - Y) + 7Z with X + Y + Z = 1 + // So we get 3 + 2 X - 3 Y + 7 Z and when shifted by -3, we get 5X + 10Z. + const CpModelProto expected = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { bool_or { literals: [ 0, 1, 2, 3 ] } } + objective { + vars: [ 0, 2, 3 ] + coeffs: [ 5, 10, 4 ] + domain: [ 0, 15 ] # We get 15 because 16 is not reachable. 
+ scaling_factor: 1 + } + )pb"); + EXPECT_THAT(working_model, testing::EqualsProto(expected)); +} + +TEST(PresolveContextTest, ObjectiveSubstitution) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + objective { + vars: [ 0 ] + coeffs: [ 1 ] + domain: [ 0, 1000 ] + offset: 3 + } + )pb"); + + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + EXPECT_TRUE(context.CanonicalizeObjective()); + + const ConstraintProto constraint = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 2 ] + coeffs: [ -1, 1, 1 ] + domain: [ 6, 6 ] + } + )pb"); + EXPECT_TRUE(context.SubstituteVariableInObjective(0, -1, constraint)); + + context.WriteObjectiveToProto(); + const CpModelProto expected = ParseTestProto(R"pb( + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + objective { + vars: [ 1, 2 ] + coeffs: [ 1, 1 ] + domain: [ 6, 15 ] # [0, 9] initially, + 6 offset. 
+ offset: -3 + integer_before_offset: -6 + scaling_factor: 1 + } + )pb"); + EXPECT_THAT(working_model, testing::EqualsProto(expected)); +} + +TEST(PresolveContextTest, ObjectiveSubstitutionWithLargeCoeff) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + objective { + vars: [ 0 ] + coeffs: [ 4 ] + domain: [ 0, 1000 ] + offset: 3 + } + )pb"); + + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + + const ConstraintProto constraint = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 2 ] + coeffs: [ -2, 1, 1 ] + domain: [ 6, 6 ] + } + )pb"); + EXPECT_TRUE(context.SubstituteVariableInObjective(0, -2, constraint)); + + context.WriteObjectiveToProto(); + const CpModelProto expected = ParseTestProto(R"pb( + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + objective { + vars: [ 1, 2 ] + coeffs: [ 2, 2 ] + domain: [ 12, 1012 ] # [0, 1000] initially, + 2*6 offset. 
+ offset: -9 + integer_before_offset: -12 + scaling_factor: 1 + } + )pb"); + EXPECT_THAT(working_model, testing::EqualsProto(expected)); +} + +TEST(PresolveContextTest, VarValueEncoding) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 9 ] } + variables { domain: [ 0, 9 ] } + )pb"); + + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + EXPECT_TRUE(context.StoreLiteralImpliesVarEqValue(0, 2, 4)); + EXPECT_FALSE(context.StoreLiteralImpliesVarEqValue(0, 2, 4)); + EXPECT_FALSE(context.HasVarValueEncoding(2, 4)); + + EXPECT_TRUE(context.StoreLiteralImpliesVarNEqValue(-1, 2, 4)); + EXPECT_FALSE(context.StoreLiteralImpliesVarNEqValue(-1, 2, 4)); + EXPECT_TRUE(context.HasVarValueEncoding(2, 4)); + + EXPECT_TRUE(context.StoreLiteralImpliesVarNEqValue(0, 1, 4)); + EXPECT_FALSE(context.StoreLiteralImpliesVarNEqValue(0, 1, 4)); + EXPECT_FALSE(context.HasVarValueEncoding(1, 4)); + + EXPECT_TRUE(context.StoreLiteralImpliesVarEqValue(-1, 1, 4)); + EXPECT_FALSE(context.StoreLiteralImpliesVarEqValue(-1, 1, 4)); + EXPECT_TRUE(context.HasVarValueEncoding(1, 4)); +} + +TEST(PresolveContextTest, DetectVarEqValueHalfEncoding) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 9 ] } + constraints { + enforcement_literal: 1 + linear { + vars: [ 2 ] + coeffs: [ 1 ] + domain: [ 6, 6 ] + } + } + constraints { + enforcement_literal: -2 + linear { + vars: [ 2 ] + coeffs: [ 1 ] + domain: [ 0, 5, 7, 9 ] + } + } + )pb"); + + const int kLiteral = 1; + const int kVar = 2; + const int64_t kValue = 6; + + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + + context.StoreLiteralImpliesVarEqValue(kLiteral, kVar, kValue); + context.StoreLiteralImpliesVarNEqValue(NegatedRef(kLiteral), kVar, kValue); + int encoding_literal = 0; + 
EXPECT_TRUE(context.HasVarValueEncoding(kVar, kValue, &encoding_literal)); + EXPECT_EQ(encoding_literal, kLiteral); +} + +TEST(PresolveContextTest, GetLiteralRepresentative) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + )pb"); + + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + + EXPECT_NE(context.GetLiteralRepresentative(0), + context.GetLiteralRepresentative(1)); + EXPECT_NE(context.GetLiteralRepresentative(0), + context.GetLiteralRepresentative(2)); + EXPECT_NE(context.GetLiteralRepresentative(1), + context.GetLiteralRepresentative(2)); + EXPECT_NE(context.GetLiteralRepresentative(-1), + context.GetLiteralRepresentative(-2)); + EXPECT_NE(context.GetLiteralRepresentative(-1), + context.GetLiteralRepresentative(-3)); + EXPECT_NE(context.GetLiteralRepresentative(-2), + context.GetLiteralRepresentative(-3)); + + context.StoreBooleanEqualityRelation(0, 1); + EXPECT_EQ(context.GetLiteralRepresentative(0), + context.GetLiteralRepresentative(1)); + EXPECT_NE(context.GetLiteralRepresentative(0), + context.GetLiteralRepresentative(2)); + EXPECT_NE(context.GetLiteralRepresentative(1), + context.GetLiteralRepresentative(2)); + EXPECT_EQ(context.GetLiteralRepresentative(-1), + context.GetLiteralRepresentative(-2)); + EXPECT_NE(context.GetLiteralRepresentative(-1), + context.GetLiteralRepresentative(-3)); + EXPECT_NE(context.GetLiteralRepresentative(-2), + context.GetLiteralRepresentative(-3)); + + context.StoreBooleanEqualityRelation(0, -3); + EXPECT_EQ(context.GetLiteralRepresentative(0), + context.GetLiteralRepresentative(1)); + EXPECT_EQ(context.GetLiteralRepresentative(0), + context.GetLiteralRepresentative(-3)); + EXPECT_EQ(context.GetLiteralRepresentative(1), + context.GetLiteralRepresentative(-3)); + EXPECT_EQ(context.GetLiteralRepresentative(-1), + context.GetLiteralRepresentative(-2)); + 
EXPECT_EQ(context.GetLiteralRepresentative(-1), + context.GetLiteralRepresentative(2)); + EXPECT_EQ(context.GetLiteralRepresentative(-2), + context.GetLiteralRepresentative(2)); +} + +TEST(PresolveContextTest, VarIsOnlyUsedInEncoding) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + enforcement_literal: 0 + linear { + vars: [ 2 ] + coeffs: [ 1 ] + domain: [ 3, 15 ] + } + } + constraints { + enforcement_literal: 1 + linear { + vars: [ 2 ] + coeffs: [ 1 ] + domain: [ 7, 8 ] + } + } + constraints { + linear { + vars: [ 3, 4 ] + coeffs: [ 1, 1 ] + domain: [ 5, 5 ] + } + } + )pb"); + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + context.UpdateNewConstraintsVariableUsage(); + EXPECT_FALSE(context.VariableIsOnlyUsedInEncodingAndMaybeInObjective(0)); + EXPECT_FALSE(context.VariableIsOnlyUsedInEncodingAndMaybeInObjective(1)); + EXPECT_TRUE(context.VariableIsOnlyUsedInEncodingAndMaybeInObjective(2)); + EXPECT_FALSE(context.VariableIsOnlyUsedInEncodingAndMaybeInObjective(3)); + EXPECT_FALSE(context.VariableIsOnlyUsedInEncodingAndMaybeInObjective(4)); +} + +TEST(PresolveContextTest, ReifiedConstraintCache) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + )pb"); + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + context.UpdateNewConstraintsVariableUsage(); + LinearExpressionProto expr1; + expr1.add_vars(2); + expr1.add_coeffs(1); + LinearExpressionProto expr2; + expr2.add_vars(3); + expr2.add_coeffs(1); + + const int var2_before_var3 = + context.GetOrCreateReifiedPrecedenceLiteral(expr1, expr2, 0, 1); + EXPECT_EQ(var2_before_var3, 
+ context.GetOrCreateReifiedPrecedenceLiteral(expr1, expr2, 0, 1)); + EXPECT_EQ(var2_before_var3, + context.GetOrCreateReifiedPrecedenceLiteral(expr1, expr2, 1, 0)); + EXPECT_NE(var2_before_var3, + context.GetOrCreateReifiedPrecedenceLiteral(expr2, expr1, 1, 0)); + ConstraintProto bool_or = ParseTestProto(R"pb( + bool_or { literals: [ 5, 4, -2, -1 ] })pb"); + // 2 x (2 implications , 2 enforced linear) + bool_or. + ASSERT_EQ(9, working_model.constraints_size()); + EXPECT_THAT(working_model.constraints(8), ::testing::EqualsProto(bool_or)); +} + +TEST(PresolveContextTest, ExploitFixedDomainOverflow) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: 0 domain: 0 } + variables { domain: 34359738368 domain: 34359738368 } + constraints { dummy_constraint { vars: 0 vars: 1 } } + )pb"); + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + context.UpdateNewConstraintsVariableUsage(); +} + +TEST(PresolveContextTest, IntersectDomainWithConstant) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + + LinearExpressionProto constant; + constant.set_offset(3); + EXPECT_TRUE(context.IntersectDomainWith(constant, Domain(2, 3))); + EXPECT_FALSE(context.IntersectDomainWith(constant, Domain(2, 2))); +} + +// Most of the logic is already tested by the Domain() manipulation function, +// we just test a simpel case here. +TEST(PresolveContextTest, IntersectDomainWithAffineExpression) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: 0 domain: 5 } + )pb"); + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + + // -2 x + 3 in [2, 3] so -2x in [-1, 0] and x must be in [0, 1]. 
+ LinearExpressionProto expr; + expr.add_vars(0); + expr.add_coeffs(-1); + expr.set_offset(3); + EXPECT_TRUE(context.IntersectDomainWith(expr, Domain(2, 3))); + EXPECT_EQ(context.DomainOf(0), Domain(0, 1)); +} + +TEST(PresolveContextTest, DomainSuperSetOf) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: 0 domain: 1000 } + )pb"); + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + + const LinearExpressionProto expr1 = + ParseTestProto(R"pb(vars: 0 coeffs: 1 offset: 4)pb"); + EXPECT_EQ(context.DomainSuperSetOf(expr1), Domain(4, 1004)); + + const LinearExpressionProto expr2 = + ParseTestProto(R"pb(vars: 0 coeffs: 2 offset: 4)pb"); + EXPECT_EQ(context.DomainSuperSetOf(expr2), Domain(4, 2004)); +} + +TEST(PresolveContextTest, DomainSuperSetOfDiscrete) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: 0 domain: 1 } + )pb"); + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + + const LinearExpressionProto expr1 = + ParseTestProto(R"pb(vars: 0 coeffs: -2 offset: 4)pb"); + EXPECT_EQ(context.DomainSuperSetOf(expr1), Domain::FromValues({2, 4})); +} + +TEST(PresolveContextTest, AddAffineRelation) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1000 ] } + variables { domain: [ 0, 1000 ] } + variables { domain: [ 0, 1000 ] } + variables { domain: [ 0, 1000 ] } + )pb"); + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + context.UpdateNewConstraintsVariableUsage(); + + EXPECT_TRUE(context.StoreAffineRelation(0, 1, 3, 0)); // x0 = 3x1 + EXPECT_TRUE(context.StoreAffineRelation(2, 3, 5, 0)); // x2 = 5x3 + EXPECT_TRUE(context.StoreAffineRelation(0, 2, 2, 0)); // x0 = 2x2 ! + + // A new variable is created: x4 ! + // x0 = 2x2 get expanded into 3x1 = 10 x3, so x1 is a multiple of 10. 
+ EXPECT_EQ(context.GetAffineRelation(1).representative, 4); + EXPECT_EQ(context.GetAffineRelation(1).coeff, 10); + EXPECT_EQ(context.DomainOf(4).ToString(), "[0,33]"); + + // x0 = 3x1 multiple of 30. + EXPECT_EQ(context.GetAffineRelation(0).representative, 4); + EXPECT_EQ(context.GetAffineRelation(0).coeff, 30); + + // x3 is a multiple of 3. + EXPECT_EQ(context.GetAffineRelation(3).representative, 4); + EXPECT_EQ(context.GetAffineRelation(3).coeff, 3); + + // x2 = 5x3 is a multiple of 15. + EXPECT_EQ(context.GetAffineRelation(2).representative, 4); + EXPECT_EQ(context.GetAffineRelation(2).coeff, 15); +} + +TEST(PresolveContextTest, AddAffineRelationWithOffset) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1000 ] } + variables { domain: [ 0, 1000 ] } + variables { domain: [ 0, 1000 ] } + variables { domain: [ 0, 1000 ] } + )pb"); + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + context.UpdateNewConstraintsVariableUsage(); + + EXPECT_TRUE(context.StoreAffineRelation(0, 1, 3, 10)); // x0 = 3x1 + 10 + EXPECT_TRUE(context.StoreAffineRelation(2, 3, 1, 30)); // x2 = x3 + 30 + EXPECT_TRUE(context.StoreAffineRelation(0, 2, 1, 0)); // x0 = x2 ! 
+ + // x0 = 3x1 + 10 + EXPECT_EQ(context.GetAffineRelation(0).representative, 1); + EXPECT_EQ(context.GetAffineRelation(0).coeff, 3); + EXPECT_EQ(context.GetAffineRelation(0).offset, 10); + + // x3 = x2 - 30 = 3x1 - 20 + EXPECT_EQ(context.GetAffineRelation(3).representative, 1); + EXPECT_EQ(context.GetAffineRelation(3).coeff, 3); + EXPECT_EQ(context.GetAffineRelation(3).offset, -20); + + // x2 same as x0 + EXPECT_EQ(context.GetAffineRelation(2).representative, 1); + EXPECT_EQ(context.GetAffineRelation(2).coeff, 3); + EXPECT_EQ(context.GetAffineRelation(2).offset, 10); +} + +TEST(PresolveContextTest, AddAffineRelationPreventOverflow) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 1000000 ] } + variables { domain: [ 100000001, 100000004 ] } + )pb"); + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + context.UpdateNewConstraintsVariableUsage(); + + // x0 = 10 x2 - 1e9. + EXPECT_TRUE(context.StoreAffineRelation(0, 1, 10, -1000000000)); + + // To avoid "future" overflow a new variable is created. + // And everything is expressed using that one. + EXPECT_EQ(context.GetAffineRelation(1).representative, 2); + EXPECT_EQ(context.GetAffineRelation(1).coeff, 1); + EXPECT_EQ(context.GetAffineRelation(1).offset, 100000001); + EXPECT_EQ(context.DomainOf(2).ToString(), "[0,3]"); + + // And x0 is in term of that one. 
+ EXPECT_EQ(context.GetAffineRelation(0).representative, 2); + EXPECT_EQ(context.GetAffineRelation(0).coeff, 10); + EXPECT_EQ(context.DomainOf(0).ToString(), "[10][20][30][40]"); +} + +TEST(PresolveContextTest, ObjectiveScalingMinimize) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 20 ] } + variables { domain: [ 10, 30 ] } + floating_point_objective { + vars: [ 0, 1 ], + coeffs: [ 3.5, -1.3333333333 ], + maximize: false, + offset: 1.0 + } + )pb"); + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + ASSERT_TRUE(context.ScaleFloatingPointObjective()); + ASSERT_TRUE(working_model.has_objective()); + ASSERT_FALSE(working_model.has_floating_point_objective()); + const CpObjectiveProto& obj = working_model.objective(); + EXPECT_EQ(2, obj.vars_size()); + EXPECT_FLOAT_EQ(obj.scaling_factor() * obj.coeffs(0), 3.5); + EXPECT_NEAR(obj.scaling_factor() * obj.coeffs(1), -4.0 / 3.0, 1e-5); + EXPECT_FLOAT_EQ(obj.scaling_factor() * obj.offset(), 1.0); +} + +TEST(PresolveContextTest, ObjectiveScalingMaximize) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 20 ] } + variables { domain: [ 10, 30 ] } + floating_point_objective { + vars: [ 0, 1 ], + coeffs: [ 3.5, -1.3333333333 ], + maximize: true, + offset: 1.0 + } + )pb"); + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + ASSERT_TRUE(context.ScaleFloatingPointObjective()); + ASSERT_TRUE(working_model.has_objective()); + ASSERT_FALSE(working_model.has_floating_point_objective()); + const CpObjectiveProto& obj = working_model.objective(); + EXPECT_EQ(2, obj.vars_size()); + EXPECT_FLOAT_EQ(obj.scaling_factor() * obj.coeffs(0), 3.5); + EXPECT_NEAR(obj.scaling_factor() * obj.coeffs(1), -4.0 / 3.0, 1e-5); + EXPECT_FLOAT_EQ(obj.scaling_factor() * obj.offset(), 1.0); +} + +TEST(ExpressionIsALiteralTest, BasicApi) { + Model model; + CpModelProto 
working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 5 ] } + variables { domain: [ 0, 1 ] } + )pb"); + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + context.UpdateNewConstraintsVariableUsage(); + + int ref; + const LinearExpressionProto expr1 = ParseTestProto(R"pb( + vars: 0 coeffs: 1 + )pb"); + EXPECT_FALSE(context.ExpressionIsALiteral(expr1)); + + const LinearExpressionProto expr2 = ParseTestProto(R"pb( + vars: 1 coeffs: 1 + )pb"); + EXPECT_TRUE(context.ExpressionIsALiteral(expr2, &ref)); + EXPECT_EQ(1, ref); + + const LinearExpressionProto expr3 = + ParseTestProto(R"pb( + vars: 1 coeffs: -1 offset: 1 + )pb"); + EXPECT_TRUE(context.ExpressionIsALiteral(expr3, &ref)); + EXPECT_EQ(-2, ref); + + const LinearExpressionProto expr4 = + ParseTestProto(R"pb( + vars: 1 coeffs: -1 offset: 2 + )pb"); + EXPECT_FALSE(context.ExpressionIsALiteral(expr4)); + + const LinearExpressionProto expr5 = + ParseTestProto(R"pb( + vars: -2 coeffs: 1 offset: 1 + )pb"); + EXPECT_TRUE(context.ExpressionIsALiteral(expr5, &ref)); + EXPECT_EQ(-2, ref); +} + +TEST(PresolveContextTest, CanonicalizeAffineVariable) { + Model model; + CpModelProto working_model; + PresolveContext context(&model, &working_model, nullptr); + const int x = context.NewIntVar(Domain(0, 15)); + + // 3 * x + 9 is a multiple of 6. + // This is the same as x + 3 is a multiple of 2. 
+ EXPECT_TRUE(context.CanonicalizeAffineVariable(x, 3, 6, 9)); + + const AffineRelation::Relation r = context.GetAffineRelation(x); + EXPECT_EQ(r.coeff, 2); +} + +TEST(PresolveContextTest, ComputeMinMaxActivity) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 8 ] } + variables { domain: [ 3, 3 ] } + variables { domain: [ -2, 7 ] } + variables { domain: [ -2, -2 ] } + variables { domain: [ -4, 11 ] } + objective { + vars: [ 0, 1, 2, 3, 4 ] + coeffs: [ 2, 4, -2, -4, -2 ] + domain: [ 0, 1000 ] + offset: 3 + } + )pb"); + + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + const auto [min_activity, max_activity] = + context.ComputeMinMaxActivity(working_model.objective()); + EXPECT_EQ(min_activity, 2 * 0 + 4 * 3 - 2 * 7 - 4 * -2 - 2 * 11); + EXPECT_EQ(max_activity, 2 * 8 + 4 * 3 - 2 * -2 - 4 * -2 - 2 * -4); +} + +TEST(PresolveContextTest, CanonicalizeLinearConstraint) { + Model model; + CpModelProto working_model = ParseTestProto(R"pb( + variables { domain: [ 0, 8 ] } + variables { domain: [ 0, 8 ] } + variables { domain: [ 0, 8 ] } + variables { domain: [ 0, 8 ] } + constraints { + linear { + vars: [ 0, 1, 2, 0, 1 ] + coeffs: [ 2, 4, -2, -4, -2 ] + domain: [ 0, 1000 ] + } + } + )pb"); + PresolveContext context(&model, &working_model, nullptr); + context.InitializeNewDomains(); + + context.CanonicalizeLinearConstraint(working_model.mutable_constraints(0)); + + const ConstraintProto expected = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 2 ] + coeffs: [ -2, 2, -2 ] + domain: [ 0, 1000 ] + } + )pb"); + EXPECT_THAT(working_model.constraints(0), testing::EqualsProto(expected)); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/presolve_util_test.cc b/ortools/sat/presolve_util_test.cc new file mode 100644 index 0000000000..61cf0f2de7 --- /dev/null +++ b/ortools/sat/presolve_util_test.cc @@ -0,0 +1,513 @@ +// Copyright 2010-2024 
Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/presolve_util.h" + +#include + +#include +#include +#include + +#include "absl/container/flat_hash_set.h" +#include "absl/random/random.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/logging.h" +#include "ortools/base/parse_test_proto.h" +#include "ortools/sat/cp_model.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_solver.h" +#include "ortools/sat/cp_model_utils.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/util/sorted_interval_list.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::google::protobuf::contrib::parse_proto::ParseTestProto; +using ::testing::ElementsAre; + +TEST(DomainDeductionsTest, BasicTest) { + DomainDeductions deductions; + + deductions.AddDeduction(0, 3, Domain(0, 4)); + deductions.AddDeduction(1, 3, Domain(1, 8)); + + EXPECT_TRUE(deductions.ProcessClause({0, 1, 2}).empty()); + EXPECT_THAT(deductions.ProcessClause({0, 1}), + ElementsAre(std::make_pair(3, Domain(0, 8)))); + EXPECT_THAT(deductions.ProcessClause({0}), + ElementsAre(std::make_pair(3, Domain(0, 4)))); + EXPECT_THAT(deductions.ProcessClause({1}), + ElementsAre(std::make_pair(3, Domain(1, 8)))); + + deductions.MarkProcessingAsDoneForNow(); + EXPECT_TRUE(deductions.ProcessClause({0}).empty()); + + deductions.AddDeduction(0, 3, Domain(4, 4)); + 
EXPECT_EQ(deductions.ImpliedDomain(0, 3), Domain(4, 4)); + EXPECT_EQ(deductions.ImpliedDomain(7, 3), Domain::AllValues()); + EXPECT_TRUE(deductions.ProcessClause({1}).empty()); + EXPECT_THAT(deductions.ProcessClause({0}), + ElementsAre(std::make_pair(3, Domain(4, 4)))); + EXPECT_THAT(deductions.ProcessClause({0, 1}), + ElementsAre(std::make_pair(3, Domain(1, 8)))); +} + +TEST(AddLinearConstraintMultiple, BasicTestWithPositiveCoeff) { + ConstraintProto to_modify = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 2, 3 ] + coeffs: [ 2, 3, 4, 5 ] + domain: [ 0, 10 ] + } + )pb"); + const ConstraintProto to_add = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 2, 3 ] + coeffs: [ 2, 1, 4, 5 ] + domain: [ 3, 3 ] + } + )pb"); + + EXPECT_TRUE(AddLinearConstraintMultiple(3, to_add, &to_modify)); + const ConstraintProto expected = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 2, 3 ] + coeffs: [ 8, 6, 16, 20 ] + domain: [ 9, 19 ] + } + )pb"); + EXPECT_THAT(to_modify, testing::EqualsProto(expected)); +} + +TEST(SubstituteVariableTest, BasicTestWithPositiveCoeff) { + ConstraintProto constraint = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 2, 3 ] + coeffs: [ 2, 3, 4, 5 ] + domain: [ 0, 10 ] + } + )pb"); + const ConstraintProto definition = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 2, 3 ] + coeffs: [ 2, 1, 4, 5 ] + domain: [ 3, 3 ] + } + )pb"); + + EXPECT_TRUE(SubstituteVariable(1, 1, definition, &constraint)); + + // We have X1 = 3 - 2X0 - 4X2 -5X3 and the coeff of X1 in constraint is 3. 
+ const ConstraintProto expected = ParseTestProto(R"pb( + linear { + vars: [ 0, 2, 3 ] + coeffs: [ -4, -8, -10 ] + domain: [ -9, 1 ] + } + )pb"); + EXPECT_THAT(constraint, testing::EqualsProto(expected)); +} + +TEST(SubstituteVariableTest, BasicTestWithNegativeCoeff) { + ConstraintProto constraint = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 2, 3 ] + coeffs: [ 2, 3, 4, 5 ] + domain: [ 0, 10 ] + } + )pb"); + const ConstraintProto definition = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 2, 3 ] + coeffs: [ 2, -1, 4, 5 ] + domain: [ 3, 3 ] + } + )pb"); + + EXPECT_TRUE(SubstituteVariable(1, -1, definition, &constraint)); + + // We have X1 = 2X0 + 4X2 + 5X3 - 3 and the coeff of X1 in constraint is 3. + const ConstraintProto expected = ParseTestProto(R"pb( + linear { + vars: [ 0, 2, 3 ] + coeffs: [ 8, 16, 20 ] + domain: [ 9, 19 ] + } + )pb"); + EXPECT_THAT(constraint, testing::EqualsProto(expected)); +} + +TEST(SubstituteVariableTest, WorkWithDuplicate) { + ConstraintProto constraint = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 2, 3, 1, 3 ] + coeffs: [ 2, 3, 4, 5, 5, 5 ] + domain: [ 0, 10 ] + } + )pb"); + const ConstraintProto definition = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 2, 3 ] + coeffs: [ 2, 1, 4, 5 ] + domain: [ 3, 3 ] + } + )pb"); + + EXPECT_TRUE(SubstituteVariable(1, 1, definition, &constraint)); + + // Constraint is actually 2X0 + 7X1 + 4X2 + 10X3 + // Which gives 2X0 + 8(3 - 2X0 - 4X2 -5X3) + 4X2 + 10X3 + const ConstraintProto expected = ParseTestProto(R"pb( + linear { + vars: [ 0, 2, 3 ] + coeffs: [ -14, -28, -30 ] + domain: [ -24, -14 ] + } + )pb"); + EXPECT_THAT(constraint, testing::EqualsProto(expected)); +} + +TEST(SubstituteVariableTest, FalseIfVariableNotThere) { + ConstraintProto constraint = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 1 ] + coeffs: [ 2, 3, -3 ] + domain: [ 0, 10 ] + } + )pb"); + const ConstraintProto definition = ParseTestProto(R"pb( + linear { + vars: [ 0, 1, 2, 3 ] + coeffs: [ 2, 1, 4, 5 ] + domain: [ 3, 
3 ] + } + )pb"); + + EXPECT_FALSE(SubstituteVariable(1, 1, definition, &constraint)); +} + +TEST(ActivityBoundHelperTest, TrivialMaxBound) { + ActivityBoundHelper helper; + + // If there are no amo, we get trivial values + std::vector> conditional; + const int64_t result = + helper.ComputeMaxActivity({{+3, 4}, {-1, -7}, {-3, 5}}, &conditional); + EXPECT_EQ(result, 9); + ASSERT_EQ(conditional.size(), 3); + EXPECT_EQ(conditional[0][0], 5); + EXPECT_EQ(conditional[0][1], 9); + EXPECT_EQ(conditional[1][0], 9); + EXPECT_EQ(conditional[1][1], 2); + EXPECT_EQ(conditional[2][0], 4); + EXPECT_EQ(conditional[2][1], 9); +} + +TEST(ActivityBoundHelperTest, TrivialMinBound) { + ActivityBoundHelper helper; + + // If there are no amo, we get trivial values + std::vector> conditional; + const int64_t result = + helper.ComputeMinActivity({{+3, 4}, {-1, -7}, {-3, 5}}, &conditional); + EXPECT_EQ(result, -7); + ASSERT_EQ(conditional.size(), 3); + EXPECT_EQ(conditional[0][0], -7); + EXPECT_EQ(conditional[0][1], -3); + EXPECT_EQ(conditional[1][0], 0); + EXPECT_EQ(conditional[1][1], -7); + EXPECT_EQ(conditional[2][0], -7); + EXPECT_EQ(conditional[2][1], -2); +} + +TEST(ActivityBoundHelperTest, DisjointAmo) { + ActivityBoundHelper helper; + helper.AddAtMostOne({+1, +2, -3}); + helper.AddAtMostOne({-5, -6, -7}); + + std::vector> conditional; + const int64_t result = helper.ComputeMaxActivity( + {{+1, 4}, {+2, 7}, {-5, 5}, {-6, 6}, {10, 3}}, &conditional); + + // We have a partition [+1, +2] [-5, -6] [10]. 
+ EXPECT_EQ(result, 16); + ASSERT_EQ(conditional.size(), 5); + EXPECT_EQ(conditional[0][0], 16); + EXPECT_EQ(conditional[0][1], 13); + EXPECT_EQ(conditional[1][0], 13); + EXPECT_EQ(conditional[1][1], 16); + + EXPECT_EQ(conditional[2][0], 16); + EXPECT_EQ(conditional[2][1], 15); + EXPECT_EQ(conditional[3][0], 15); + EXPECT_EQ(conditional[3][1], 16); + + EXPECT_EQ(conditional[4][0], 13); + EXPECT_EQ(conditional[4][1], 16); +} + +TEST(ActivityBoundHelperTest, PartitionLiteralsIntoAmo) { + ActivityBoundHelper helper; + helper.AddAtMostOne({+1, +2, -3}); + helper.AddAtMostOne({-5, -6, -7}); + + // The order is not documented, but it actually follow the original order. + std::vector literals({+1, -6, +2, 10, -5}); + EXPECT_THAT( + helper.PartitionLiteralsIntoAmo(literals), + ElementsAre(ElementsAre(+1, +2), ElementsAre(-6, -5), ElementsAre(10))); +} + +TEST(ActivityBoundHelperTest, IsAmo) { + ActivityBoundHelper helper; + helper.AddAtMostOne({+1, +2, -3}); + helper.AddAtMostOne({-5, -6, -7}); + + EXPECT_FALSE(helper.IsAmo({+1, +2, +3})); + EXPECT_FALSE(helper.IsAmo({+1, -5, -6})); + EXPECT_TRUE(helper.IsAmo({+1, -3})); + EXPECT_TRUE(helper.IsAmo({-5, -7})); +} + +// We will compare with CP-SAT on small instances, and make sure bounds are +// correct. +TEST(ActivityBoundHelperTest, RandomTest) { + for (int num_test = 0; num_test < 10; ++num_test) { + absl::BitGen random; + const int num_vars = 10; + const int num_amos = 5; + + // Generate random sat instances. + // These are always feasible. 
+ CpModelBuilder model; + std::vector vars; + for (int i = 0; i < num_vars; ++i) vars.push_back(model.NewBoolVar()); + for (int c = 0; c < num_amos; ++c) { + std::vector amo; + for (int i = 0; i < num_vars; ++i) { + if (absl::Bernoulli(random, 0.5)) { + amo.push_back(vars[i]); + } + } + if (!amo.empty()) model.AddAtMostOne(amo); + } + LinearExpr obj; + std::vector> terms; + for (int i = 0; i < num_vars; ++i) { + const int coeff = absl::Uniform(random, -100, 100); + obj += coeff * vars[i]; + terms.push_back({i, coeff}); + } + model.Maximize(obj); + + // Get Maximum bound. + SatParameters params; + params.set_log_search_progress(false); + params.set_cp_model_presolve(false); + const CpModelProto proto = model.Build(); + const CpSolverResponse response = SolveWithParameters(proto, params); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + // Same with helper + ActivityBoundHelper helper; + helper.AddAllAtMostOnes(proto); + std::vector> conditional_max; + const int64_t max_activity = + helper.ComputeMaxActivity(terms, &conditional_max); + EXPECT_GE(max_activity, response.objective_value()); + LOG(INFO) << response.objective_value() << " " << max_activity; + for (int i = 0; i < conditional_max.size(); ++i) { + // We also know the exact bound for the returned optimal solution. + EXPECT_GE(conditional_max[i][response.solution(i)], + response.objective_value()); + } + } +} + +TEST(ActivityBoundHelperTest, PresolveEnforcement) { + ActivityBoundHelper helper; + helper.AddAtMostOne({+1, +2, +3}); + helper.AddAtMostOne({+4, +5, +6, +7}); + + ConstraintProto ct; + ct.add_enforcement_literal(+1); + ct.add_enforcement_literal(NegatedRef(+2)); + ct.add_enforcement_literal(+6); + + absl::flat_hash_set at_true; + EXPECT_TRUE(helper.PresolveEnforcement({1, 2, 3, 4, 5}, &ct, &at_true)); + + // NegatedRef(+2) is a consequence of +1 (we process in order), so removed. 
+ EXPECT_THAT(ct.enforcement_literal(), ElementsAre(+1, +6)); + EXPECT_TRUE(at_true.contains(+1)); + EXPECT_TRUE(at_true.contains(NegatedRef(+2))); + EXPECT_TRUE(at_true.contains(NegatedRef(+3))); + EXPECT_TRUE(at_true.contains(NegatedRef(+4))); + EXPECT_TRUE(at_true.contains(NegatedRef(+5))); + + // Not in the list, so not contained. + EXPECT_FALSE(at_true.contains(+7)); + EXPECT_FALSE(at_true.contains(NegatedRef(+7))); +} + +// This used to fail because of the degenerate AMO with x and not(x). +TEST(ActivityBoundHelperTest, PresolveEnforcementCornerCase) { + ActivityBoundHelper helper; + helper.AddAtMostOne({+1, -2}); + + ConstraintProto ct; + ct.add_enforcement_literal(+1); + + absl::flat_hash_set at_true; + EXPECT_TRUE(helper.PresolveEnforcement({}, &ct, &at_true)); + EXPECT_THAT(ct.enforcement_literal(), ElementsAre(+1)); +} + +TEST(ClauseWithOneMissingHasherTest, BasicTest) { + absl::BitGen random; + ClauseWithOneMissingHasher hasher(random); + + hasher.RegisterClause(0, {+1, -5, +6, +7}); + hasher.RegisterClause(2, {+1, +7, +6, -4}); + EXPECT_EQ(hasher.HashWithout(0, -5), hasher.HashWithout(2, -4)); + EXPECT_NE(hasher.HashWithout(0, +6), hasher.HashWithout(2, +6)); +} + +// !X1 => X2 + X3 <= 1 +// X1 + X2 <= 1 +// +// when X1 is true, we can see that X2 + X3 <= 1 still stand, so we don't need +// the enforcement. +TEST(ActivityBoundHelper, RemoveEnforcementThatCouldBeLifted) { + ActivityBoundHelper helper; + helper.AddAtMostOne({+1, +2}); + + ConstraintProto ct; + ct.add_enforcement_literal(NegatedRef(1)); + std::vector> terms{{+2, 1}, {+3, 1}}; + + const int num_removed = helper.RemoveEnforcementThatMakesConstraintTrivial( + terms, Domain(0), Domain(0, 1), &ct); + EXPECT_EQ(num_removed, 1); + EXPECT_TRUE(ct.enforcement_literal().empty()); +} + +// !X1 => 2 * X2 + X3 + X4 <= 2 and X1 + X2 + X3 <= 1 +// Note that in this case, if X1 is 1, we have some slack, so we could lift it +// into X1 + 2 * X2 + X3 + X4 <= 2. 
+// +// But here, we could just extract X2 as an enforcement too, and just have +// X2 => X4 <= 0. This should just be a stronger relaxation. +TEST(ActivityBoundHelper, RemoveEnforcementThatCouldBeLiftedCase2) { + ActivityBoundHelper helper; + helper.AddAtMostOne({+1, +2, +3}); + + ConstraintProto ct; + ct.add_enforcement_literal(NegatedRef(1)); + std::vector> terms{{+2, 2}, {+3, 1}, {+4, 1}}; + + const int num_removed = helper.RemoveEnforcementThatMakesConstraintTrivial( + terms, Domain(0), Domain(0, 2), &ct); + EXPECT_EQ(num_removed, 1); + EXPECT_TRUE(ct.enforcement_literal().empty()); +} + +TEST(ClauseIsEnforcementImpliesLiteralTest, BasicTest) { + EXPECT_TRUE(ClauseIsEnforcementImpliesLiteral( + {+1, -5, +7, -9}, {NegatedRef(+1), NegatedRef(-5), NegatedRef(-9)}, +7)); +} + +LinearConstraintProto GetLinear(std::vector> terms) { + LinearConstraintProto result; + for (const auto [var, coeff] : terms) { + result.add_vars(var); + result.add_coeffs(coeff); + } + return result; +} + +TEST(FindSingleLinearDifferenceTest, TwoDiff1) { + LinearConstraintProto lin1 = GetLinear({{0, 1}, {1, 1}, {2, 1}}); + LinearConstraintProto lin2 = GetLinear({{0, 2}, {1, 1}, {2, 2}}); + int var1, var2; + int64_t coeff1, coeff2; + EXPECT_FALSE( + FindSingleLinearDifference(lin1, lin2, &var1, &coeff1, &var2, &coeff2)); + EXPECT_FALSE( + FindSingleLinearDifference(lin2, lin1, &var1, &coeff1, &var2, &coeff2)); +} + +TEST(FindSingleLinearDifferenceTest, TwoDiff2) { + LinearConstraintProto lin1 = GetLinear({{0, 1}, {1, 1}, {3, 1}}); + LinearConstraintProto lin2 = GetLinear({{0, 2}, {1, 1}, {2, 1}}); + int var1, var2; + int64_t coeff1, coeff2; + EXPECT_FALSE( + FindSingleLinearDifference(lin1, lin2, &var1, &coeff1, &var2, &coeff2)); + EXPECT_FALSE( + FindSingleLinearDifference(lin2, lin1, &var1, &coeff1, &var2, &coeff2)); +} + +TEST(FindSingleLinearDifferenceTest, OkNotSameVariable) { + LinearConstraintProto lin1 = GetLinear({{0, 1}, {1, 1}, {3, 1}}); + LinearConstraintProto lin2 = 
GetLinear({{0, 1}, {2, 1}, {3, 1}}); + int var1, var2; + int64_t coeff1, coeff2; + EXPECT_TRUE( + FindSingleLinearDifference(lin2, lin1, &var1, &coeff1, &var2, &coeff2)); + EXPECT_TRUE( + FindSingleLinearDifference(lin1, lin2, &var1, &coeff1, &var2, &coeff2)); + EXPECT_EQ(var1, 1); + EXPECT_EQ(coeff1, 1); + EXPECT_EQ(var2, 2); + EXPECT_EQ(coeff2, 1); +} + +TEST(FindSingleLinearDifferenceTest, OkNotSameCoeff) { + LinearConstraintProto lin1 = GetLinear({{0, 1}, {1, 1}, {3, 1}}); + LinearConstraintProto lin2 = GetLinear({{0, 1}, {1, 3}, {3, 1}}); + int var1, var2; + int64_t coeff1, coeff2; + EXPECT_TRUE( + FindSingleLinearDifference(lin2, lin1, &var1, &coeff1, &var2, &coeff2)); + EXPECT_TRUE( + FindSingleLinearDifference(lin1, lin2, &var1, &coeff1, &var2, &coeff2)); + EXPECT_EQ(var1, 1); + EXPECT_EQ(coeff1, 1); + EXPECT_EQ(var2, 1); + EXPECT_EQ(coeff2, 3); +} + +TEST(FindSingleLinearDifferenceTest, OkNotSamePosition) { + LinearConstraintProto lin1 = GetLinear({{0, 1}, {3, 1}, {5, 1}}); + LinearConstraintProto lin2 = GetLinear({{0, 1}, {1, 3}, {3, 1}}); + int var1, var2; + int64_t coeff1, coeff2; + EXPECT_TRUE( + FindSingleLinearDifference(lin2, lin1, &var1, &coeff1, &var2, &coeff2)); + EXPECT_TRUE( + FindSingleLinearDifference(lin1, lin2, &var1, &coeff1, &var2, &coeff2)); + EXPECT_EQ(var1, 5); + EXPECT_EQ(coeff1, 1); + EXPECT_EQ(var2, 1); + EXPECT_EQ(coeff2, 3); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/rins_test.cc b/ortools/sat/rins_test.cc new file mode 100644 index 0000000000..e08921c1b1 --- /dev/null +++ b/ortools/sat/rins_test.cc @@ -0,0 +1,173 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/rins.h" + +#include +#include + +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/parse_test_proto.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_loader.h" +#include "ortools/sat/model.h" +#include "ortools/sat/synchronization.h" +#include "ortools/util/random_engine.h" + +namespace operations_research { +namespace sat { + +namespace { + +using ::google::protobuf::contrib::parse_proto::ParseTestProto; + +TEST(GetRinsRensNeighborhoodTest, GetRENSNeighborhood) { + CpModelProto proto = ParseTestProto(R"pb( + variables { name: 'x' domain: 0 domain: 10 } + variables { name: 'y' domain: 0 domain: 10 } + objective { + vars: [ 0, 1 ] + coeffs: [ 1, 1 ] + } + )pb"); + + Model model; + LoadVariables(proto, /*view_all_booleans_as_integers=*/true, &model); + + SharedLPSolutionRepository lp_solutions(/*num_solutions_to_keep=*/1); + SharedIncompleteSolutionManager incomplete_solutions; + + // No solutions are recorded. + random_engine_t random; + const ReducedDomainNeighborhood empty_rins_neighborhood = + GetRinsRensNeighborhood( + /*response_manager=*/nullptr, &lp_solutions, &incomplete_solutions, + /*difficulty=*/1.0, random); + + EXPECT_EQ(empty_rins_neighborhood.reduced_domain_vars.size(), 0); + EXPECT_EQ(empty_rins_neighborhood.fixed_vars.size(), 0); + + // Add a lp solution. 
+ lp_solutions.NewLPSolution({3.5, 5.0}); + lp_solutions.Synchronize(); + + const ReducedDomainNeighborhood rins_neighborhood = GetRinsRensNeighborhood( + /*response_manager=*/nullptr, &lp_solutions, &incomplete_solutions, + /*difficulty=*/0.5, random); + + EXPECT_EQ(rins_neighborhood.reduced_domain_vars.size(), 1); + EXPECT_EQ(rins_neighborhood.reduced_domain_vars[0].first, 0); + EXPECT_EQ(rins_neighborhood.reduced_domain_vars[0].second.first, 3); + EXPECT_EQ(rins_neighborhood.reduced_domain_vars[0].second.second, 4); + + EXPECT_EQ(rins_neighborhood.fixed_vars.size(), 1); + EXPECT_EQ(rins_neighborhood.fixed_vars[0].first, 1); + EXPECT_EQ(rins_neighborhood.fixed_vars[0].second, 5); +} + +TEST(GetRinsRensNeighborhoodTest, GetRENSNeighborhoodIncomplete) { + CpModelProto proto = ParseTestProto(R"pb( + variables { name: 'x' domain: 0 domain: 10 } + variables { name: 'y' domain: 0 domain: 10 } + objective { + vars: [ 0, 1 ] + coeffs: [ 1, 1 ] + } + )pb"); + + Model model; + LoadVariables(proto, /*view_all_booleans_as_integers=*/true, &model); + + SharedLPSolutionRepository lp_solutions(/*num_solutions_to_keep=*/1); + SharedIncompleteSolutionManager incomplete_solutions; + + // No solutions are recorded. + random_engine_t random; + const ReducedDomainNeighborhood empty_rins_neighborhood = + GetRinsRensNeighborhood( + /*response_manager=*/nullptr, &lp_solutions, &incomplete_solutions, + /*difficulty=*/1.0, random); + + EXPECT_EQ(empty_rins_neighborhood.reduced_domain_vars.size(), 0); + EXPECT_EQ(empty_rins_neighborhood.fixed_vars.size(), 0); + + // Add a incomplete solution. + incomplete_solutions.AddSolution({4.0, 5.0}); + + const ReducedDomainNeighborhood rins_neighborhood = GetRinsRensNeighborhood( + /*response_manager=*/nullptr, &lp_solutions, &incomplete_solutions, + /*difficulty=*/0.0, random); + + EXPECT_EQ(rins_neighborhood.fixed_vars.size(), 2); + const int pos_0 = rins_neighborhood.fixed_vars[0].first == 0 ? 
0 : 1; + const int pos_1 = 1 - pos_0; + EXPECT_EQ(rins_neighborhood.fixed_vars[pos_0].first, 0); + EXPECT_EQ(rins_neighborhood.fixed_vars[pos_0].second, 4); + + EXPECT_EQ(rins_neighborhood.fixed_vars[pos_1].first, 1); + EXPECT_EQ(rins_neighborhood.fixed_vars[pos_1].second, 5); +} + +TEST(GetRinsRensNeighborhoodTest, GetRinsRensNeighborhoodLP) { + const CpModelProto proto = ParseTestProto(R"pb( + variables { name: 'x' domain: 0 domain: 10 } + variables { name: 'y' domain: 0 domain: 10 } + objective { + vars: [ 0, 1 ] + coeffs: [ 1, 1 ] + } + )pb"); + + Model model; + LoadVariables(proto, /*view_all_booleans_as_integers=*/true, &model); + + auto* shared_response_manager = model.GetOrCreate(); + shared_response_manager->InitializeObjective(proto); + SharedLPSolutionRepository lp_solutions(/*num_solutions_to_keep=*/1); + SharedIncompleteSolutionManager incomplete_solutions; + + // No solutions are recorded. + random_engine_t random; + const ReducedDomainNeighborhood empty_rins_neighborhood = + GetRinsRensNeighborhood(shared_response_manager, &lp_solutions, + &incomplete_solutions, + /*difficulty=*/0.5, random); + + EXPECT_EQ(empty_rins_neighborhood.reduced_domain_vars.size(), 0); + EXPECT_EQ(empty_rins_neighborhood.fixed_vars.size(), 0); + + // Add a lp solution. + lp_solutions.NewLPSolution({3.5, 5}); + lp_solutions.Synchronize(); + // Add a solution. 
+ CpSolverResponse solution; + solution.add_solution(4); + solution.add_solution(5); + shared_response_manager->NewSolution(solution.solution(), + solution.solution_info(), &model); + shared_response_manager->MutableSolutionsRepository()->Synchronize(); + + const ReducedDomainNeighborhood rins_neighborhood = GetRinsRensNeighborhood( + shared_response_manager, &lp_solutions, &incomplete_solutions, + /*difficulty=*/0.5, random); + + EXPECT_EQ(rins_neighborhood.reduced_domain_vars.size(), 0); + EXPECT_EQ(rins_neighborhood.fixed_vars.size(), 1); + EXPECT_EQ(rins_neighborhood.fixed_vars[0].first, 1); + EXPECT_EQ(rins_neighborhood.fixed_vars[0].second, 5); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/table_test.cc b/ortools/sat/table_test.cc new file mode 100644 index 0000000000..04555f6524 --- /dev/null +++ b/ortools/sat/table_test.cc @@ -0,0 +1,603 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/table.h" + +#include +#include +#include + +#include "absl/container/btree_set.h" +#include "absl/types/span.h" +#include "gtest/gtest.h" +#include "ortools/base/container_logging.h" +#include "ortools/base/gmock.h" +#include "ortools/base/logging.h" +#include "ortools/base/parse_test_proto.h" +#include "ortools/sat/cp_model.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_solver.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/sat_solver.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::google::protobuf::contrib::parse_proto::ParseTestProto; + +TEST(TableConstraintTest, EnumerationAndEncoding) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { + name: "X1" + domain: [ 0, 4 ] + } + variables { + name: "X3" + domain: [ 0, 4 ] + } + variables { + name: "X2" + domain: [ 0, 4 ] + } + variables { + name: "X4" + domain: [ 0, 4 ] + } + constraints { table { vars: 0 vars: 2 values: 0 values: 1 } } + constraints { table { vars: 1 vars: 3 values: 4 values: 0 } } + constraints { table { vars: 2 vars: 1 values: 1 values: 4 } } + )pb"); + + Model model; + model.Add(NewSatParameters("enumerate_all_solutions:true")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + // There should be just one solution [0, 4, 1, 0], but the solver used to + // report more because of extra "free" variable used in the encoding. 
+ EXPECT_EQ(count, 1); +} + +TEST(TableConstraintTest, EnumerationAndEncodingTwoVars) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { + name: "X1" + domain: [ 0, 4 ] + } + variables { + name: "X3" + domain: [ 0, 4 ] + } + constraints { + table { + vars: [ 0, 1 ] + values: [ 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4 ] + } + } + )pb"); + + Model model; + model.Add(NewSatParameters("enumerate_all_solutions:true")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + EXPECT_EQ(count, 7); +} + +TEST(TableConstraintTest, EnumerationAndEncodingFullPrefix) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + constraints { + table { + vars: [ 0, 1, 2 ] + values: [ + 0, 0, 0, 0, 1, 1, 0, 2, 2, 1, 0, 1, 1, 1, + 2, 1, 2, 0, 2, 0, 2, 2, 1, 0, 2, 2, 1 + ] + } + } + )pb"); + + Model model; + model.Add(NewSatParameters("enumerate_all_solutions:true")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + EXPECT_EQ(count, 9); +} + +TEST(TableConstraintTest, EnumerationAndEncodingPartialPrefix) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + constraints { + table { + vars: [ 0, 1, 2 ] + values: [ + 0, 0, 0, 0, 2, 2, 1, 0, 1, 1, 1, 2, 1, 2, 0, 2, 0, 2, 2, 1, 0 + ] + } + } + )pb"); + + Model model; + 
model.Add(NewSatParameters("enumerate_all_solutions:true")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + EXPECT_EQ(count, 7); +} + +TEST(TableConstraintTest, EnumerationAndEncodingInvalidTuples) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + variables { domain: [ 0, 2 ] } + constraints { + table { + vars: [ 0, 1, 2 ] + values: [ + 0, 0, 4, 0, 2, 2, 1, 0, 1, 1, 1, 2, 1, 2, 0, 2, 0, 2, 2, 1, 4 + ] + } + } + )pb"); + + Model model; + model.Add(NewSatParameters("enumerate_all_solutions:true")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + // There should be exactly one solution per valid tuple. 
+ EXPECT_EQ(count, 5); +} + +TEST(TableConstraintTest, EnumerationAndEncodingOneTupleWithAny) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 3 ] } + variables { domain: [ 0, 3 ] } + variables { domain: [ 0, 3 ] } + constraints { + table { + vars: [ 0, 1, 2 ] + values: [ 1, 0, 2, 1, 1, 2, 1, 2, 2 ] + } + } + )pb"); + + Model model; + model.Add(NewSatParameters("enumerate_all_solutions:true")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + EXPECT_EQ(count, 3); +} + +TEST(TableConstraintTest, EnumerationAndEncodingPrefixWithLargeNegatedPart) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 5 ] } + variables { domain: [ 0, 5 ] } + variables { domain: [ 0, 5 ] } + constraints { + table { + vars: [ 0, 1, 2 ] + values: [ 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5 ] + } + } + )pb"); + + Model model; + model.Add(NewSatParameters("enumerate_all_solutions:true")); + int count = 0; + model.Add( + NewFeasibleSolutionObserver([&count](const CpSolverResponse& response) { + LOG(INFO) << gtl::LogContainer(response.solution()); + ++count; + })); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + + EXPECT_EQ(count, 6); +} + +TEST(TableConstraintTest, UnsatTable) { + const CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 4 ] } + variables { domain: [ 5, 9 ] } + constraints { table { vars: 0 vars: 1 values: 3 values: 3 } } + )pb"); + + Model model; + model.Add(NewSatParameters("cp_model_presolve:false")); + const CpSolverResponse response = SolveCpModel(model_proto, &model); + EXPECT_EQ(response.status(), CpSolverStatus::INFEASIBLE); +} + 
+TEST(NegatedTableConstraintTest, BasicTest) { + CpModelBuilder cp_model; + std::vector vars; + vars.push_back(cp_model.NewIntVar({1, 2})); + vars.push_back(cp_model.NewIntVar({1, 3})); + vars.push_back(cp_model.NewIntVar({1, 3})); + + TableConstraint table = cp_model.AddForbiddenAssignments(vars); + table.AddTuple({1, 2, 1}); + table.AddTuple({1, 2, 3}); + table.AddTuple({2, 2, 1}); + + Model model; + absl::btree_set> solutions; + model.Add(NewFeasibleSolutionObserver([&](const CpSolverResponse& r) { + std::vector solution; + for (const IntVar var : vars) { + solution.push_back(SolutionIntegerValue(r, var)); + } + solutions.insert(solution); + })); + + // Tell the solver to enumerate all solutions. + SatParameters parameters; + parameters.set_enumerate_all_solutions(true); + model.Add(NewSatParameters(parameters)); + const CpSolverResponse response = SolveCpModel(cp_model.Build(), &model); + + absl::btree_set> expected{{1, 1, 1}, + {1, 1, 2}, + {1, 1, 3}, + // {1, 2, 1}, + {1, 2, 2}, + // {1, 2, 3}, + {1, 3, 1}, + {1, 3, 2}, + {1, 3, 3}, + {2, 1, 1}, + {2, 1, 2}, + {2, 1, 3}, + // {2, 2, 1}, + {2, 2, 2}, + {2, 2, 3}, + {2, 3, 1}, + {2, 3, 2}, + {2, 3, 3}}; + EXPECT_EQ(solutions, expected); +} + +TEST(AutomatonTest, TestAutomaton) { + const int kNumVars = 4; + CpModelBuilder cp_model; + std::vector variables; + for (int i = 0; i < kNumVars; ++i) { + variables.push_back(IntVar(cp_model.NewBoolVar())); + } + + AutomatonConstraint automaton = cp_model.AddAutomaton(variables, 0, {3}); + automaton.AddTransition(0, 1, 0L); + automaton.AddTransition(0, 2, 1L); + automaton.AddTransition(1, 1, 0L); + automaton.AddTransition(2, 2, 1L); + automaton.AddTransition(1, 3, 1L); + automaton.AddTransition(2, 3, 0L); + const CpModelProto expected_model = ParseTestProto(R"pb( + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + constraints { + automaton { + final_states: 3 + transition_tail: 0 
+ transition_tail: 0 + transition_tail: 1 + transition_tail: 2 + transition_tail: 1 + transition_tail: 2 + transition_head: 1 + transition_head: 2 + transition_head: 1 + transition_head: 2 + transition_head: 3 + transition_head: 3 + transition_label: 0 + transition_label: 1 + transition_label: 0 + transition_label: 1 + transition_label: 1 + transition_label: 0 + vars: 0 + vars: 1 + vars: 2 + vars: 3 + } + } + )pb"); + EXPECT_THAT(cp_model.Proto(), testing::EqualsProto(expected_model)); + + Model model; + int num_solutions = 0; + model.Add(NewFeasibleSolutionObserver([&](const CpSolverResponse& r) { + num_solutions++; + EXPECT_EQ(r.solution(0), r.solution(1)); + EXPECT_EQ(r.solution(0), r.solution(2)); + EXPECT_NE(r.solution(0), r.solution(3)); + })); + + // Tell the solver to enumerate all solutions. + SatParameters parameters; + parameters.set_enumerate_all_solutions(true); + model.Add(NewSatParameters(parameters)); + + SolveCpModel(cp_model.Build(), &model); + EXPECT_EQ(num_solutions, 2); +} + +TEST(AutomatonTest, LoopingAutomatonMultipleFinalStates) { + CpModelBuilder cp_model; + std::vector variables; + for (int i = 0; i < 10; ++i) { + variables.push_back(cp_model.NewIntVar({0, 10})); + } + + // These tuples accept "0*(12)+0*". + AutomatonConstraint automaton = cp_model.AddAutomaton(variables, 1, {3, 4}); + automaton.AddTransition(1, 1, 0); + automaton.AddTransition(1, 2, 1); + automaton.AddTransition(2, 3, 2); + automaton.AddTransition(3, 2, 1); + automaton.AddTransition(3, 4, 0); + automaton.AddTransition(4, 4, 0); + + Model model; + absl::btree_set> solutions; + model.Add(NewFeasibleSolutionObserver([&](const CpSolverResponse& r) { + std::vector solution; + for (const IntVar var : variables) { + solution.push_back(SolutionIntegerValue(r, var)); + } + solutions.insert(solution); + })); + + // Tell the solver to enumerate all solutions. 
+ SatParameters parameters; + parameters.set_enumerate_all_solutions(true); + model.Add(NewSatParameters(parameters)); + const CpSolverResponse response = SolveCpModel(cp_model.Build(), &model); + + absl::btree_set> expected{ + {0, 0, 0, 0, 0, 0, 0, 0, 1, 2}, {0, 0, 0, 0, 0, 0, 0, 1, 2, 0}, + {0, 0, 0, 0, 0, 0, 1, 2, 0, 0}, {0, 0, 0, 0, 0, 0, 1, 2, 1, 2}, + {0, 0, 0, 0, 0, 1, 2, 0, 0, 0}, {0, 0, 0, 0, 0, 1, 2, 1, 2, 0}, + {0, 0, 0, 0, 1, 2, 0, 0, 0, 0}, {0, 0, 0, 0, 1, 2, 1, 2, 0, 0}, + {0, 0, 0, 0, 1, 2, 1, 2, 1, 2}, {0, 0, 0, 1, 2, 0, 0, 0, 0, 0}, + {0, 0, 0, 1, 2, 1, 2, 0, 0, 0}, {0, 0, 0, 1, 2, 1, 2, 1, 2, 0}, + {0, 0, 1, 2, 0, 0, 0, 0, 0, 0}, {0, 0, 1, 2, 1, 2, 0, 0, 0, 0}, + {0, 0, 1, 2, 1, 2, 1, 2, 0, 0}, {0, 0, 1, 2, 1, 2, 1, 2, 1, 2}, + {0, 1, 2, 0, 0, 0, 0, 0, 0, 0}, {0, 1, 2, 1, 2, 0, 0, 0, 0, 0}, + {0, 1, 2, 1, 2, 1, 2, 0, 0, 0}, {0, 1, 2, 1, 2, 1, 2, 1, 2, 0}, + {1, 2, 0, 0, 0, 0, 0, 0, 0, 0}, {1, 2, 1, 2, 0, 0, 0, 0, 0, 0}, + {1, 2, 1, 2, 1, 2, 0, 0, 0, 0}, {1, 2, 1, 2, 1, 2, 1, 2, 0, 0}, + {1, 2, 1, 2, 1, 2, 1, 2, 1, 2}}; + EXPECT_EQ(solutions, expected); +} + +TEST(AutomatonTest, NonogramRule) { + CpModelBuilder cp_model; + std::vector variables; + for (int i = 0; i < 10; ++i) { + variables.push_back(cp_model.NewIntVar({0, 10})); + } + + // Accept sequences with 3 '1', then 2 '1', then 1 '1', separated by at least + // one '0'. 
+ AutomatonConstraint automaton = cp_model.AddAutomaton(variables, 1, {9}); + automaton.AddTransition(1, 1, 0); + automaton.AddTransition(1, 2, 1); + automaton.AddTransition(2, 3, 1); + automaton.AddTransition(3, 4, 1); + automaton.AddTransition(4, 5, 0); + automaton.AddTransition(5, 5, 0); + automaton.AddTransition(5, 6, 1); + automaton.AddTransition(6, 7, 1); + automaton.AddTransition(7, 8, 0); + automaton.AddTransition(8, 8, 0); + automaton.AddTransition(8, 9, 1); + automaton.AddTransition(9, 9, 0); + + Model model; + absl::btree_set> solutions; + model.Add(NewFeasibleSolutionObserver([&](const CpSolverResponse& r) { + std::vector solution; + for (const IntVar var : variables) { + solution.push_back(SolutionIntegerValue(r, var)); + } + solutions.insert(solution); + })); + + // Tell the solver to enumerate all solutions. + SatParameters parameters; + parameters.set_enumerate_all_solutions(true); + model.Add(NewSatParameters(parameters)); + const CpSolverResponse response = SolveCpModel(cp_model.Build(), &model); + + absl::btree_set> expected{ + {0, 0, 1, 1, 1, 0, 1, 1, 0, 1}, {0, 1, 1, 1, 0, 0, 1, 1, 0, 1}, + {0, 1, 1, 1, 0, 1, 1, 0, 0, 1}, {0, 1, 1, 1, 0, 1, 1, 0, 1, 0}, + {1, 1, 1, 0, 0, 0, 1, 1, 0, 1}, {1, 1, 1, 0, 0, 1, 1, 0, 0, 1}, + {1, 1, 1, 0, 0, 1, 1, 0, 1, 0}, {1, 1, 1, 0, 1, 1, 0, 0, 0, 1}, + {1, 1, 1, 0, 1, 1, 0, 0, 1, 0}, {1, 1, 1, 0, 1, 1, 0, 1, 0, 0}}; + EXPECT_EQ(solutions, expected); +} + +TEST(AutomatonTest, AnotherAutomaton) { + CpModelBuilder cp_model; + std::vector variables; + for (int i = 0; i < 7; ++i) { + variables.push_back(cp_model.NewIntVar({0, 10})); + } + + AutomatonConstraint automaton = + cp_model.AddAutomaton(variables, 1, {1, 2, 3, 4, 5, 6, 7}); + automaton.AddTransition(1, 2, 1); + automaton.AddTransition(1, 5, 2); + automaton.AddTransition(2, 3, 1); + automaton.AddTransition(2, 5, 2); + automaton.AddTransition(3, 4, 1); + automaton.AddTransition(3, 5, 2); + automaton.AddTransition(4, 0, 1); + automaton.AddTransition(4, 5, 2); + 
automaton.AddTransition(5, 2, 1); + automaton.AddTransition(5, 6, 2); + automaton.AddTransition(6, 2, 1); + automaton.AddTransition(6, 7, 2); + automaton.AddTransition(7, 2, 1); + automaton.AddTransition(7, 0, 2); + + Model model; + absl::btree_set> solutions; + model.Add(NewFeasibleSolutionObserver([&](const CpSolverResponse& r) { + std::vector solution; + for (const IntVar var : variables) { + solution.push_back(SolutionIntegerValue(r, var)); + } + solutions.insert(solution); + })); + + // Tell the solver to enumerate all solutions. + SatParameters parameters; + parameters.set_enumerate_all_solutions(true); + parameters.set_log_search_progress(true); + model.Add(NewSatParameters(parameters)); + const CpSolverResponse response = SolveCpModel(cp_model.Build(), &model); + + // Out of the 2**7 tuples, the one that contains 4 consecutive 1 are: + // - 1111??? (8) + // - 21111?? (4) + // - ?21111? (4) + // - ??21111 (4) + EXPECT_EQ(solutions.size(), 128 - 2 * 20); +} + +TEST(LiteralTableConstraint, PropagationFromLiterals) { + Model model; + std::vector selected; + for (int i = 0; i < 4; ++i) { + selected.push_back(Literal(model.Add(NewBooleanVariable()), true)); + } + std::vector> literals(3); + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 3; ++j) { + literals[i].push_back(Literal(model.Add(NewBooleanVariable()), true)); + } + model.Add(ExactlyOneConstraint(literals[i])); + } + + // Tuples (0, 0, 0), (1, 1, 1), (2, 2, 2), (0, 1, 2). 
+ std::vector> tuples = { + {literals[0][0], literals[1][0], literals[2][0]}, + {literals[0][1], literals[1][1], literals[2][1]}, + {literals[0][2], literals[1][2], literals[2][2]}, + {literals[0][0], literals[1][1], literals[2][2]}}; + + model.Add(LiteralTableConstraint(tuples, selected)); + SatSolver* sat_solver = model.GetOrCreate(); + + EXPECT_TRUE(sat_solver->EnqueueDecisionIfNotConflicting(literals[0][0])); + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_TRUE(sat_solver->Assignment().LiteralIsFalse(selected[1])); + EXPECT_TRUE(sat_solver->Assignment().LiteralIsFalse(selected[2])); + + EXPECT_TRUE(sat_solver->EnqueueDecisionIfNotConflicting(literals[1][1])); + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_TRUE(sat_solver->Assignment().LiteralIsFalse(selected[0])); + EXPECT_TRUE(sat_solver->Assignment().LiteralIsTrue(selected[3])); +} + +TEST(LiteralTableConstraint, PropagationFromSelected) { + Model model; + std::vector selected; + for (int i = 0; i < 4; ++i) { + selected.push_back(Literal(model.Add(NewBooleanVariable()), true)); + } + std::vector> literals(3); + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 3; ++j) { + literals[i].push_back(Literal(model.Add(NewBooleanVariable()), true)); + } + model.Add(ExactlyOneConstraint(literals[i])); + } + + // Tuples (0, 0, 0), (1, 1, 1), (2, 2, 2), (0, 1, 2). 
+ std::vector> tuples = { + {literals[0][0], literals[1][0], literals[2][0]}, + {literals[0][1], literals[1][1], literals[2][1]}, + {literals[0][2], literals[1][2], literals[2][2]}, + {literals[0][0], literals[1][1], literals[2][2]}}; + + model.Add(LiteralTableConstraint(tuples, selected)); + Trail* trail = model.GetOrCreate(); + SatSolver* sat_solver = model.GetOrCreate(); + + trail->EnqueueSearchDecision(selected[1].Negated()); + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_TRUE(sat_solver->Assignment().LiteralIsFalse(literals[0][1])); + EXPECT_TRUE(sat_solver->Assignment().LiteralIsFalse(literals[2][1])); + + trail->EnqueueSearchDecision(selected[3].Negated()); + EXPECT_TRUE(sat_solver->Propagate()); + EXPECT_TRUE(sat_solver->Assignment().LiteralIsFalse(literals[1][1])); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/var_domination_test.cc b/ortools/sat/var_domination_test.cc new file mode 100644 index 0000000000..b1c6399782 --- /dev/null +++ b/ortools/sat/var_domination_test.cc @@ -0,0 +1,614 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/var_domination.h" + +#include + +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/parse_test_proto.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/model.h" +#include "ortools/sat/presolve_context.h" +#include "ortools/util/sorted_interval_list.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::google::protobuf::contrib::parse_proto::ParseTestProto; +using ::testing::ElementsAre; +using ::testing::IsEmpty; +using ::testing::UnorderedElementsAre; + +// X + 2Y + Z = 0 +// X + 2Z >= 2 +// +// Doing (X--, Z++) is always beneficial if possible. +TEST(VarDominationTest, BasicExample1) { + CpModelProto model_proto = ParseTestProto(R"pb( + variables { + name: "X" + domain: [ -10, 10 ] + } + variables { + name: "Y" + domain: [ -10, 10 ] + } + variables { + name: "Z" + domain: [ -10, 10 ] + } + constraints { + linear { + vars: [ 0, 1, 2 ] + coeffs: [ 1, 2, 1 ] + domain: [ 0, 0 ] + } + } + constraints { + linear { + vars: [ 0, 2 ] + coeffs: [ 1, 2 ] + domain: [ 2, 9223372036854775807 ] + } + } + )pb"); + VarDomination var_dom; + Model model; + PresolveContext context(&model, &model_proto, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + ScanModelForDominanceDetection(context, &var_dom); + + const IntegerVariable X = VarDomination::RefToIntegerVariable(0); + const IntegerVariable Y = VarDomination::RefToIntegerVariable(1); + const IntegerVariable Z = VarDomination::RefToIntegerVariable(2); + EXPECT_THAT(var_dom.DominatingVariables(X), ElementsAre(Z)); + EXPECT_THAT(var_dom.DominatingVariables(Y), IsEmpty()); + EXPECT_THAT(var_dom.DominatingVariables(Z), IsEmpty()); + EXPECT_THAT(var_dom.DominatingVariables(NegationOf(X)), IsEmpty()); + EXPECT_THAT(var_dom.DominatingVariables(NegationOf(Y)), IsEmpty()); + EXPECT_THAT(var_dom.DominatingVariables(NegationOf(Z)), + ElementsAre(NegationOf(X))); +} + +// X + 2Y + Z = 
0
+// X + 2Z >= 2
+//
+// Doing (X--, Z++) is always beneficial if possible.
+TEST(VarDominationTest, ExploitDominanceRelation) {
+  CpModelProto model_proto = ParseTestProto(R"pb(
+    variables {
+      name: "X"
+      domain: [ -10, 10 ]
+    }
+    variables {
+      name: "Y"
+      domain: [ -10, 10 ]
+    }
+    variables {
+      name: "Z"
+      domain: [ -10, 10 ]
+    }
+    constraints {
+      linear {
+        vars: [ 0, 1, 2 ]
+        coeffs: [ 1, 2, 1 ]
+        domain: [ 0, 0 ]
+      }
+    }
+    constraints {
+      linear {
+        vars: [ 0, 2 ]
+        coeffs: [ 1, 2 ]
+        domain: [ 2, 9223372036854775807 ]
+      }
+    }
+  )pb");
+  VarDomination var_dom;
+  Model model;
+  PresolveContext context(&model, &model_proto, nullptr);
+  context.InitializeNewDomains();
+  context.ReadObjectiveFromProto();
+  context.UpdateNewConstraintsVariableUsage();
+  ScanModelForDominanceDetection(context, &var_dom);
+  EXPECT_TRUE(ExploitDominanceRelations(var_dom, &context));
+
+  // Because X--, Z++ is always ok, we can exclude some value from Z using
+  // equation X + 2Z >=2 we see that if Z=5, X >= -8, so we can decrease it,
+  // but for Z = 6, X might be -10, so we are not sure.
+  //
+  // Also note that X can be 10 with Z at 10 too, so we cannot reduce the domain
+  // of X.
+  EXPECT_EQ(context.DomainOf(0).ToString(), "[-10,10]");
+  EXPECT_EQ(context.DomainOf(1).ToString(), "[-10,10]");
+  EXPECT_EQ(context.DomainOf(2).ToString(), "[6,10]");
+}
+
+// Same example as before but now Z has holes, which complicate a bit the
+// final result.
+TEST(VarDominationTest, ExploitDominanceRelationWithHoles) { + CpModelProto model_proto = ParseTestProto(R"pb( + variables { + name: "X" + domain: [ -10, 10 ] + } + variables { + name: "Y" + domain: [ -10, 10 ] + } + variables { + name: "Z" + domain: [ -10, 0, 7, 10 ] + } + constraints { + linear { + vars: [ 0, 1, 2 ] + coeffs: [ 1, 2, 1 ] + domain: [ 0, 0 ] + } + } + constraints { + linear { + vars: [ 0, 2 ] + coeffs: [ 1, 2 ] + domain: [ 2, 9223372036854775807 ] + } + } + )pb"); + VarDomination var_dom; + Model model; + PresolveContext context(&model, &model_proto, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + context.UpdateNewConstraintsVariableUsage(); + ScanModelForDominanceDetection(context, &var_dom); + EXPECT_TRUE(ExploitDominanceRelations(var_dom, &context)); + + // With hole, if Z is 0, we will not be able to increase it up to 6, so we + // can't remove 0. If it is lower, we can safely increase it to zero though. + EXPECT_EQ(context.DomainOf(0).ToString(), "[-10,10]"); + EXPECT_EQ(context.DomainOf(1).ToString(), "[-10,10]"); + EXPECT_EQ(context.DomainOf(2).ToString(), "[0][7,10]"); +} + +// X + Y + Z = 0 +// X + 2 Z >= 2 +TEST(VarDominationTest, BasicExample1Variation) { + CpModelProto model_proto = ParseTestProto(R"pb( + variables { + name: "X" + domain: [ -10, 10 ] + } + variables { + name: "Y" + domain: [ -10, 10 ] + } + variables { + name: "Z" + domain: [ -10, 10 ] + } + constraints { + linear { + vars: [ 0, 1, 2 ] + coeffs: [ 1, 1, 1 ] + domain: [ 0, 0 ] + } + } + constraints { + linear { + vars: [ 0, 2 ] + coeffs: [ 1, 2 ] + domain: [ 2, 9223372036854775807 ] + } + } + )pb"); + VarDomination var_dom; + Model model; + PresolveContext context(&model, &model_proto, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + ScanModelForDominanceDetection(context, &var_dom); + + const IntegerVariable X = VarDomination::RefToIntegerVariable(0); + const IntegerVariable Y = 
VarDomination::RefToIntegerVariable(1); + const IntegerVariable Z = VarDomination::RefToIntegerVariable(2); + EXPECT_THAT(var_dom.DominatingVariables(X), ElementsAre(Z)); + EXPECT_THAT(var_dom.DominatingVariables(Y), UnorderedElementsAre(X, Z)); + EXPECT_THAT(var_dom.DominatingVariables(Z), IsEmpty()); + + // TODO(user): Transpose is broken. + EXPECT_THAT(var_dom.DominatingVariables(NegationOf(X)), + ElementsAre(NegationOf(Y))); + EXPECT_THAT(var_dom.DominatingVariables(NegationOf(Y)), IsEmpty()); + EXPECT_THAT(var_dom.DominatingVariables(NegationOf(Z)), + UnorderedElementsAre(NegationOf(X), NegationOf(Y))); +} + +// X + Y + Z >= 0 +// Y + Z <= 0 +TEST(VarDominationTest, BasicExample2) { + CpModelProto model_proto = ParseTestProto(R"pb( + variables { + name: "X" + domain: [ -10, 10 ] + } + variables { + name: "Y" + domain: [ -10, 10 ] + } + variables { + name: "Z" + domain: [ -10, 10 ] + } + constraints { + linear { + vars: [ 0, 1, 2 ] + coeffs: [ 1, 1, 1 ] + domain: [ 0, 9223372036854775807 ] + } + } + constraints { + linear { + vars: [ 1, 2 ] + coeffs: [ 1, 1 ] + domain: [ -9223372036854775808, 0 ] + } + } + )pb"); + VarDomination var_dom; + Model model; + PresolveContext context(&model, &model_proto, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + ScanModelForDominanceDetection(context, &var_dom); + + const IntegerVariable X = VarDomination::RefToIntegerVariable(0); + const IntegerVariable Y = VarDomination::RefToIntegerVariable(1); + const IntegerVariable Z = VarDomination::RefToIntegerVariable(2); + + EXPECT_FALSE(var_dom.CanFreelyDecrease(X)); + EXPECT_THAT(var_dom.DominatingVariables(X), IsEmpty()); + EXPECT_TRUE(var_dom.CanFreelyDecrease(NegationOf(X))); + + // We do not include X in these lists, because X++ can always happen. 
+ EXPECT_THAT(var_dom.DominatingVariables(Y), UnorderedElementsAre(Z)); + EXPECT_THAT(var_dom.DominatingVariables(Z), UnorderedElementsAre(Y)); + EXPECT_THAT(var_dom.DominatingVariables(NegationOf(Y)), + ElementsAre(NegationOf(Z))); + EXPECT_THAT(var_dom.DominatingVariables(NegationOf(Z)), + ElementsAre(NegationOf(Y))); +} + +// X + Y <= 0 +// Y + Z <= 0 +TEST(VarDominationTest, BasicExample3) { + CpModelProto model_proto = ParseTestProto(R"pb( + variables { + name: "X" + domain: [ -10, 10 ] + } + variables { + name: "Y" + domain: [ -10, 10 ] + } + variables { + name: "Z" + domain: [ -10, 10 ] + } + constraints { + linear { + vars: [ 0, 1 ] + coeffs: [ 1, 1 ] + domain: [ -9223372036854775808, 0 ] + } + } + constraints { + linear { + vars: [ 1, 2 ] + coeffs: [ 1, 1 ] + domain: [ -9223372036854775808, 0 ] + } + } + )pb"); + VarDomination var_dom; + Model model; + PresolveContext context(&model, &model_proto, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + ScanModelForDominanceDetection(context, &var_dom); + + const IntegerVariable X = VarDomination::RefToIntegerVariable(0); + const IntegerVariable Y = VarDomination::RefToIntegerVariable(1); + const IntegerVariable Z = VarDomination::RefToIntegerVariable(2); + + EXPECT_TRUE(var_dom.CanFreelyDecrease(X)); + EXPECT_TRUE(var_dom.CanFreelyDecrease(Y)); + EXPECT_TRUE(var_dom.CanFreelyDecrease(Z)); + EXPECT_FALSE(var_dom.CanFreelyDecrease(NegationOf(X))); + EXPECT_FALSE(var_dom.CanFreelyDecrease(NegationOf(Y))); + EXPECT_FALSE(var_dom.CanFreelyDecrease(NegationOf(Z))); + + // No domination here, because all the dominator can just freely move in + // one direction. 
+ EXPECT_THAT(var_dom.DominatingVariables(NegationOf(X)), IsEmpty()); + EXPECT_THAT(var_dom.DominatingVariables(NegationOf(Y)), IsEmpty()); + EXPECT_THAT(var_dom.DominatingVariables(NegationOf(Z)), IsEmpty()); +} + +// X + Y >= 0 +// Y + Z >= 0 +TEST(VarDominationTest, BasicExample4) { + CpModelProto model_proto = ParseTestProto(R"pb( + variables { + name: "X" + domain: [ -10, 10 ] + } + variables { + name: "Y" + domain: [ -10, 10 ] + } + variables { + name: "Z" + domain: [ -10, 10 ] + } + constraints { + linear { + vars: [ 0, 1 ] + coeffs: [ 1, 1 ] + domain: [ 0, 9223372036854775807 ] + } + } + constraints { + linear { + vars: [ 1, 2 ] + coeffs: [ 1, 1 ] + domain: [ 0, 9223372036854775807 ] + } + } + )pb"); + VarDomination var_dom; + Model model; + PresolveContext context(&model, &model_proto, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + ScanModelForDominanceDetection(context, &var_dom); + + const IntegerVariable X = VarDomination::RefToIntegerVariable(0); + const IntegerVariable Y = VarDomination::RefToIntegerVariable(1); + const IntegerVariable Z = VarDomination::RefToIntegerVariable(2); + + EXPECT_FALSE(var_dom.CanFreelyDecrease(X)); + EXPECT_FALSE(var_dom.CanFreelyDecrease(Y)); + EXPECT_FALSE(var_dom.CanFreelyDecrease(Z)); + EXPECT_TRUE(var_dom.CanFreelyDecrease(NegationOf(X))); + EXPECT_TRUE(var_dom.CanFreelyDecrease(NegationOf(Y))); + EXPECT_TRUE(var_dom.CanFreelyDecrease(NegationOf(Z))); + + EXPECT_THAT(var_dom.DominatingVariables(X), IsEmpty()); + EXPECT_THAT(var_dom.DominatingVariables(Y), IsEmpty()); + EXPECT_THAT(var_dom.DominatingVariables(Z), IsEmpty()); +} + +// X + Y + Z = 0 +TEST(VarDominationTest, AllEquivalent) { + CpModelProto model_proto = ParseTestProto(R"pb( + variables { + name: "X" + domain: [ -10, 10 ] + } + variables { + name: "Y" + domain: [ -10, 10 ] + } + variables { + name: "Z" + domain: [ -10, 10 ] + } + constraints { + linear { + vars: [ 0, 1, 2 ] + coeffs: [ 1, 1, 1 ] + domain: [ 0, 0 ] + } + } 
+ )pb"); + VarDomination var_dom; + Model model; + PresolveContext context(&model, &model_proto, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + ScanModelForDominanceDetection(context, &var_dom); + + // Domination is slightly related to symmetry and duplicate columns. + const IntegerVariable X = VarDomination::RefToIntegerVariable(0); + const IntegerVariable Y = VarDomination::RefToIntegerVariable(1); + const IntegerVariable Z = VarDomination::RefToIntegerVariable(2); + EXPECT_THAT(var_dom.DominatingVariables(X), UnorderedElementsAre(Y, Z)); + EXPECT_THAT(var_dom.DominatingVariables(Y), UnorderedElementsAre(X, Z)); + EXPECT_THAT(var_dom.DominatingVariables(Z), UnorderedElementsAre(X, Y)); +} + +// X + Y + Z <= 0 (to prevent freely moving variables). +// -X + -2Y + -3Z <= 0 +TEST(VarDominationTest, NegativeCoefficients) { + CpModelProto model_proto = ParseTestProto(R"pb( + variables { + name: "X" + domain: [ -10, 10 ] + } + variables { + name: "Y" + domain: [ -10, 10 ] + } + variables { + name: "Z" + domain: [ -10, 10 ] + } + constraints { + linear { + vars: [ 0, 1, 2 ] + coeffs: [ 1, 1, 1 ] + domain: [ -9223372036854775808, 0 ] + } + } + constraints { + linear { + vars: [ 0, 1, 2 ] + coeffs: [ -1, -2, -3 ] + domain: [ -9223372036854775808, 0 ] + } + } + )pb"); + VarDomination var_dom; + Model model; + PresolveContext context(&model, &model_proto, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + ScanModelForDominanceDetection(context, &var_dom); + + const IntegerVariable X = VarDomination::RefToIntegerVariable(0); + const IntegerVariable Y = VarDomination::RefToIntegerVariable(1); + const IntegerVariable Z = VarDomination::RefToIntegerVariable(2); + EXPECT_THAT(var_dom.DominatingVariables(X), UnorderedElementsAre(Y, Z)); + EXPECT_THAT(var_dom.DominatingVariables(Y), UnorderedElementsAre(Z)); + EXPECT_THAT(var_dom.DominatingVariables(Z), IsEmpty()); +} + +// Bound propagation see nothing, but if we 
can remove feasible solution, from +// this constraint point of view, all variables can freely increase or decrease +// until zero (because the constraint is trivial above/below). +// +// -20 <= X + Y + Z <= 20 +TEST(DualBoundReductionTest, BasicTest) { + CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ -10, 10 ] } + variables { domain: [ -10, 10 ] } + variables { domain: [ -10, 10 ] } + constraints { + linear { + vars: [ 0, 1, 2 ] + coeffs: [ 1, 1, 1 ] + domain: [ -20, 20 ] + } + } + )pb"); + DualBoundStrengthening dual_bound_strengthening; + Model model; + PresolveContext context(&model, &model_proto, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + ScanModelForDualBoundStrengthening(context, &dual_bound_strengthening); + EXPECT_TRUE(dual_bound_strengthening.Strengthen(&context)); + EXPECT_EQ(context.DomainOf(0).ToString(), "[0]"); + EXPECT_EQ(context.DomainOf(1).ToString(), "[0]"); + EXPECT_EQ(context.DomainOf(2).ToString(), "[0]"); +} + +TEST(DualBoundReductionTest, CarefulWithHoles) { + CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ -10, 10 ] } + variables { domain: [ -10, 0, 7, 10 ] } + variables { domain: [ -10, -6, 3, 10 ] } + constraints { + linear { + vars: [ 0, 1, 2 ] + coeffs: [ 1, 1, 1 ] + domain: [ -15, 15 ] + } + } + )pb"); + DualBoundStrengthening dual_bound_strengthening; + Model model; + PresolveContext context(&model, &model_proto, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + ScanModelForDualBoundStrengthening(context, &dual_bound_strengthening); + EXPECT_TRUE(dual_bound_strengthening.Strengthen(&context)); + EXPECT_EQ(context.DomainOf(0).ToString(), "[-5,5]"); + EXPECT_EQ(context.DomainOf(1).ToString(), "[-5,0][7]"); + EXPECT_EQ(context.DomainOf(2).ToString(), "[-6][3,5]"); +} + +// Here the infered bounds crosses, so we have multiple choices, we will fix +// to the lowest magnitude. 
+TEST(DualBoundReductionTest, Choices) { + CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ -10, 10 ] } + variables { domain: [ -10, -2, 3, 10 ] } + variables { domain: [ -10, -3, 2, 10 ] } + constraints { + linear { + vars: [ 0, 1, 2 ] + coeffs: [ 1, 1, 1 ] + domain: [ -25, 25 ] + } + } + )pb"); + DualBoundStrengthening dual_bound_strengthening; + Model model; + PresolveContext context(&model, &model_proto, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + ScanModelForDualBoundStrengthening(context, &dual_bound_strengthening); + EXPECT_TRUE(dual_bound_strengthening.Strengthen(&context)); + EXPECT_EQ(context.DomainOf(0).ToString(), "[0]"); + EXPECT_EQ(context.DomainOf(1).ToString(), "[-2]"); + EXPECT_EQ(context.DomainOf(2).ToString(), "[2]"); +} + +TEST(DualBoundReductionTest, EquivalenceDetection) { + CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { + # a => b + enforcement_literal: 0 + bool_and { literals: [ 1 ] } + } + constraints { + linear { + # b == c (we just want b and c not to freely vary) + vars: [ 1, 2 ] + coeffs: [ 1, -1 ] + domain: [ 0, 0 ] + } + } + constraints { + bool_or { + literals: [ 0, 2 ] # a + c >= 0 + } + } + )pb"); + DualBoundStrengthening dual_bound_strengthening; + Model model; + PresolveContext context(&model, &model_proto, nullptr); + context.InitializeNewDomains(); + context.ReadObjectiveFromProto(); + ScanModelForDualBoundStrengthening(context, &dual_bound_strengthening); + EXPECT_TRUE(dual_bound_strengthening.Strengthen(&context)); + EXPECT_EQ(context.DomainOf(0).ToString(), "[0,1]"); + EXPECT_EQ(context.DomainOf(1).ToString(), "[0,1]"); + EXPECT_EQ(context.DomainOf(2).ToString(), "[0,1]"); + + // Equivalence between a and b. 
+ EXPECT_EQ(context.GetLiteralRepresentative(1), 0); +} + +} // namespace +} // namespace sat +} // namespace operations_research From da1be192d6f8656d4a243249b7bb7aca9ee7e6d1 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 3 Oct 2024 11:00:02 +0200 Subject: [PATCH 040/105] graph: export from google3 --- ortools/graph/BUILD.bazel | 9 +++++++-- ortools/graph/assignment_test.cc | 1 + ortools/graph/christofides_test.cc | 14 ++++++-------- ortools/graph/cliques_test.cc | 9 +++++---- ortools/graph/min_cost_flow_test.cc | 8 ++++++-- ortools/graph/one_tree_lower_bound_test.cc | 14 +++++++------- ortools/graph/samples/assignment_min_flow.py | 15 +++++++-------- .../graph/samples/simple_min_cost_flow_program.py | 3 ++- 8 files changed, 41 insertions(+), 32 deletions(-) diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel index 0d1be0e5a5..45811e3633 100644 --- a/ortools/graph/BUILD.bazel +++ b/ortools/graph/BUILD.bazel @@ -178,10 +178,11 @@ cc_test( deps = [ ":cliques", "//ortools/base:gmock_main", - "//ortools/base:mathutil", "//ortools/util:time_limit", "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/flags:flag", "@com_google_absl//absl/functional:bind_front", + "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/random:distributions", "@com_google_absl//absl/strings", @@ -317,8 +318,8 @@ cc_test( ":one_tree_lower_bound", "//ortools/base:gmock_main", "//ortools/base:path", - "//ortools/base:types", "//ortools/routing/parsers:tsplib_parser", + "@com_google_absl//absl/log", "@com_google_absl//absl/types:span", ], ) @@ -512,9 +513,12 @@ cc_test( size = "medium", srcs = ["min_cost_flow_test.cc"], deps = [ + ":ebert_graph", ":graphs", ":min_cost_flow", "//ortools/base:gmock_main", + "@com_google_absl//absl/log", + "@com_google_absl//absl/log:check", "@com_google_absl//absl/random:distributions", "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/types:span", @@ -567,6 
+571,7 @@ cc_test( srcs = ["assignment_test.cc"], deps = [ ":assignment", + ":ebert_graph", "//ortools/base:gmock_main", ], ) diff --git a/ortools/graph/assignment_test.cc b/ortools/graph/assignment_test.cc index c36a795699..4c169f9d45 100644 --- a/ortools/graph/assignment_test.cc +++ b/ortools/graph/assignment_test.cc @@ -16,6 +16,7 @@ #include #include "gtest/gtest.h" +#include "ortools/graph/ebert_graph.h" namespace operations_research { diff --git a/ortools/graph/christofides_test.cc b/ortools/graph/christofides_test.cc index c092b06722..5058f9a489 100644 --- a/ortools/graph/christofides_test.cc +++ b/ortools/graph/christofides_test.cc @@ -13,8 +13,8 @@ #include "ortools/graph/christofides.h" +#include #include -#include #include #include #include @@ -26,7 +26,6 @@ #include "benchmark/benchmark.h" #include "gtest/gtest.h" #include "ortools/base/logging.h" -#include "ortools/base/macros.h" namespace operations_research { @@ -210,28 +209,27 @@ TEST(HamiltonianPathTest, Ulysses) { } TEST(ChristofidesTest, EmptyModel) { - ChristofidesPathSolver chris_solver(0, [](int i, int j) { return 0; }); + ChristofidesPathSolver chris_solver(0, [](int, int) { return 0; }); EXPECT_EQ(0, chris_solver.TravelingSalesmanCost()); EXPECT_TRUE(chris_solver.TravelingSalesmanPath().empty()); } TEST(ChristofidesTest, SingleNodeModel) { - ChristofidesPathSolver chris_solver(1, [](int i, int j) { return 0; }); + ChristofidesPathSolver chris_solver(1, [](int, int) { return 0; }); EXPECT_EQ(0, chris_solver.TravelingSalesmanCost()); EXPECT_EQ("0 0 ", PathToString(chris_solver.TravelingSalesmanPath())); } TEST(ChristofidesTest, Int64Overflow) { ChristofidesPathSolver chris_solver( - 10, [](int i, int j) { return std::numeric_limits::max() / 2; }); + 10, [](int, int) { return std::numeric_limits::max() / 2; }); EXPECT_EQ(std::numeric_limits::max(), chris_solver.TravelingSalesmanCost()); } TEST(ChristofidesTest, SaturatedDouble) { - ChristofidesPathSolver chris_solver(10, [](int i, int j) { - 
return std::numeric_limits::max() / 2.0; - }); + ChristofidesPathSolver chris_solver( + 10, [](int, int) { return std::numeric_limits::max() / 2.0; }); EXPECT_EQ(std::numeric_limits::infinity(), chris_solver.TravelingSalesmanCost()); } diff --git a/ortools/graph/cliques_test.cc b/ortools/graph/cliques_test.cc index b28bbda188..ddb68665ac 100644 --- a/ortools/graph/cliques_test.cc +++ b/ortools/graph/cliques_test.cc @@ -24,6 +24,7 @@ #include #include "absl/container/flat_hash_set.h" +#include "absl/flags/flag.h" #include "absl/functional/bind_front.h" #include "absl/log/check.h" #include "absl/random/distributions.h" @@ -31,7 +32,7 @@ #include "absl/types/span.h" #include "benchmark/benchmark.h" #include "gtest/gtest.h" -#include "ortools/base/mathutil.h" +#include "ortools/base/logging.h" #include "ortools/util/time_limit.h" namespace operations_research { @@ -98,7 +99,7 @@ class CliqueSizeVerifier { int64_t num_cliques_; }; -inline bool FullGraph(int index1, int index2) { return true; } +inline bool FullGraph(int /*index1*/, int /*index2*/) { return true; } inline bool EmptyGraph(int index1, int index2) { return (index1 == index2); } @@ -562,7 +563,7 @@ TEST(BronKerboschAlgorithmTest, WallTimeLimit) { absl::SetFlag(&FLAGS_time_limit_use_usertime, true); TimeLimit time_limit(kTimeLimitSeconds); - const auto graph = [kNumPartitions](int index1, int index2) { + const auto graph = [](int index1, int index2) { return FullKPartiteGraph(kNumPartitions, index1, index2); }; CliqueSizeVerifier verifier(kExpectedCliqueSize, kExpectedCliqueSize); @@ -583,7 +584,7 @@ TEST(BronKerboschAlgorithmTest, DeterministicTimeLimit) { std::unique_ptr time_limit = TimeLimit::FromDeterministicTime(kDeterministicLimit); - const auto graph = [kNumPartitions](int index1, int index2) { + const auto graph = [](int index1, int index2) { return FullKPartiteGraph(kNumPartitions, index1, index2); }; CliqueSizeVerifier verifier(kExpectedCliqueSize, kExpectedCliqueSize); diff --git 
a/ortools/graph/min_cost_flow_test.cc b/ortools/graph/min_cost_flow_test.cc index 8ad26727b2..3391167d48 100644 --- a/ortools/graph/min_cost_flow_test.cc +++ b/ortools/graph/min_cost_flow_test.cc @@ -20,12 +20,15 @@ #include #include +#include "absl/log/check.h" #include "absl/random/distributions.h" #include "absl/strings/str_format.h" #include "absl/types/span.h" #include "benchmark/benchmark.h" #include "gtest/gtest.h" #include "ortools/algorithms/binary_search.h" +#include "ortools/base/logging.h" +#include "ortools/graph/ebert_graph.h" #include "ortools/graph/graph.h" #include "ortools/graph/graphs.h" #include "ortools/linear_solver/linear_solver.h" @@ -631,7 +634,8 @@ struct MinCostFlowSolver { template void FullRandomAssignment(typename MinCostFlowSolver::Solver f, NodeIndex num_sources, NodeIndex num_targets, - CostValue expected_cost1, CostValue expected_cost2) { + CostValue expected_cost1, + CostValue /*expected_cost2*/) { const CostValue kCostRange = 1000; Graph graph; GenerateCompleteGraph(num_sources, num_targets, &graph); @@ -656,7 +660,7 @@ template void PartialRandomAssignment(typename MinCostFlowSolver::Solver f, NodeIndex num_sources, NodeIndex num_targets, CostValue expected_cost1, - CostValue expected_cost2) { + CostValue /*expected_cost2*/) { const NodeIndex kDegree = 10; const CostValue kCostRange = 1000; Graph graph; diff --git a/ortools/graph/one_tree_lower_bound_test.cc b/ortools/graph/one_tree_lower_bound_test.cc index 977b0c5236..0554d56e69 100644 --- a/ortools/graph/one_tree_lower_bound_test.cc +++ b/ortools/graph/one_tree_lower_bound_test.cc @@ -21,15 +21,15 @@ #include "absl/types/span.h" #include "gtest/gtest.h" +#include "ortools/base/logging.h" #include "ortools/base/path.h" -#include "ortools/base/types.h" #include "ortools/routing/parsers/tsplib_parser.h" namespace operations_research { namespace { TEST(OneTreeLBTest, VolgenantJonkerEmpty) { - const double cost = ComputeOneTreeLowerBound(0, [](int from, int to) { + const double 
cost = ComputeOneTreeLowerBound(0, [](int /*from*/, int /*to*/) { ADD_FAILURE(); // Making sure the function is not being called. return 0; }); @@ -42,7 +42,7 @@ TEST(OneTreeLBTest, HeldWolfeCrowderEmpty) { TravelingSalesmanLowerBoundParameters::HeldWolfeCrowder; const double cost = ComputeOneTreeLowerBoundWithParameters( 0, - [](int from, int to) { + [](int /*from*/, int /*to*/) { ADD_FAILURE(); // Making sure the function is not being called. return 0; }, @@ -52,7 +52,7 @@ TEST(OneTreeLBTest, HeldWolfeCrowderEmpty) { TEST(OneTreeLBTest, VolgenantJonkerOneNode) { const double cost = - ComputeOneTreeLowerBound(1, [](int from, int to) { return 0; }); + ComputeOneTreeLowerBound(1, [](int /*from*/, int /*to*/) { return 0; }); EXPECT_EQ(0, cost); } @@ -61,13 +61,13 @@ TEST(OneTreeLBTest, HeldWolfeCrowderOneNode) { parameters.algorithm = TravelingSalesmanLowerBoundParameters::HeldWolfeCrowder; const double cost = ComputeOneTreeLowerBoundWithParameters( - 1, [](int from, int to) { return 0; }, parameters); + 1, [](int /*from*/, int /*to*/) { return 0; }, parameters); EXPECT_EQ(0, cost); } TEST(OneTreeLBTest, VolgenantJonkerTwoNodes) { const double cost = - ComputeOneTreeLowerBound(2, [](int from, int to) { return 1; }); + ComputeOneTreeLowerBound(2, [](int /*from*/, int /*to*/) { return 1; }); EXPECT_EQ(2, cost); } @@ -76,7 +76,7 @@ TEST(OneTreeLBTest, HeldWolfeCrowderTwoNodes) { parameters.algorithm = TravelingSalesmanLowerBoundParameters::HeldWolfeCrowder; const double cost = ComputeOneTreeLowerBoundWithParameters( - 2, [](int from, int to) { return 1; }, parameters); + 2, [](int /*from*/, int /*to*/) { return 1; }, parameters); EXPECT_EQ(2, cost); } diff --git a/ortools/graph/samples/assignment_min_flow.py b/ortools/graph/samples/assignment_min_flow.py index 22f9cd8e18..381a75da5f 100755 --- a/ortools/graph/samples/assignment_min_flow.py +++ b/ortools/graph/samples/assignment_min_flow.py @@ -51,13 +51,13 @@ def main(): # [START constraints] # Add each arc. 
- for i in range(len(start_nodes)): + for idx, _ in enumerate(start_nodes): smcf.add_arc_with_capacity_and_unit_cost( - start_nodes[i], end_nodes[i], capacities[i], costs[i] + start_nodes[idx], end_nodes[idx], capacities[idx], costs[idx] ) # Add node supplies. - for i in range(len(supplies)): - smcf.set_node_supply(i, supplies[i]) + for idx, supply in enumerate(supplies): + smcf.set_node_supply(idx, supply) # [END constraints] # [START solve] @@ -67,8 +67,7 @@ def main(): # [START print_solution] if status == smcf.OPTIMAL: - print("Total cost = ", smcf.optimal_cost()) - print() + print(f"Total cost = {smcf.optimal_cost()}") for arc in range(smcf.num_arcs()): # Can ignore arcs leading out of source or into sink. if smcf.tail(arc) != source and smcf.head(arc) != sink: @@ -77,8 +76,8 @@ def main(): # give an assignment of worker to task. if smcf.flow(arc) > 0: print( - "Worker %d assigned to task %d. Cost = %d" - % (smcf.tail(arc), smcf.head(arc), smcf.unit_cost(arc)) + f"Worker {smcf.tail(arc)} assigned to task {smcf.head(arc)}. 
" + f"Cost = {smcf.unit_cost(arc)}" ) else: print("There was an issue with the min cost flow input.") diff --git a/ortools/graph/samples/simple_min_cost_flow_program.py b/ortools/graph/samples/simple_min_cost_flow_program.py index b3b06856bf..569058e037 100755 --- a/ortools/graph/samples/simple_min_cost_flow_program.py +++ b/ortools/graph/samples/simple_min_cost_flow_program.py @@ -68,7 +68,8 @@ def main(): costs = solution_flows * unit_costs for arc, flow, cost in zip(all_arcs, solution_flows, costs): print( - f"{smcf.tail(arc):1} -> {smcf.head(arc)} {flow:3} / {smcf.capacity(arc):3} {cost}" + f"{smcf.tail(arc):1} -> " + f"{smcf.head(arc)} {flow:3} / {smcf.capacity(arc):3} {cost}" ) # [END print_solution] From 781a98a402b7c7cce47bd29566a41523c175b310 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 3 Oct 2024 11:00:53 +0200 Subject: [PATCH 041/105] routing: export from google3 --- ortools/base/BUILD.bazel | 9 + ortools/base/proto_enum_utils.h | 209 ++++++++++++++++++ .../constraint_solver/constraint_solver.cc | 1 + ortools/constraint_solver/local_search.cc | 129 +++++++---- ortools/constraint_solver/search_stats.proto | 11 +- ortools/routing/BUILD.bazel | 39 ++-- ortools/routing/filters.cc | 75 +++++-- ortools/routing/ils.cc | 11 +- ortools/routing/insertion_lns.cc | 2 + ortools/routing/parameters.cc | 22 ++ ortools/routing/parameters.proto | 40 ++-- ortools/routing/parameters_utils.cc | 46 ++++ ortools/routing/parameters_utils.h | 33 +++ ortools/routing/routing.cc | 45 ++-- ortools/routing/search.cc | 56 +++-- ortools/routing/search.h | 18 +- 16 files changed, 598 insertions(+), 148 deletions(-) create mode 100644 ortools/base/proto_enum_utils.h create mode 100644 ortools/routing/parameters_utils.cc create mode 100644 ortools/routing/parameters_utils.h diff --git a/ortools/base/BUILD.bazel b/ortools/base/BUILD.bazel index a74cdf1575..e4bea63389 100644 --- a/ortools/base/BUILD.bazel +++ b/ortools/base/BUILD.bazel @@ -460,6 +460,15 @@ cc_library( ], ) 
+cc_library( + name = "proto_enum_utils", + hdrs = ["proto_enum_utils.h"], + deps = [ + "@com_google_absl//absl/types:span", + "@com_google_protobuf//:protobuf", + ], +) + cc_library( name = "ptr_util", hdrs = ["ptr_util.h"], diff --git a/ortools/base/proto_enum_utils.h b/ortools/base/proto_enum_utils.h new file mode 100644 index 0000000000..301e998869 --- /dev/null +++ b/ortools/base/proto_enum_utils.h @@ -0,0 +1,209 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_BASE_PROTO_ENUM_UTILS_H_ +#define OR_TOOLS_BASE_PROTO_ENUM_UTILS_H_ + +// Provides utility functions that help with handling Protocol Buffer enums. +// +// Examples: +// +// A function to easily iterate over all defined values of an enum known at +// compile-time: +// +// for (Proto::Enum e : EnumerateEnumValues()) { +// ... +// } +// + +#include +#include + +#include "absl/types/span.h" +#include "google/protobuf/descriptor.pb.h" + +namespace google::protobuf::contrib::utils { + +using google::protobuf::GetEnumDescriptor; +using google::protobuf::RepeatedField; + +template +class ProtoEnumIterator; + +template +class EnumeratedProtoEnumView; + +template +bool operator==(const ProtoEnumIterator& a, const ProtoEnumIterator& b); + +template +bool operator!=(const ProtoEnumIterator& a, const ProtoEnumIterator& b); + +// Generic Proto enum iterator. 
+template +class ProtoEnumIterator { + public: + typedef E value_type; + typedef std::forward_iterator_tag iterator_category; + typedef int difference_type; + typedef E* pointer; + typedef E& reference; + + ProtoEnumIterator() : current_(0) {} + + ProtoEnumIterator(const ProtoEnumIterator& other) + : current_(other.current_) {} + + ProtoEnumIterator& operator=(const ProtoEnumIterator& other) { + current_ = other.current_; + return *this; + } + + ProtoEnumIterator operator++(int) { + ProtoEnumIterator other(*this); + ++(*this); + return other; + } + + ProtoEnumIterator& operator++() { + ++current_; + return *this; + } + + E operator*() const { + return static_cast(GetEnumDescriptor()->value(current_)->number()); + } + + private: + explicit ProtoEnumIterator(int current) : current_(current) {} + + int current_; + + // Only EnumeratedProtoEnumView can instantiate ProtoEnumIterator. + friend class EnumeratedProtoEnumView; + friend bool operator== + <>(const ProtoEnumIterator& a, const ProtoEnumIterator& b); + friend bool operator!= + <>(const ProtoEnumIterator& a, const ProtoEnumIterator& b); +}; + +template +bool operator==(const ProtoEnumIterator& a, const ProtoEnumIterator& b) { + return a.current_ == b.current_; +} + +template +bool operator!=(const ProtoEnumIterator& a, const ProtoEnumIterator& b) { + return a.current_ != b.current_; +} + +template +class EnumeratedProtoEnumView { + public: + typedef E value_type; + typedef ProtoEnumIterator iterator; + iterator begin() const { return iterator(0); } + iterator end() const { + return iterator(GetEnumDescriptor()->value_count()); + } +}; + +// Returns an EnumeratedProtoEnumView that can be iterated over: +// for (Proto::Enum e : EnumerateEnumValues()) { +// ... 
+// } +template +EnumeratedProtoEnumView EnumerateEnumValues() { + return EnumeratedProtoEnumView(); +} + +// Returns a view that allows to iterate directly over the enum values +// in an enum repeated field, wrapping the repeated field with a type-safe +// iterator that provides access to the enum values. +// +// for (Enum enum : +// REPEATED_ENUM_ADAPTER(message, repeated_enum_field)) { +// ... +// } +// +// It provides greater safety than iterating over the enum directly, as the +// following will fail to type-check: +// +// .proto +// RightEnum enum = 5; +// +// client .cc +// for (WrongEnum e : REPEATED_ENUM_ADAPTER(proto, enum)) { <- Error: Cannot +// cast from +// RightEnum to +// WrongEnum +// } +// +// NOTE: As per http://shortn/_CYfjpruK6N, unrecognized enum values are treated +// differently between proto2 and proto3. +// +// For proto2, they are stripped out from the message when read, so all +// unrecognized enum values from the wire format will be skipped when iterating +// over the wrapper (this is the same behavior as iterating over the +// RepeatedField directly). +// +// For proto3, they are left as-is, so unrecognized enum values from the wire +// format will still be returned when iterating over the wrapper (this is the +// same behavior as iterating over the RepeatedField directly). +// +#define REPEATED_ENUM_ADAPTER(var, field) \ + google::protobuf::contrib::utils::internal::RepeatedEnumView< \ + decltype(var.field(0))>(var.field()) + +// ==== WARNING TO USERS ==== +// Below are internal implementations, not public API, and may change without +// notice. Do NOT use directly. + +namespace internal { + +// Implementation for REPEATED_ENUM_ADAPTER. This does not provide type safety +// thus should be used through REPEATED_ENUM_ADAPTER only. See cr/246914845 for +// context. 
+template +class RepeatedEnumView { + public: + class Iterator : public std::iterator { + public: + explicit Iterator(RepeatedField::const_iterator ptr) : ptr_(ptr) {} + bool operator==(const Iterator& it) const { return ptr_ == it.ptr_; } + bool operator!=(const Iterator& it) const { return ptr_ != it.ptr_; } + Iterator& operator++() { + ++ptr_; + return *this; + } + E operator*() const { return static_cast(*ptr_); } + + private: + RepeatedField::const_iterator ptr_; + }; + + explicit RepeatedEnumView(const RepeatedField& repeated_field) + : repeated_field_(repeated_field) {} + + Iterator begin() const { return Iterator(repeated_field_.begin()); } + Iterator end() const { return Iterator(repeated_field_.end()); } + + private: + const RepeatedField& repeated_field_; +}; + +} // namespace internal + +} // namespace google::protobuf::contrib::utils + +#endif // OR_TOOLS_BASE_PROTO_ENUM_UTILS_H_ diff --git a/ortools/constraint_solver/constraint_solver.cc b/ortools/constraint_solver/constraint_solver.cc index 09f5812b95..3a1cee76fd 100644 --- a/ortools/constraint_solver/constraint_solver.cc +++ b/ortools/constraint_solver/constraint_solver.cc @@ -3280,6 +3280,7 @@ Decision* ProfiledDecisionBuilder::Next(Solver* const solver) { Decision* const decision = db_->Next(solver); timer_.Stop(); seconds_ += timer_.Get(); + solver->set_context(""); return decision; } diff --git a/ortools/constraint_solver/local_search.cc b/ortools/constraint_solver/local_search.cc index ec33c28127..3bcd9f993b 100644 --- a/ortools/constraint_solver/local_search.cc +++ b/ortools/constraint_solver/local_search.cc @@ -1418,11 +1418,21 @@ class MakeChainInactiveOperator : public PathOperator { MakeChainInactiveOperator(const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class) - : PathOperator(vars, secondary_vars, 2, true, false, + : PathOperator(vars, secondary_vars, 2, + /*skip_locally_optimal_paths=*/true, + /*accept_path_end_base=*/false, 
std::move(start_empty_path_class), nullptr) {} ~MakeChainInactiveOperator() override {} bool MakeNeighbor() override { - return MakeChainInactive(BaseNode(0), BaseNode(1)); + const int64_t chain_end = BaseNode(1); + if (!IsPathEnd(chain_end) && chain_end != BaseNode(0) && + !Var(chain_end)->Contains(chain_end)) { + // Move to the next before_chain since an unskippable node has been + // encountered. + SetNextBaseToIncrement(0); + return false; + } + return MakeChainInactive(BaseNode(0), chain_end); } std::string DebugString() const override { @@ -3683,13 +3693,13 @@ class LocalSearchProfiler : public LocalSearchMonitor { std::string DebugString() const override { return "LocalSearchProfiler"; } void RestartSearch() override { operator_stats_.clear(); - filter_stats_.clear(); + filter_stats_per_context_.clear(); + last_operator_ = nullptr; } void ExitSearch() override { // Update times for current operator when the search ends. - if (solver()->TopLevelSearch() == solver()->ActiveSearch()) { - UpdateTime(); - } + UpdateTime(); + last_operator_ = nullptr; } template void ParseFirstSolutionStatistics(const Callback& callback) const { @@ -3714,26 +3724,26 @@ class LocalSearchProfiler : public LocalSearchMonitor { for (const LocalSearchOperator* const op : operators) { const OperatorStats& stats = gtl::FindOrDie(operator_stats_, op); callback(op->DebugString(), stats.neighbors, stats.filtered_neighbors, - stats.accepted_neighbors, stats.seconds); + stats.accepted_neighbors, stats.seconds, + stats.make_next_neighbor_seconds, stats.accept_neighbor_seconds); } } template void ParseLocalSearchFilterStatistics(const Callback& callback) const { - absl::flat_hash_map> - filters_per_context; - for (const auto& stat : filter_stats_) { - filters_per_context[stat.second.context].push_back(stat.first); - } - for (auto& [context, filters] : filters_per_context) { + for (const auto& [context, filter_stats] : filter_stats_per_context_) { + std::vector filters; + for (const auto& 
[filter, stats] : filter_stats) { + filters.push_back(filter); + } std::sort(filters.begin(), filters.end(), - [this](const LocalSearchFilter* filter1, - const LocalSearchFilter* filter2) { - return gtl::FindOrDie(filter_stats_, filter1).calls > - gtl::FindOrDie(filter_stats_, filter2).calls; + [&filter_stats](const LocalSearchFilter* filter1, + const LocalSearchFilter* filter2) { + return gtl::FindOrDie(filter_stats, filter1).calls > + gtl::FindOrDie(filter_stats, filter2).calls; }); for (const LocalSearchFilter* const filter : filters) { - const FilterStats& stats = gtl::FindOrDie(filter_stats_, filter); + const FilterStats& stats = gtl::FindOrDie(filter_stats, filter); callback(context, filter->DebugString(), stats.calls, stats.rejects, stats.seconds); } @@ -3749,23 +3759,30 @@ class LocalSearchProfiler : public LocalSearchMonitor { first_solution_statistics->set_strategy(name); first_solution_statistics->set_duration_seconds(duration_seconds); }); - ParseLocalSearchOperatorStatistics([&statistics_proto]( - absl::string_view name, - int64_t num_neighbors, - int64_t num_filtered_neighbors, - int64_t num_accepted_neighbors, - double duration_seconds) { - LocalSearchStatistics::LocalSearchOperatorStatistics* const - local_search_operator_statistics = - statistics_proto.add_local_search_operator_statistics(); - local_search_operator_statistics->set_local_search_operator(name); - local_search_operator_statistics->set_num_neighbors(num_neighbors); - local_search_operator_statistics->set_num_filtered_neighbors( - num_filtered_neighbors); - local_search_operator_statistics->set_num_accepted_neighbors( - num_accepted_neighbors); - local_search_operator_statistics->set_duration_seconds(duration_seconds); - }); + ParseLocalSearchOperatorStatistics( + [&statistics_proto]( + absl::string_view name, int64_t num_neighbors, + int64_t num_filtered_neighbors, int64_t num_accepted_neighbors, + double duration_seconds, double make_next_neighbor_duration_seconds, + double 
accept_neighbor_duration_seconds) { + LocalSearchStatistics::LocalSearchOperatorStatistics* const + local_search_operator_statistics = + statistics_proto.add_local_search_operator_statistics(); + local_search_operator_statistics->set_local_search_operator(name); + local_search_operator_statistics->set_num_neighbors(num_neighbors); + local_search_operator_statistics->set_num_filtered_neighbors( + num_filtered_neighbors); + local_search_operator_statistics->set_num_accepted_neighbors( + num_accepted_neighbors); + local_search_operator_statistics->set_duration_seconds( + duration_seconds); + local_search_operator_statistics + ->set_make_next_neighbor_duration_seconds( + make_next_neighbor_duration_seconds); + local_search_operator_statistics + ->set_accept_neighbor_duration_seconds( + accept_neighbor_duration_seconds); + }); ParseLocalSearchFilterStatistics([&statistics_proto]( absl::string_view context, absl::string_view name, @@ -3808,11 +3825,11 @@ class LocalSearchProfiler : public LocalSearchMonitor { }); } max_name_size = 0; - ParseLocalSearchOperatorStatistics([&max_name_size](absl::string_view name, - int64_t, int64_t, - int64_t, double) { - max_name_size = std::max(max_name_size, name.length()); - }); + ParseLocalSearchOperatorStatistics( + [&max_name_size](absl::string_view name, int64_t, int64_t, int64_t, + double, double, double) { + max_name_size = std::max(max_name_size, name.length()); + }); if (max_name_size > 0) { absl::StrAppendFormat( &overview, @@ -3824,7 +3841,11 @@ class LocalSearchProfiler : public LocalSearchMonitor { [&overview, &total_stats, max_name_size]( absl::string_view name, int64_t num_neighbors, int64_t num_filtered_neighbors, int64_t num_accepted_neighbors, - double duration_seconds) { + double duration_seconds, + double make_next_neighbor_duration_seconds, + double accept_neighbor_duration_seconds) { + // TODO(user): Add make_next_neighbor_duration_seconds and + // accept_neighbor_duration_seconds to stats. 
absl::StrAppendFormat( &overview, "%*s | %9ld | %8ld | %8ld | %7.2g\n", max_name_size, name, num_neighbors, num_filtered_neighbors, @@ -3893,9 +3914,13 @@ class LocalSearchProfiler : public LocalSearchMonitor { UpdateTime(); last_operator_ = op->Self(); } + make_next_neighbor_timer_.Start(); } void EndMakeNextNeighbor(const LocalSearchOperator* op, bool neighbor_found, const Assignment*, const Assignment*) override { + make_next_neighbor_timer_.Stop(); + operator_stats_[op->Self()].make_next_neighbor_seconds += + make_next_neighbor_timer_.Get(); if (neighbor_found) { operator_stats_[op->Self()].neighbors++; } @@ -3907,22 +3932,27 @@ class LocalSearchProfiler : public LocalSearchMonitor { operator_stats_[op->Self()].filtered_neighbors++; } } - void BeginAcceptNeighbor(const LocalSearchOperator*) override {} + void BeginAcceptNeighbor(const LocalSearchOperator*) override { + accept_neighbor_timer_.Start(); + } void EndAcceptNeighbor(const LocalSearchOperator* op, bool neighbor_found) override { + accept_neighbor_timer_.Stop(); + operator_stats_[op->Self()].accept_neighbor_seconds += + accept_neighbor_timer_.Get(); if (neighbor_found) { operator_stats_[op->Self()].accepted_neighbors++; } } void BeginFiltering(const LocalSearchFilter* filter) override { - FilterStats& filter_stats = filter_stats_[filter]; + FilterStats& filter_stats = + filter_stats_per_context_[solver()->context()][filter]; filter_stats.calls++; - filter_stats.context = solver()->context(); filter_timer_.Start(); } void EndFiltering(const LocalSearchFilter* filter, bool reject) override { filter_timer_.Stop(); - auto& stats = filter_stats_[filter]; + auto& stats = filter_stats_per_context_[solver()->context()][filter]; stats.seconds += filter_timer_.Get(); if (reject) { stats.rejects++; @@ -3949,20 +3979,25 @@ class LocalSearchProfiler : public LocalSearchMonitor { int64_t filtered_neighbors = 0; int64_t accepted_neighbors = 0; double seconds = 0; + double make_next_neighbor_seconds = 0; + double 
accept_neighbor_seconds = 0; }; struct FilterStats { int64_t calls = 0; int64_t rejects = 0; double seconds = 0; - std::string context; }; WallTimer timer_; + WallTimer make_next_neighbor_timer_; + WallTimer accept_neighbor_timer_; WallTimer filter_timer_; const LocalSearchOperator* last_operator_ = nullptr; absl::flat_hash_map operator_stats_; - absl::flat_hash_map filter_stats_; + absl::flat_hash_map< + std::string, absl::flat_hash_map> + filter_stats_per_context_; // Profiled decision builders. std::vector profiled_decision_builders_; }; diff --git a/ortools/constraint_solver/search_stats.proto b/ortools/constraint_solver/search_stats.proto index 9e300d9796..e8ae1a167e 100644 --- a/ortools/constraint_solver/search_stats.proto +++ b/ortools/constraint_solver/search_stats.proto @@ -44,6 +44,11 @@ message LocalSearchStatistics { int64 num_accepted_neighbors = 4; // Time spent in the operator. double duration_seconds = 5; + // Time spent in creating neighbors (calling MakeNextNeighbor). + double make_next_neighbor_duration_seconds = 6; + // Time spent in accepting a neighbor (restoration and storage, not + // including filtering). + double accept_neighbor_duration_seconds = 7; } // Statistics for each operator called during the search. repeated LocalSearchOperatorStatistics local_search_operator_statistics = 1; @@ -86,8 +91,8 @@ message ConstraintSolverStatistics { // Search statistics. message SearchStatistics { - // Local search statistics. - LocalSearchStatistics local_search_statistics = 1; + // Local search statistics for each solver context. + repeated LocalSearchStatistics local_search_statistics = 1; // Constraint solver statistics. 
- ConstraintSolverStatistics constraint_solver_statistics = 2; + repeated ConstraintSolverStatistics constraint_solver_statistics = 2; } diff --git a/ortools/routing/BUILD.bazel b/ortools/routing/BUILD.bazel index 02f2e514b8..a143d92313 100644 --- a/ortools/routing/BUILD.bazel +++ b/ortools/routing/BUILD.bazel @@ -31,17 +31,6 @@ config_setting( constraint_values = ["@platforms//os:windows"], ) -proto_library( - name = "ils_proto", - srcs = ["ils.proto"], - deps = [":enums_proto"], -) - -cc_proto_library( - name = "ils_cc_proto", - deps = ["ils_proto"], -) - proto_library( name = "enums_proto", srcs = ["enums.proto"], @@ -57,6 +46,17 @@ cc_proto_library( # deps = [":enums_proto"], # ) +proto_library( + name = "ils_proto", + srcs = ["ils.proto"], + deps = [":enums_proto"], +) + +cc_proto_library( + name = "ils_cc_proto", + deps = ["ils_proto"], +) + proto_library( name = "parameters_proto", srcs = ["parameters.proto"], @@ -91,6 +91,7 @@ cc_library( hdrs = ["parameters.h"], deps = [ "//ortools/base", + "//ortools/base:proto_enum_utils", "//ortools/base:protoutil", "//ortools/constraint_solver:cp", "//ortools/constraint_solver:solver_parameters_cc_proto", @@ -115,6 +116,16 @@ cc_library( ], ) +cc_library( + name = "parameters_utils", + srcs = ["parameters_utils.cc"], + hdrs = ["parameters_utils.h"], + deps = [ + ":parameters_cc_proto", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "utils", srcs = ["utils.cc"], @@ -184,9 +195,13 @@ cc_library( "//conditions:default": [], }), deps = [ + ":enums_cc_proto", + ":ils_cc_proto", ":index_manager", ":neighborhoods", ":parameters", + ":parameters_cc_proto", + ":parameters_utils", ":types", ":utils", "//ortools/base", @@ -209,8 +224,6 @@ cc_library( "//ortools/graph:topologicalsorter", "//ortools/lp_data", "//ortools/lp_data:base", - "//ortools/routing:enums_cc_proto", - "//ortools/routing:parameters_cc_proto", "//ortools/sat:boolean_problem", "//ortools/sat:cp_constraints", "//ortools/sat:cp_model", diff 
--git a/ortools/routing/filters.cc b/ortools/routing/filters.cc index 29778789e8..6382937902 100644 --- a/ortools/routing/filters.cc +++ b/ortools/routing/filters.cc @@ -4266,41 +4266,78 @@ void LightVehicleBreaksChecker::Relax() const { bool LightVehicleBreaksChecker::Check() const { for (const int path : path_state_->ChangedPaths()) { if (!path_data_[path].span.Exists()) continue; - const int64_t total_transit = path_data_[path].total_transit.Min(); - // Compute lower bound of path span from break and path time windows. const PathData& data = path_data_[path]; + const int64_t total_transit = data.total_transit.Min(); + int64_t lb_span = data.span.Min(); + // Improve bounds on span/start max/end min using time windows: breaks that + // must occur inside the path have their duration accumulated into + // lb_span_tw, they also widen [start_max, end_min). int64_t lb_span_tw = total_transit; - const int64_t start_max = data.start_cumul.Max(); - const int64_t end_min = data.end_cumul.Min(); + int64_t start_max = data.start_cumul.Max(); + int64_t end_min = data.end_cumul.Min(); for (const auto& br : data.vehicle_breaks) { if (!br.is_performed_min) continue; if (br.start_max < end_min && start_max < br.end_min) { CapAddTo(br.duration_min, &lb_span_tw); + start_max = std::min(start_max, br.start_max); + end_min = std::max(end_min, br.end_min); } } - int64_t lb_span_interbreak = 0; + lb_span = std::max({lb_span, lb_span_tw, CapSub(end_min, start_max)}); + // Compute num_feasible_breaks = number of breaks that may fit into route, + // and [breaks_start_min, breaks_end_max) = max coverage of breaks. 
+ int64_t break_start_min = kint64max; + int64_t break_end_max = kint64min; + int64_t start_min = data.start_cumul.Min(); + start_min = std::max(start_min, CapSub(end_min, data.span.Max())); + int64_t end_max = data.end_cumul.Max(); + end_max = std::min(end_max, CapAdd(start_max, data.span.Max())); + int num_feasible_breaks = 0; + for (const auto& br : data.vehicle_breaks) { + if (start_min <= br.start_max && br.end_min <= end_max) { + break_start_min = std::min(break_start_min, br.start_min); + break_end_max = std::max(break_end_max, br.end_max); + ++num_feasible_breaks; + } + } + // Improve span/start min/end max using interbreak limits: there must be + // enough breaks inside the path, so that for each limit, the union of + // [br.start - max_interbreak, br.end + max_interbreak) covers [start, end), + // or [start, end) is shorter than max_interbreak. for (const auto& [max_interbreak, min_break_duration] : data.interbreak_limits) { // Minimal number of breaks depends on total transit: // 0 breaks for 0 <= total transit <= limit, // 1 break for limit + 1 <= total transit <= 2 * limit, // i breaks for i * limit + 1 <= total transit <= (i+1) * limit, ... 
- if (total_transit == 0) continue; - if (max_interbreak == 0) return false; - const int min_num_breaks = (total_transit - 1) / max_interbreak; - if (min_num_breaks > data.vehicle_breaks.size()) return false; - lb_span_interbreak = std::max( - lb_span_interbreak, CapProd(min_num_breaks, min_break_duration)); + if (max_interbreak == 0) { + if (total_transit > 0) return false; + continue; + } + int64_t min_num_breaks = + std::max(0, (total_transit - 1) / max_interbreak); + if (lb_span > max_interbreak) { + min_num_breaks = std::max(min_num_breaks, 1); + } + if (min_num_breaks > num_feasible_breaks) return false; + lb_span = std::max( + lb_span, + CapAdd(total_transit, CapProd(min_num_breaks, min_break_duration))); + if (min_num_breaks > 0) { + if (!data.start_cumul.SetMin(CapSub(break_start_min, max_interbreak))) { + return false; + } + if (!data.end_cumul.SetMax(CapAdd(break_end_max, max_interbreak))) { + return false; + } + } } - lb_span_interbreak = CapAdd(lb_span_interbreak, total_transit); - const int64_t lb_span = std::max(lb_span_tw, lb_span_interbreak); if (!data.span.SetMin(lb_span)) return false; - if (!data.start_cumul.SetMax(CapSub(data.end_cumul.Max(), lb_span))) { - return false; - } - if (!data.end_cumul.SetMin(CapAdd(data.start_cumul.Min(), lb_span))) { - return false; - } + // Merge span lb information directly in start/end variables. 
+ start_max = std::min(start_max, CapSub(end_max, lb_span)); + if (!data.start_cumul.SetMax(start_max)) return false; + end_min = std::max(end_min, CapAdd(start_min, lb_span)); + if (!data.end_cumul.SetMin(end_min)) return false; } return true; } diff --git a/ortools/routing/ils.cc b/ortools/routing/ils.cc index 7727b23161..47c5e237e6 100644 --- a/ortools/routing/ils.cc +++ b/ortools/routing/ils.cc @@ -34,6 +34,7 @@ #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/ils.pb.h" #include "ortools/routing/parameters.pb.h" +#include "ortools/routing/parameters_utils.h" #include "ortools/routing/routing.h" #include "ortools/routing/search.h" #include "ortools/routing/types.h" @@ -233,15 +234,17 @@ std::unique_ptr MakeRecreateProcedure( model, std::move(stop_search), absl::bind_front(&RoutingModel::GetArcCostForVehicle, model), parameters.local_cheapest_cost_insertion_pickup_delivery_strategy(), - parameters.local_cheapest_insertion_sorting_mode(), filter_manager, - model->GetBinCapacities()); + GetLocalCheapestInsertionSortingProperties( + parameters.local_cheapest_insertion_sorting_properties()), + filter_manager, model->GetBinCapacities()); case FirstSolutionStrategy::LOCAL_CHEAPEST_COST_INSERTION: return std::make_unique( model, std::move(stop_search), /*evaluator=*/nullptr, parameters.local_cheapest_cost_insertion_pickup_delivery_strategy(), - parameters.local_cheapest_insertion_sorting_mode(), filter_manager, - model->GetBinCapacities()); + GetLocalCheapestInsertionSortingProperties( + parameters.local_cheapest_insertion_sorting_properties()), + filter_manager, model->GetBinCapacities()); case FirstSolutionStrategy::SEQUENTIAL_CHEAPEST_INSERTION: { GlobalCheapestInsertionFilteredHeuristic:: GlobalCheapestInsertionParameters gci_parameters = diff --git a/ortools/routing/insertion_lns.cc b/ortools/routing/insertion_lns.cc index bbb39dced6..5399083a6d 100644 --- a/ortools/routing/insertion_lns.cc +++ b/ortools/routing/insertion_lns.cc @@ 
-74,8 +74,10 @@ bool FilteredHeuristicLocalSearchOperator::MakeChangesAndInsertNodes() { if (next_accessor == nullptr) { return false; } + model_->solver()->set_context(DebugString()); const Assignment* const result_assignment = heuristic_->BuildSolutionFromRoutes(next_accessor); + model_->solver()->set_context(""); if (result_assignment == nullptr) { return false; diff --git a/ortools/routing/parameters.cc b/ortools/routing/parameters.cc index 57b93f3651..264d6464a4 100644 --- a/ortools/routing/parameters.cc +++ b/ortools/routing/parameters.cc @@ -18,6 +18,7 @@ #include #include +#include "absl/container/flat_hash_map.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/time/time.h" @@ -25,6 +26,7 @@ #include "google/protobuf/duration.pb.h" #include "google/protobuf/message.h" #include "ortools/base/logging.h" +#include "ortools/base/proto_enum_utils.h" #include "ortools/base/protoutil.h" #include "ortools/base/types.h" #include "ortools/constraint_solver/constraint_solver.h" @@ -523,6 +525,26 @@ std::vector FindErrorsInRoutingSearchParameters( "Invalid cheapest_insertion_ls_operator_min_neighbors: ", min_neighbors, ". 
Must be greater or equal to 1.")); } + { + absl::flat_hash_map + sorting_properties_map; + for (const RoutingSearchParameters::InsertionSortingProperty property : + REPEATED_ENUM_ADAPTER(search_parameters, + local_cheapest_insertion_sorting_properties)) { + if (property == RoutingSearchParameters::SORTING_PROPERTY_UNSPECIFIED) { + errors.emplace_back( + StrCat("Invalid local cheapest insertion sorting property: ", + RoutingSearchParameters::InsertionSortingProperty_Name( + RoutingSearchParameters::SORTING_PROPERTY_UNSPECIFIED))); + } + const int occurrences = sorting_properties_map[property]++; + if (occurrences == 2) { + errors.emplace_back(StrCat( + "Duplicate local cheapest insertion sorting property: ", + RoutingSearchParameters::InsertionSortingProperty_Name(property))); + } + } + } if (const double ratio = search_parameters.ls_operator_neighbors_ratio(); std::isnan(ratio) || ratio <= 0 || ratio > 1) { errors.emplace_back(StrCat("Invalid ls_operator_neighbors_ratio: ", ratio)); diff --git a/ortools/routing/parameters.proto b/ortools/routing/parameters.proto index 96a4fc9b17..e414802b09 100644 --- a/ortools/routing/parameters.proto +++ b/ortools/routing/parameters.proto @@ -36,9 +36,9 @@ package operations_research.routing; // then the routing library will pick its preferred value for that parameter // automatically: this should be the case for most parameters. // To see those "default" parameters, call GetDefaultRoutingSearchParameters(). -// Next ID: 67 +// Next ID: 68 message RoutingSearchParameters { - reserved 19; + reserved 19, 65; // First solution strategies, used as starting point of local search. FirstSolutionStrategy.Value first_solution_strategy = 1; @@ -132,24 +132,28 @@ message RoutingSearchParameters { PairInsertionStrategy local_cheapest_cost_insertion_pickup_delivery_strategy = 55; - // A mode to select in which order nodes or node pairs are considered in - // insertion heuristics. 
- enum InsertionSortingMode { - // Default mode, equivalent to SORT_BY_ALLOWED_VEHICLES_THEN_PENALTY. - SORTING_MODE_UNSET = 0; - // Selects nodes with the least number of allowed vehicles first, then the - // ones with the highest penalty. - SORT_BY_ALLOWED_VEHICLES_THEN_PENALTY = 1; - // Selects nodes with the highest penalty first, then the ones with the - // least number of allowed vehicles. - SORT_BY_PENALTY_THEN_ALLOWED_VEHICLES = 2; + // Properties used to select in which order nodes or node pairs are considered + // in insertion heuristics. + enum InsertionSortingProperty { + // Invalid property. + SORTING_PROPERTY_UNSPECIFIED = 0; + // Selects nodes with the least number of allowed vehicles. + SORTING_PROPERTY_ALLOWED_VEHICLES = 1; + // Selects nodes with the highest penalty. + SORTING_PROPERTY_PENALTY = 2; // Selects nodes with the highest penalty / number of allowed vehicles - // ratio first, then the ones with the highest penalty. - SORT_BY_PENALTY_ALLOWED_VEHICLES_RATIO_THEN_PENALTY = 3; + // ratio. + SORTING_PROPERTY_PENALTY_OVER_ALLOWED_VEHICLES_RATIO = 3; } - // The node insertion sorting mode used in local cheapest insertion - // heuristics. - InsertionSortingMode local_cheapest_insertion_sorting_mode = 65; + + // The properties used to sort insertion entries in the local cheapest + // insertion heuristic, in *decreasing* order of priority. The properties + // listed here are applied hierarchically, from highest to lowest priority. + // When no properties are provided + // (SORTING_PROPERTY_ALLOWED_VEHICLES, SORTING_PROPERTY_PENALTY) + // is used by default. + repeated InsertionSortingProperty + local_cheapest_insertion_sorting_properties = 67; // If true use minimum matching instead of minimal matching in the // Christofides algorithm. 
diff --git a/ortools/routing/parameters_utils.cc b/ortools/routing/parameters_utils.cc new file mode 100644 index 0000000000..f4ac03f123 --- /dev/null +++ b/ortools/routing/parameters_utils.cc @@ -0,0 +1,46 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/routing/parameters_utils.h" + +#include + +#include "absl/types/span.h" + +namespace operations_research::routing { + +std::vector +GetLocalCheapestInsertionSortingProperties( + absl::Span lci_insertion_sorting_properties) { + std::vector + sorting_properties; + + for (const int property : lci_insertion_sorting_properties) { + sorting_properties.push_back( + static_cast( + property)); + } + + // For historical reasons if no insertion order is specified, we fallback to + // selecting nodes with the least number of allowed vehicles first, then the + // ones with the highest penalty. 
+ if (sorting_properties.empty()) { + sorting_properties.push_back( + RoutingSearchParameters::SORTING_PROPERTY_ALLOWED_VEHICLES); + sorting_properties.push_back( + RoutingSearchParameters::SORTING_PROPERTY_PENALTY); + } + return sorting_properties; +} + +} // namespace operations_research::routing diff --git a/ortools/routing/parameters_utils.h b/ortools/routing/parameters_utils.h new file mode 100644 index 0000000000..c0e8ea276f --- /dev/null +++ b/ortools/routing/parameters_utils.h @@ -0,0 +1,33 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_ROUTING_PARAMETERS_UTILS_H_ +#define OR_TOOLS_ROUTING_PARAMETERS_UTILS_H_ + +#include + +#include "absl/types/span.h" +#include "ortools/routing/parameters.pb.h" + +namespace operations_research::routing { + +// Takes RoutingSearchParameters::local_cheapest_insertion_sorting_properties in +// input and returns the ordered list of properties that is used to sort nodes +// when performing a local cheapest insertion first heuristic. 
+std::vector +GetLocalCheapestInsertionSortingProperties( + absl::Span lci_insertion_sorting_properties); + +} // namespace operations_research::routing + +#endif // OR_TOOLS_ROUTING_PARAMETERS_UTILS_H_ diff --git a/ortools/routing/routing.cc b/ortools/routing/routing.cc index bbe5d39088..dcef548e1c 100644 --- a/ortools/routing/routing.cc +++ b/ortools/routing/routing.cc @@ -76,6 +76,7 @@ #include "ortools/routing/neighborhoods.h" #include "ortools/routing/parameters.h" #include "ortools/routing/parameters.pb.h" +#include "ortools/routing/parameters_utils.h" #include "ortools/routing/search.h" #include "ortools/routing/types.h" #include "ortools/routing/utils.h" @@ -4802,7 +4803,8 @@ void RoutingModel::CreateNeighborhoodOperators( this, [this]() { return CheckLimit(time_buffer_); }, GetLocalSearchArcCostCallback(parameters), parameters.local_cheapest_insertion_pickup_delivery_strategy(), - parameters.local_cheapest_insertion_sorting_mode(), + GetLocalCheapestInsertionSortingProperties( + parameters.local_cheapest_insertion_sorting_properties()), GetOrCreateLocalSearchFilterManager( parameters, {/*filter_objective=*/false, /*filter_with_cp_solver=*/false}), @@ -5756,27 +5758,29 @@ void RoutingModel::CreateFirstSolutionDecisionBuilders( } const RoutingSearchParameters::PairInsertionStrategy lci_pair_strategy = search_parameters.local_cheapest_insertion_pickup_delivery_strategy(); - const RoutingSearchParameters::InsertionSortingMode sorting_mode = - search_parameters.local_cheapest_insertion_sorting_mode(); - first_solution_filtered_decision_builders_ - [FirstSolutionStrategy::LOCAL_CHEAPEST_INSERTION] = - CreateIntVarFilteredDecisionBuilder< - LocalCheapestInsertionFilteredHeuristic>( - [this](int64_t i, int64_t j, int64_t vehicle) { - return GetArcCostForVehicle(i, j, vehicle); - }, - lci_pair_strategy, sorting_mode, - GetOrCreateLocalSearchFilterManager( - search_parameters, {/*filter_objective=*/false, - /*filter_with_cp_solver=*/false}), - bin_capacities_.get(), 
optimize_on_insertion); + first_solution_filtered_decision_builders_[FirstSolutionStrategy:: + LOCAL_CHEAPEST_INSERTION] = + CreateIntVarFilteredDecisionBuilder< + LocalCheapestInsertionFilteredHeuristic>( + [this](int64_t i, int64_t j, int64_t vehicle) { + return GetArcCostForVehicle(i, j, vehicle); + }, + lci_pair_strategy, + GetLocalCheapestInsertionSortingProperties( + search_parameters.local_cheapest_insertion_sorting_properties()), + GetOrCreateLocalSearchFilterManager( + search_parameters, {/*filter_objective=*/false, + /*filter_with_cp_solver=*/false}), + bin_capacities_.get(), optimize_on_insertion); IntVarFilteredDecisionBuilder* const strong_lci = CreateIntVarFilteredDecisionBuilder< LocalCheapestInsertionFilteredHeuristic>( [this](int64_t i, int64_t j, int64_t vehicle) { return GetArcCostForVehicle(i, j, vehicle); }, - lci_pair_strategy, sorting_mode, + lci_pair_strategy, + GetLocalCheapestInsertionSortingProperties( + search_parameters.local_cheapest_insertion_sorting_properties()), GetOrCreateLocalSearchFilterManager(search_parameters, {/*filter_objective=*/false, /*filter_with_cp_solver=*/true}), @@ -5797,7 +5801,10 @@ void RoutingModel::CreateFirstSolutionDecisionBuilders( [FirstSolutionStrategy::LOCAL_CHEAPEST_COST_INSERTION] = CreateIntVarFilteredDecisionBuilder< LocalCheapestInsertionFilteredHeuristic>( - /*evaluator=*/nullptr, lcci_pair_strategy, sorting_mode, + /*evaluator=*/nullptr, lcci_pair_strategy, + GetLocalCheapestInsertionSortingProperties( + search_parameters + .local_cheapest_insertion_sorting_properties()), GetOrCreateLocalSearchFilterManager( search_parameters, {/*filter_objective=*/true, /*filter_with_cp_solver=*/false}), @@ -5805,7 +5812,9 @@ void RoutingModel::CreateFirstSolutionDecisionBuilders( IntVarFilteredDecisionBuilder* const strong_lcci = CreateIntVarFilteredDecisionBuilder< LocalCheapestInsertionFilteredHeuristic>( - /*evaluator=*/nullptr, lcci_pair_strategy, sorting_mode, + /*evaluator=*/nullptr, lcci_pair_strategy, + 
GetLocalCheapestInsertionSortingProperties( + search_parameters.local_cheapest_insertion_sorting_properties()), GetOrCreateLocalSearchFilterManager(search_parameters, {/*filter_objective=*/true, /*filter_with_cp_solver=*/true}), diff --git a/ortools/routing/search.cc b/ortools/routing/search.cc index 76e9298caf..078b636dd3 100644 --- a/ortools/routing/search.cc +++ b/ortools/routing/search.cc @@ -38,6 +38,7 @@ #include "absl/base/attributes.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" +#include "absl/container/inlined_vector.h" #include "absl/flags/flag.h" #include "absl/log/check.h" #include "absl/log/die_if_null.h" @@ -620,7 +621,7 @@ void CheapestInsertionFilteredHeuristic::AddSeedNodeToQueue( } const int64_t num_allowed_vehicles = model()->VehicleVar(node)->Size(); const int64_t neg_penalty = CapOpp(model()->UnperformedPenalty(node)); - sq->priority_queue.push({.key = {num_allowed_vehicles, neg_penalty}, + sq->priority_queue.push({.properties = {num_allowed_vehicles, neg_penalty}, .start_end_value = start_end_value, .is_node_index = true, .index = node}); @@ -2277,7 +2278,8 @@ LocalCheapestInsertionFilteredHeuristic:: RoutingModel* model, std::function stop_search, std::function evaluator, RoutingSearchParameters::PairInsertionStrategy pair_insertion_strategy, - RoutingSearchParameters::InsertionSortingMode insertion_sorting_mode, + std::vector + insertion_sorting_properties, LocalSearchFilterManager* filter_manager, BinCapacities* bin_capacities, std::function&, std::vector*)> @@ -2286,9 +2288,11 @@ LocalCheapestInsertionFilteredHeuristic:: std::move(evaluator), nullptr, filter_manager), pair_insertion_strategy_(pair_insertion_strategy), - insertion_sorting_mode_(insertion_sorting_mode), + insertion_sorting_properties_(std::move(insertion_sorting_properties)), bin_capacities_(bin_capacities), - optimize_on_insertion_(std::move(optimize_on_insertion)) {} + optimize_on_insertion_(std::move(optimize_on_insertion)) { + 
DCHECK(!insertion_sorting_properties_.empty()); +} void LocalCheapestInsertionFilteredHeuristic::Initialize() { // NOTE(user): Keeping the code in a separate function as opposed to @@ -2396,22 +2400,34 @@ void LocalCheapestInsertionFilteredHeuristic::ComputeInsertionOrder() { insertion_order_.reserve(model.Size() + model.GetPickupAndDeliveryPairs().size()); - auto get_insertion_key = - [this](int64_t penalty, - int64_t num_allowed_vehicles) -> std::tuple { + auto get_insertion_properties = [this](int64_t penalty, + int64_t num_allowed_vehicles) { DCHECK_NE(0, num_allowed_vehicles); - switch (insertion_sorting_mode_) { - case RoutingSearchParameters::SORT_BY_PENALTY_THEN_ALLOWED_VEHICLES: - return {CapOpp(penalty), num_allowed_vehicles}; - case RoutingSearchParameters:: - SORT_BY_PENALTY_ALLOWED_VEHICLES_RATIO_THEN_PENALTY: - return {CapOpp(penalty / num_allowed_vehicles), CapOpp(penalty)}; - default: - return {num_allowed_vehicles, CapOpp(penalty)}; + absl::InlinedVector properties; + properties.reserve(insertion_sorting_properties_.size()); + for (const int property : insertion_sorting_properties_) { + switch (property) { + case RoutingSearchParameters::SORTING_PROPERTY_ALLOWED_VEHICLES: + properties.push_back(num_allowed_vehicles); + break; + case RoutingSearchParameters::SORTING_PROPERTY_PENALTY: + properties.push_back(CapOpp(penalty)); + break; + case RoutingSearchParameters:: + SORTING_PROPERTY_PENALTY_OVER_ALLOWED_VEHICLES_RATIO: + properties.push_back(CapOpp(penalty / num_allowed_vehicles)); + break; + default: + LOG(DFATAL) + << "Unknown RoutingSearchParameter::InsertionSortingProperty " + "used!"; + break; + } } + return properties; }; - // Iterating on pickup and delivery pairs + // Iterating on pickup and delivery pairs. 
const std::vector& pairs = model.GetPickupAndDeliveryPairs(); @@ -2435,8 +2451,8 @@ void LocalCheapestInsertionFilteredHeuristic::ComputeInsertionOrder() { std::max(delivery_penalty, model.UnperformedPenalty(delivery)); } insertion_order_.push_back( - {.key = get_insertion_key(CapAdd(pickup_penalty, delivery_penalty), - num_allowed_vehicles), + {.properties = get_insertion_properties( + CapAdd(pickup_penalty, delivery_penalty), num_allowed_vehicles), .start_end_value = {GetNegMaxDistanceFromVehicles(model, pair_index), 0}, .is_node_index = false, @@ -2457,8 +2473,8 @@ void LocalCheapestInsertionFilteredHeuristic::ComputeInsertionOrder() { }, vehicle_set); insertion_order_.push_back( - {.key = get_insertion_key(model.UnperformedPenalty(node), - model.VehicleVar(node)->Size()), + {.properties = get_insertion_properties(model.UnperformedPenalty(node), + model.VehicleVar(node)->Size()), .start_end_value = {CapOpp(min_distance), 0}, .is_node_index = true, .index = node}); diff --git a/ortools/routing/search.h b/ortools/routing/search.h index a38dfb5539..2057c5b064 100644 --- a/ortools/routing/search.h +++ b/ortools/routing/search.h @@ -33,6 +33,7 @@ #include #include "absl/container/flat_hash_set.h" +#include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/types/span.h" #include "ortools/base/adjustable_priority_queue.h" @@ -345,7 +346,7 @@ class CheapestInsertionFilteredHeuristic : public RoutingFilteredHeuristic { } }; struct Seed { - std::tuple key; + absl::InlinedVector properties; StartEndValue start_end_value; /// Indicates whether this Seed corresponds to a pair or a single node. /// If false, the 'index' is the pair_index, otherwise it's the node index. 
@@ -353,9 +354,12 @@ class CheapestInsertionFilteredHeuristic : public RoutingFilteredHeuristic { int index; bool operator>(const Seed& other) const { - return std::tie(key, start_end_value, is_node_index, index) > - std::tie(other.key, other.start_end_value, other.is_node_index, - other.index); + for (size_t i = 0; i < properties.size(); ++i) { + if (properties[i] == other.properties[i]) continue; + return properties[i] > other.properties[i]; + } + return std::tie(start_end_value, is_node_index, index) > + std::tie(other.start_end_value, other.is_node_index, other.index); } }; // clang-format off @@ -1067,7 +1071,8 @@ class LocalCheapestInsertionFilteredHeuristic RoutingModel* model, std::function stop_search, std::function evaluator, RoutingSearchParameters::PairInsertionStrategy pair_insertion_strategy, - RoutingSearchParameters::InsertionSortingMode insertion_sorting_mode, + std::vector + insertion_sorting_properties, LocalSearchFilterManager* filter_manager, BinCapacities* bin_capacities = nullptr, std::function&, @@ -1130,7 +1135,8 @@ class LocalCheapestInsertionFilteredHeuristic std::vector insertion_order_; const RoutingSearchParameters::PairInsertionStrategy pair_insertion_strategy_; - const RoutingSearchParameters::InsertionSortingMode insertion_sorting_mode_; + std::vector + insertion_sorting_properties_; InsertionSequenceContainer insertion_container_; InsertionSequenceGenerator insertion_generator_; From d5d6520406d7b9cde78567dbb4d1d588b910d978 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 3 Oct 2024 15:32:10 +0200 Subject: [PATCH 042/105] bazel try fix python build --- WORKSPACE | 75 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 39 insertions(+), 36 deletions(-) diff --git a/WORKSPACE b/WORKSPACE index 9cd96f58a5..20028d1505 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -75,7 +75,7 @@ git_repository( git_repository( name = "rules_python", - tag = "0.34.0", + tag = "0.36.0", remote = 
"https://github.com/bazelbuild/rules_python.git", ) @@ -105,6 +105,43 @@ git_repository( repo_mapping = {"@abseil-cpp": "@com_google_absl"}, ) +## Python +load("@rules_python//python:repositories.bzl", "py_repositories") +py_repositories() + +load("@rules_python//python:repositories.bzl", "python_register_toolchains") +DEFAULT_PYTHON = "3.12" +python_register_toolchains( + name = "python3_12", + python_version = DEFAULT_PYTHON, + ignore_root_user_error=True, +) +load("@python3_12//:defs.bzl", "interpreter") + +# Create a central external repo, @pip_deps, that contains Bazel targets for all the +# third-party packages specified in the bazel/requirements.txt file. +load("@rules_python//python:pip.bzl", "pip_parse") +pip_parse( + name = "pip_deps", + python_interpreter_target = interpreter, + requirements_lock = "//bazel:ortools_requirements.txt", +) + +load("@pip_deps//:requirements.bzl", + install_pip_deps="install_deps") +install_pip_deps() + +# Add a second repo @ortools_notebook_deps for jupyter notebooks. +pip_parse( + name = "ortools_notebook_deps", + python_interpreter_target = interpreter, + requirements_lock = "//bazel:notebook_requirements.txt", +) + +load("@ortools_notebook_deps//:requirements.bzl", + install_notebook_deps="install_deps") +install_notebook_deps() + ## Protobuf # proto_library, cc_proto_library, and java_proto_library rules implicitly # depend on @com_google_protobuf for protoc and proto runtimes. 
@@ -205,40 +242,6 @@ new_git_repository( remote = "https://github.com/swig/swig.git", ) -## Python -load("@rules_python//python:repositories.bzl", "py_repositories") -py_repositories() - -load("@rules_python//python:repositories.bzl", "python_register_toolchains") -DEFAULT_PYTHON = "3.12" -python_register_toolchains( - name = "python3_12", - python_version = DEFAULT_PYTHON, - ignore_root_user_error=True, -) - -# Create a central external repo, @pip_deps, that contains Bazel targets for all the -# third-party packages specified in the bazel/requirements.txt file. -load("@rules_python//python:pip.bzl", "pip_parse") -pip_parse( - name = "pip_deps", - requirements_lock = "//bazel:ortools_requirements.txt", -) - -load("@pip_deps//:requirements.bzl", - install_pip_deps="install_deps") -install_pip_deps() - -# Add a second repo @ortools_notebook_deps for jupyter notebooks. -pip_parse( - name = "ortools_notebook_deps", - requirements_lock = "//bazel:notebook_requirements.txt", -) - -load("@ortools_notebook_deps//:requirements.bzl", - install_notebook_deps="install_deps") -install_notebook_deps() - # Protobuf load("@com_google_protobuf//bazel:system_python.bzl", "system_python") system_python( @@ -283,7 +286,7 @@ new_git_repository( new_git_repository( name = "pybind11_protobuf", - commit = "84653a591aea5df482dc2bde42c19efafbd53a57", # 2024/06/28 + commit = "ed430af1814a97e4017f2f808d3ba28cc10802f1", # 2024/10/02 remote = "https://github.com/pybind/pybind11_protobuf.git", ) From eb3684f7a96547c2021b115aac81b6b45cfad6f7 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 4 Oct 2024 15:18:08 +0200 Subject: [PATCH 043/105] fix #4392 --- ortools/flatzinc/presolve.cc | 151 +---------------------------------- ortools/flatzinc/presolve.h | 20 ----- 2 files changed, 2 insertions(+), 169 deletions(-) diff --git a/ortools/flatzinc/presolve.cc b/ortools/flatzinc/presolve.cc index 7b5984dee4..2887f8db46 100644 --- a/ortools/flatzinc/presolve.cc +++ b/ortools/flatzinc/presolve.cc 
@@ -121,27 +121,6 @@ void Presolver::PresolveInt2Float(Constraint* ct) { ct->MarkAsInactive(); } -// Minizinc flattens 2d element constraints (x = A[y][z]) into 1d element -// constraint with an affine mapping between y, z and the new index. -// This rule stores the mapping to reconstruct the 2d element constraint. -// This mapping can involve 1 or 2 variables depending if y or z in A[y][z] -// is a constant in the model). -void Presolver::PresolveStoreAffineMapping(Constraint* ct) { - CHECK_EQ(2, ct->arguments[1].variables.size()); - Variable* const var0 = ct->arguments[1].variables[0]; - Variable* const var1 = ct->arguments[1].variables[1]; - const int64_t coeff0 = ct->arguments[0].values[0]; - const int64_t coeff1 = ct->arguments[0].values[1]; - const int64_t rhs = ct->arguments[2].Value(); - if (coeff0 == -1 && !affine_map_.contains(var0)) { - affine_map_[var0] = AffineMapping(var1, coeff0, -rhs, ct); - UpdateRuleStats("int_lin_eq: store affine mapping"); - } else if (coeff1 == -1 && !affine_map_.contains(var1)) { - affine_map_[var1] = AffineMapping(var0, coeff0, -rhs, ct); - UpdateRuleStats("int_lin_eq: store affine mapping"); - } -} - void Presolver::PresolveStoreFlatteningMapping(Constraint* ct) { CHECK_EQ(3, ct->arguments[1].variables.size()); Variable* const var0 = ct->arguments[1].variables[0]; @@ -210,19 +189,11 @@ bool IsStrictPrefix(const std::vector& v1, const std::vector& v2) { // Rewrite array element: array_int_element: // // Rule 1: -// Input : array_int_element(x0, [c1, .., cn], y) with x0 = a * x + b -// Output: array_int_element(x, [c_a1, .., c_am], b) with a * i = b = ai -// -// Rule 2: // Input : array_int_element(x, [c1, .., cn], y) with x = a * x1 + x2 + b // Output: array_int_element([x1, x2], [c_a1, .., c_am], b, [a, b]) // to be interpreted by the extraction process. 
// -// Rule 3: -// Input: array_int_element(x, [c1, .., cn], y) -// Output array_int_element(x, [c1, .., c{max(x)}], y) -// -// Rule 4: +// Rule 2: // Input : array_int_element(x, [c1, .., cn], y) with x0 ci = c0 + i // Output: int_lin_eq([-1, 1], [y, x], 1 - c) (e.g. y = x + c - 1) void Presolver::PresolveSimplifyElement(Constraint* ct) { @@ -230,62 +201,6 @@ void Presolver::PresolveSimplifyElement(Constraint* ct) { Variable* const index_var = ct->arguments[0].Var(); // Rule 1. - if (affine_map_.contains(index_var)) { - const AffineMapping& mapping = affine_map_[index_var]; - const Domain& domain = mapping.variable->domain; - if (domain.is_interval && domain.values.empty()) { - // Invalid case. Ignore it. - return; - } - if (domain.values[0] == 0 && mapping.coefficient == 1 && - mapping.offset > 1 && index_var->domain.is_interval) { - // Simple translation - const int offset = mapping.offset - 1; - const int size = ct->arguments[1].values.size(); - for (int i = 0; i < size - offset; ++i) { - ct->arguments[1].values[i] = ct->arguments[1].values[i + offset]; - } - ct->arguments[1].values.resize(size - offset); - affine_map_[index_var].constraint->arguments[2].values[0] = -1; - affine_map_[index_var].offset = 1; - index_var->domain.values[0] -= offset; - index_var->domain.values[1] -= offset; - UpdateRuleStats("array_int_element: simplify using affine mapping."); - return; - } else if (mapping.offset + mapping.coefficient > 0 && - domain.values[0] > 0) { - const std::vector& values = ct->arguments[1].values; - std::vector new_values; - for (int64_t i = 1; i <= domain.values.back(); ++i) { - const int64_t index = i * mapping.coefficient + mapping.offset - 1; - if (index < 0) { - return; - } - if (index > values.size()) { - break; - } - new_values.push_back(values[index]); - } - // Rewrite constraint. 
- UpdateRuleStats("array_int_element: simplify using affine mapping."); - ct->arguments[0].variables[0] = mapping.variable; - ct->arguments[0].variables[0]->domain.IntersectWithInterval( - 1, new_values.size()); - // TODO(user): Encapsulate argument setters. - ct->arguments[1].values.swap(new_values); - if (ct->arguments[1].values.size() == 1) { - ct->arguments[1].type = Argument::INT_VALUE; - } - // Reset propagate flag. - ct->presolve_propagation_done = false; - // Mark old index var and affine constraint as presolved out. - mapping.constraint->MarkAsInactive(); - index_var->active = false; - return; - } - } - - // Rule 2. if (array2d_index_map_.contains(index_var)) { UpdateRuleStats("array_int_element: rewrite as a 2d element"); const Array2DIndexMapping& mapping = array2d_index_map_[index_var]; @@ -302,16 +217,7 @@ void Presolver::PresolveSimplifyElement(Constraint* ct) { return; } - // Rule 3. - if (index_var->domain.Max() < ct->arguments[1].values.size()) { - // Reduce array of values. - ct->arguments[1].values.resize(index_var->domain.Max()); - ct->presolve_propagation_done = false; - UpdateRuleStats("array_int_element: reduce array"); - return; - } - - // Rule 4. + // Rule 2. 
if (IsIncreasingAndContiguous(ct->arguments[1].values) && ct->arguments[2].type == Argument::VAR_REF) { const int64_t start = ct->arguments[1].values.front(); @@ -332,51 +238,6 @@ void Presolver::PresolveSimplifyElement(Constraint* ct) { } } -// Simplifies array_var_int_element -// -// Input : array_var_int_element(x0, [x1, .., xn], y) with x0 = a * x + b -// Output: array_var_int_element(x, [x_a1, .., x_an], b) with a * i = b = ai -void Presolver::PresolveSimplifyExprElement(Constraint* ct) { - if (ct->arguments[0].variables.size() != 1) return; - - Variable* const index_var = ct->arguments[0].Var(); - if (affine_map_.contains(index_var)) { - const AffineMapping& mapping = affine_map_[index_var]; - const Domain& domain = mapping.variable->domain; - if ((domain.is_interval && domain.values.empty()) || - domain.values[0] != 1 || mapping.offset + mapping.coefficient <= 0) { - // Invalid case. Ignore it. - return; - } - const std::vector& vars = ct->arguments[1].variables; - std::vector new_vars; - for (int64_t i = domain.values.front(); i <= domain.values.back(); ++i) { - const int64_t index = i * mapping.coefficient + mapping.offset - 1; - if (index < 0) { - return; - } - if (index >= vars.size()) { - break; - } - new_vars.push_back(vars[index]); - } - // Rewrite constraint. - UpdateRuleStats("array_var_int_element: simplify using affine mapping."); - ct->arguments[0].variables[0] = mapping.variable; - // TODO(user): Encapsulate argument setters. - ct->arguments[1].variables.swap(new_vars); - // Mark old index var and affine constraint as presolved out. - mapping.constraint->MarkAsInactive(); - index_var->active = false; - } else if (index_var->domain.is_interval && - index_var->domain.values.size() == 2 && - index_var->domain.Max() < ct->arguments[1].variables.size()) { - // Reduce array of variables. 
- ct->arguments[1].variables.resize(index_var->domain.Max()); - UpdateRuleStats("array_var_int_element: reduce array"); - } -} - void Presolver::Run(Model* model) { // Should rewrite float constraints. if (absl::GetFlag(FLAGS_fz_floats_are_ints)) { @@ -441,10 +302,6 @@ void Presolver::Run(Model* model) { PresolveBool2Int(ct); } else if (ct->active && ct->type == "int2float") { PresolveInt2Float(ct); - } else if (ct->active && ct->type == "int_lin_eq" && - ct->arguments[1].variables.size() == 2 && - ct->strong_propagation) { - PresolveStoreAffineMapping(ct); } else if (ct->active && ct->type == "int_lin_eq" && ct->arguments[1].variables.size() == 3 && ct->strong_propagation) { @@ -463,10 +320,6 @@ void Presolver::Run(Model* model) { if (ct->type == "array_int_element" || ct->type == "array_bool_element") { PresolveSimplifyElement(ct); } - if (ct->type == "array_var_int_element" || - ct->type == "array_var_bool_element") { - PresolveSimplifyExprElement(ct); - } } // Third pass: process objective with floating point coefficients. diff --git a/ortools/flatzinc/presolve.h b/ortools/flatzinc/presolve.h index ea3e6a7c87..38675968c7 100644 --- a/ortools/flatzinc/presolve.h +++ b/ortools/flatzinc/presolve.h @@ -47,21 +47,6 @@ class Presolver { void Run(Model* model); private: - // This struct stores the affine mapping of one variable: - // it represents new_var = var * coefficient + offset. It also stores the - // constraint that defines this mapping. - struct AffineMapping { - Variable* variable; - int64_t coefficient; - int64_t offset; - Constraint* constraint; - - AffineMapping() - : variable(nullptr), coefficient(0), offset(0), constraint(nullptr) {} - AffineMapping(Variable* v, int64_t c, int64_t o, Constraint* ct) - : variable(v), coefficient(c), offset(o), constraint(ct) {} - }; - // This struct stores the mapping of two index variables (of a 2D array; not // included here) onto a single index variable (of the flattened 1D array). 
// The original 2D array could be trimmed in the process; so we also need an @@ -96,10 +81,8 @@ class Presolver { // Presolve rules. void PresolveBool2Int(Constraint* ct); void PresolveInt2Float(Constraint* ct); - void PresolveStoreAffineMapping(Constraint* ct); void PresolveStoreFlatteningMapping(Constraint* ct); void PresolveSimplifyElement(Constraint* ct); - void PresolveSimplifyExprElement(Constraint* ct); // Helpers. void UpdateRuleStats(const std::string& rule_name) { @@ -117,9 +100,6 @@ class Presolver { absl::flat_hash_map var_representative_map_; std::vector var_representative_vector_; - // Stores affine_map_[x] = a * y + b. - absl::flat_hash_map affine_map_; - // Stores array2d_index_map_[z] = a * x + y + b. absl::flat_hash_map array2d_index_map_; From b681feb0ffc7ff5cbbd191e6a848f4ce12317476 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Sat, 5 Oct 2024 10:54:51 +0200 Subject: [PATCH 044/105] add __repr__ to Domain: fix #4399 --- ortools/util/python/BUILD.bazel | 1 + ortools/util/python/sorted_interval_list.cc | 5 +++++ ortools/util/python/sorted_interval_list_test.py | 5 +++++ 3 files changed, 11 insertions(+) diff --git a/ortools/util/python/BUILD.bazel b/ortools/util/python/BUILD.bazel index 925cf571dc..c67d864d57 100644 --- a/ortools/util/python/BUILD.bazel +++ b/ortools/util/python/BUILD.bazel @@ -31,6 +31,7 @@ pybind_extension( deps = [ ":sorted_interval_list_doc", "//ortools/util:sorted_interval_list", + "@com_google_absl//absl/strings", ], ) diff --git a/ortools/util/python/sorted_interval_list.cc b/ortools/util/python/sorted_interval_list.cc index 55e82d5444..d2a15ec55e 100644 --- a/ortools/util/python/sorted_interval_list.cc +++ b/ortools/util/python/sorted_interval_list.cc @@ -15,6 +15,7 @@ #include +#include "absl/strings/str_cat.h" #include "ortools/util/python/sorted_interval_list_doc.h" #include "pybind11/cast.h" #include "pybind11/pybind11.h" @@ -57,6 +58,10 @@ PYBIND11_MODULE(sorted_interval_list, m) { .def("union_with", 
&Domain::UnionWith, DOC(operations_research, Domain, UnionWith), arg("domain")) .def("__str__", &Domain::ToString) + .def("__repr__", + [](const Domain& domain) { + return absl::StrCat("Domain(", domain.ToString(), ")"); + }) // Compatibility with pre PEP8 APIs. .def_static("AllValues", &Domain::AllValues, DOC(operations_research, Domain, AllValues)) diff --git a/ortools/util/python/sorted_interval_list_test.py b/ortools/util/python/sorted_interval_list_test.py index b33eb7d878..2f03099842 100755 --- a/ortools/util/python/sorted_interval_list_test.py +++ b/ortools/util/python/sorted_interval_list_test.py @@ -85,6 +85,11 @@ class SortedIntervalListTest(absltest.TestCase): self.assertEqual([-9223372036854775808, 5], d1.flattened_intervals()) self.assertEqual([6, 9223372036854775807], d2.flattened_intervals()) + def testStr(self): + d1 = sorted_interval_list.Domain(0, 5) + self.assertEqual(str(d1), "[0,5]") + self.assertEqual(repr(d1), "Domain([0,5])") + if __name__ == "__main__": absltest.main() From 59d9c4acdd7730542e8dff9a7aac6646f0a1d3dd Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Sat, 5 Oct 2024 11:44:53 +0200 Subject: [PATCH 045/105] fz accepts -i --- ortools/flatzinc/cp-sat.msc.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ortools/flatzinc/cp-sat.msc.in b/ortools/flatzinc/cp-sat.msc.in index 82bfbf842a..f06573430c 100644 --- a/ortools/flatzinc/cp-sat.msc.in +++ b/ortools/flatzinc/cp-sat.msc.in @@ -6,7 +6,7 @@ "mznlib": "../cp-sat", "executable": "@FZ_REL_INSTALL_BINARY@", "tags": ["cp-sat", "cp", "lcg", "int"], - "stdFlags": ["-a", "-f", "-p", "-r", "-s", "-v"], + "stdFlags": ["-a", "-i", "-f", "-p", "-r", "-s", "-v"], "extraFlags": [ ["--params", "Provide parameters interpreted as a text SatParameters proto", "string", ""] ], From b9c5e21289826d5a3d8678dd02c6a991621b0121 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 7 Oct 2024 08:40:04 +0200 Subject: [PATCH 046/105] sat: Export from google3 --- 
ortools/base/parse_text_proto.h | 27 + ortools/glop/revised_simplex.cc | 109 ++-- ortools/glop/revised_simplex.h | 36 +- ortools/glop/variables_info.cc | 10 + ortools/glop/variables_info.h | 6 + ortools/lp_data/lp_data_utils.cc | 134 ++++- ortools/lp_data/lp_data_utils.h | 35 +- ortools/lp_data/sparse.cc | 16 +- ortools/lp_data/sparse.h | 4 + ortools/sat/2d_rectangle_presolve_test.cc | 5 +- ortools/sat/BUILD.bazel | 22 + ortools/sat/cp_model_checker.cc | 6 +- ortools/sat/cp_model_expand.cc | 3 +- ortools/sat/cp_model_lns.cc | 158 +++++- ortools/sat/cp_model_lns.h | 26 +- ortools/sat/cp_model_presolve.cc | 75 +++ ortools/sat/cp_model_solver.cc | 6 + ortools/sat/feasibility_jump.cc | 3 +- ortools/sat/feasibility_pump.cc | 4 +- ortools/sat/linear_constraint_manager.cc | 14 +- ortools/sat/linear_programming_constraint.cc | 323 ++++++++--- ortools/sat/linear_programming_constraint.h | 14 +- ortools/sat/lp_utils.cc | 4 +- ortools/sat/precedences.cc | 5 +- ortools/sat/probing.cc | 5 +- ortools/sat/sat_base.h | 5 +- ortools/sat/sat_inprocessing.cc | 7 +- ortools/sat/var_domination.cc | 4 +- ortools/sat/work_assignment_test.cc | 545 +++++++++++++++++++ 29 files changed, 1416 insertions(+), 195 deletions(-) create mode 100644 ortools/sat/work_assignment_test.cc diff --git a/ortools/base/parse_text_proto.h b/ortools/base/parse_text_proto.h index 625ad29ad1..5e78d3dfc3 100644 --- a/ortools/base/parse_text_proto.h +++ b/ortools/base/parse_text_proto.h @@ -14,6 +14,8 @@ #ifndef OR_TOOLS_BASE_PARSE_TEXT_PROTO_H_ #define OR_TOOLS_BASE_PARSE_TEXT_PROTO_H_ +#include + #include "absl/log/absl_check.h" #include "google/protobuf/message.h" #include "google/protobuf/text_format.h" @@ -32,6 +34,31 @@ T ParseTextOrDie(const std::string& input) { return result; } +namespace text_proto_internal { + +class ParseProtoHelper { + public: + explicit ParseProtoHelper(std::string_view asciipb) : asciipb_(asciipb) {} + template + operator T() { // NOLINT(runtime/explicit) + T result; + const bool 
ok = ::google::protobuf::TextFormat::TextFormat::ParseFromString( + asciipb_, &result); + CHECK(ok) << "Failed to parse text proto: " << asciipb_; + return result; + } + + private: + const std::string asciipb_; +}; + +} // namespace text_proto_internal + +text_proto_internal::ParseProtoHelper ParseTextProtoOrDie( + std::string_view input) { + return text_proto_internal::ParseProtoHelper(input); +} + } // namespace google::protobuf::contrib::parse_proto #endif // OR_TOOLS_BASE_PARSE_TEXT_PROTO_H_ diff --git a/ortools/glop/revised_simplex.cc b/ortools/glop/revised_simplex.cc index 88549afa30..2a7e20dc8a 100644 --- a/ortools/glop/revised_simplex.cc +++ b/ortools/glop/revised_simplex.cc @@ -154,29 +154,72 @@ void RevisedSimplex::SetStartingVariableValuesForNextSolve( variable_starting_values_ = values; } -void RevisedSimplex::NotifyThatMatrixIsUnchangedForNextSolve() { - notify_that_matrix_is_unchanged_ = true; -} +Status RevisedSimplex::MinimizeFromTransposedMatrixWithSlack( + const DenseRow& objective, Fractional objective_scaling_factor, + Fractional objective_offset, TimeLimit* time_limit) { + const double start_time = time_limit->GetElapsedTime(); + default_logger_.EnableLogging(parameters_.log_search_progress()); + default_logger_.SetLogToStdOut(parameters_.log_to_stdout()); + parameters_ = initial_parameters_; + PropagateParameters(); -void RevisedSimplex::NotifyThatMatrixIsChangedForNextSolve() { - notify_that_matrix_is_unchanged_ = false; + // The source of truth is the transposed matrix. 
+ if (transpose_was_changed_) { + compact_matrix_.PopulateFromTranspose(transposed_matrix_); + num_rows_ = compact_matrix_.num_rows(); + num_cols_ = compact_matrix_.num_cols(); + first_slack_col_ = num_cols_ - RowToColIndex(num_rows_); + } + + DCHECK_EQ(num_cols_, objective.size()); + + // Copy objective + objective_scaling_factor_ = objective_scaling_factor; + objective_offset_ = objective_offset; + const bool objective_is_unchanged = objective_ == objective; + objective_ = objective; + InitializeObjectiveLimit(); + + // Initialize variable infos from the mutated bounds. + variables_info_.InitializeFromMutatedState(); + + if (objective_is_unchanged && parameters_.use_dual_simplex() && + !transpose_was_changed_ && !solution_state_has_been_set_externally_ && + !solution_state_.IsEmpty()) { + // Fast track if we just changed variable bounds. + primal_edge_norms_.Clear(); + variables_info_.InitializeFromBasisState(first_slack_col_, ColIndex(0), + solution_state_); + variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_); + variable_values_.RecomputeBasicVariableValues(); + return SolveInternal(start_time, false, objective, time_limit); + } else { + GLOP_RETURN_IF_ERROR(FinishInitialization(true)); + } + + return SolveInternal(start_time, false, objective, time_limit); } Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) { - SCOPED_TIME_STAT(&function_stats_); + const double start_time = time_limit->GetElapsedTime(); + default_logger_.EnableLogging(parameters_.log_search_progress()); + default_logger_.SetLogToStdOut(parameters_.log_to_stdout()); + DCHECK(lp.IsCleanedUp()); + GLOP_RETURN_IF_ERROR(Initialize(lp)); + return SolveInternal(start_time, lp.IsMaximizationProblem(), + lp.objective_coefficients(), time_limit); +} + +ABSL_MUST_USE_RESULT Status RevisedSimplex::SolveInternal( + double start_time, bool is_maximization_problem, + const DenseRow& objective_coefficients, TimeLimit* time_limit) { + 
SCOPED_TIME_STAT(&function_stats_); GLOP_RETURN_ERROR_IF_NULL(time_limit); Cleanup update_deterministic_time_on_return( [this, time_limit]() { AdvanceDeterministicTime(time_limit); }); - default_logger_.EnableLogging(parameters_.log_search_progress()); - default_logger_.SetLogToStdOut(parameters_.log_to_stdout()); SOLVER_LOG(logger_, ""); - - // Initialization. Note That Initialize() must be called first since it - // analyzes the current solver state. - const double start_time = time_limit->GetElapsedTime(); - GLOP_RETURN_IF_ERROR(Initialize(lp)); if (logger_->LoggingIsEnabled()) { DisplayBasicVariableStatistics(); } @@ -310,7 +353,13 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) { // After the primal phase I, we need to restore the objective. if (problem_status_ != ProblemStatus::PRIMAL_INFEASIBLE) { - InitializeObjectiveAndTestIfUnchanged(lp); + objective_ = objective_coefficients; + if (is_maximization_problem) { + for (Fractional& value : objective_) { + value = -value; + } + } + objective_.resize(num_cols_, 0.0); // For the slack. reduced_costs_.ResetForNewObjective(); } } @@ -639,7 +688,7 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) { solution_reduced_costs_ = reduced_costs_.GetReducedCosts(); SaveState(); - if (lp.IsMaximizationProblem()) { + if (is_maximization_problem) { ChangeSign(&solution_dual_values_); ChangeSign(&solution_reduced_costs_); } @@ -650,7 +699,7 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) { solution_objective_value_ = (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) ? 
kInfinity : -kInfinity; - if (lp.IsMaximizationProblem()) { + if (is_maximization_problem) { solution_objective_value_ = -solution_objective_value_; } } @@ -1379,21 +1428,13 @@ Status RevisedSimplex::Initialize(const LinearProgram& lp) { ColIndex num_new_cols(0); bool only_change_is_new_rows = false; bool only_change_is_new_cols = false; - bool matrix_is_unchanged = true; - bool only_new_bounds = false; - if (solution_state_.IsEmpty() || !notify_that_matrix_is_unchanged_) { - matrix_is_unchanged = InitializeMatrixAndTestIfUnchanged( - lp, lp_is_in_equation_form, &only_change_is_new_rows, - &only_change_is_new_cols, &num_new_cols); - only_new_bounds = only_change_is_new_cols && num_new_cols > 0 && - OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero( - lp, lp_is_in_equation_form, num_new_cols); - } else if (DEBUG_MODE) { - CHECK(InitializeMatrixAndTestIfUnchanged( - lp, lp_is_in_equation_form, &only_change_is_new_rows, - &only_change_is_new_cols, &num_new_cols)); - } - notify_that_matrix_is_unchanged_ = false; + const bool matrix_is_unchanged = InitializeMatrixAndTestIfUnchanged( + lp, lp_is_in_equation_form, &only_change_is_new_rows, + &only_change_is_new_cols, &num_new_cols); + const bool only_new_bounds = + only_change_is_new_cols && num_new_cols > 0 && + OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero( + lp, lp_is_in_equation_form, num_new_cols); // TODO(user): move objective with ReducedCosts class. const bool objective_is_unchanged = InitializeObjectiveAndTestIfUnchanged(lp); @@ -1509,6 +1550,10 @@ Status RevisedSimplex::Initialize(const LinearProgram& lp) { } } + return FinishInitialization(solve_from_scratch); +} + +Status RevisedSimplex::FinishInitialization(bool solve_from_scratch) { // If we couldn't perform a "quick" warm start above, we can at least try to // reuse the variable statuses. 
if (solve_from_scratch && !solution_state_.IsEmpty()) { @@ -1589,6 +1634,8 @@ Status RevisedSimplex::Initialize(const LinearProgram& lp) { SOLVER_LOG(logger_, "Starting basis: incremental solve."); } DCHECK(BasisIsConsistent()); + + transpose_was_changed_ = false; return Status::OK(); } diff --git a/ortools/glop/revised_simplex.h b/ortools/glop/revised_simplex.h index a4fa4b51ad..90e55698e4 100644 --- a/ortools/glop/revised_simplex.h +++ b/ortools/glop/revised_simplex.h @@ -170,14 +170,6 @@ class RevisedSimplex { // variables. void SetStartingVariableValuesForNextSolve(const DenseRow& values); - // Advanced usage. Tells the next Solve() that the matrix inside the linear - // program will not change compared to the one used the last time Solve() was - // called. This allows to bypass the somewhat costly check of comparing both - // matrices. Note that this call will be ignored if Solve() was never called - // or if ClearStateForNextSolve() was called. - void NotifyThatMatrixIsUnchangedForNextSolve(); - void NotifyThatMatrixIsChangedForNextSolve(); - // Getters to retrieve all the information computed by the last Solve(). RowIndex GetProblemNumRows() const; ColIndex GetProblemNumCols() const; @@ -252,6 +244,24 @@ class RevisedSimplex { void SetLogger(SolverLogger* logger) { logger_ = logger; } + // Advanced usage. For fast incremental call to the solver, it is better not + // to use LinearProgram at all. This api allows to directly modify the + // internal data of glop and then call solve. 
+ const CompactSparseMatrix& MatrixWithSlack() const { return compact_matrix_; } + CompactSparseMatrix* MutableTransposedMatrixWithSlack() { + transpose_was_changed_ = true; + return &transposed_matrix_; + } + DenseRow* MutableLowerBounds() { + return variables_info_.MutableLowerBounds(); + } + DenseRow* MutableUpperBounds() { + return variables_info_.MutableUpperBounds(); + } + ABSL_MUST_USE_RESULT Status MinimizeFromTransposedMatrixWithSlack( + const DenseRow& objective, Fractional objective_scaling_factor, + Fractional objective_offset, TimeLimit* time_limit); + private: struct IterationStats : public StatsGroup { IterationStats() @@ -303,6 +313,10 @@ class RevisedSimplex { FINAL_CHECK }; + ABSL_MUST_USE_RESULT Status SolveInternal(double start_time, bool maximize, + const DenseRow& objective, + TimeLimit* time_limit); + // Propagates parameters_ to all the other classes that need it. // // TODO(user): Maybe a better design is for them to have a reference to a @@ -427,6 +441,7 @@ class RevisedSimplex { // Entry point for the solver initialization. ABSL_MUST_USE_RESULT Status Initialize(const LinearProgram& lp); + ABSL_MUST_USE_RESULT Status FinishInitialization(bool solve_from_scratch); // Saves the current variable statuses in solution_state_. void SaveState(); @@ -715,9 +730,8 @@ class RevisedSimplex { // If this is cleared, we assume they are none. DenseRow variable_starting_values_; - // Flag used by NotifyThatMatrixIsUnchangedForNextSolve() and changing - // the behavior of Initialize(). - bool notify_that_matrix_is_unchanged_ = false; + // See MutableTransposedMatrixWithSlack(). + bool transpose_was_changed_ = false; // This is known as 'd' in the literature and is set during each pivot to the // right inverse of the basic entering column of A by ComputeDirection(). 
diff --git a/ortools/glop/variables_info.cc b/ortools/glop/variables_info.cc index d100f15eea..dd5b0d8d71 100644 --- a/ortools/glop/variables_info.cc +++ b/ortools/glop/variables_info.cc @@ -46,6 +46,16 @@ bool VariablesInfo::LoadBoundsAndReturnTrueIfUnchanged( return false; } +void VariablesInfo::InitializeFromMutatedState() { + const ColIndex num_cols = matrix_.num_cols(); + DCHECK_EQ(num_cols, lower_bounds_.size()); + DCHECK_EQ(num_cols, upper_bounds_.size()); + variable_type_.resize(num_cols, VariableType::UNCONSTRAINED); + for (ColIndex col(0); col < num_cols; ++col) { + variable_type_[col] = ComputeVariableType(col); + } +} + bool VariablesInfo::LoadBoundsAndReturnTrueIfUnchanged( const DenseRow& variable_lower_bounds, const DenseRow& variable_upper_bounds, diff --git a/ortools/glop/variables_info.h b/ortools/glop/variables_info.h index 2a8fa4f7e6..944d39cfc4 100644 --- a/ortools/glop/variables_info.h +++ b/ortools/glop/variables_info.h @@ -174,6 +174,12 @@ class VariablesInfo { void EndDualPhaseI(Fractional dual_feasibility_tolerance, DenseRow::ConstView reduced_costs); + // Advanced incremental API to reuse directly the internal storage. + // This saves two copy per solves, and only matter on large easy problems. + void InitializeFromMutatedState(); + DenseRow* MutableLowerBounds() { return &lower_bounds_; } + DenseRow* MutableUpperBounds() { return &upper_bounds_; } + private: // Computes the initial/default variable status from its type. A constrained // variable is set to the lowest of its 2 bounds in absolute value. 
diff --git a/ortools/lp_data/lp_data_utils.cc b/ortools/lp_data/lp_data_utils.cc index 25d03a176a..126121e65b 100644 --- a/ortools/lp_data/lp_data_utils.cc +++ b/ortools/lp_data/lp_data_utils.cc @@ -13,7 +13,12 @@ #include "ortools/lp_data/lp_data_utils.h" +#include +#include +#include + #include "absl/log/check.h" +#include "absl/types/span.h" #include "ortools/glop/parameters.pb.h" #include "ortools/lp_data/lp_data.h" #include "ortools/lp_data/lp_types.h" @@ -84,14 +89,40 @@ void Scale(LinearProgram* lp, SparseMatrixScaler* scaler, void LpScalingHelper::Scale(LinearProgram* lp) { Scale(GlopParameters(), lp); } void LpScalingHelper::Scale(const GlopParameters& params, LinearProgram* lp) { - scaler_.Clear(); - ::operations_research::glop::Scale(lp, &scaler_, params.scaling_method()); + SparseMatrixScaler scaler; + ::operations_research::glop::Scale(lp, &scaler, params.scaling_method()); bound_scaling_factor_ = 1.0 / lp->ScaleBounds(); objective_scaling_factor_ = 1.0 / lp->ScaleObjective(params.cost_scaling()); + + matrix_is_scaled_ = true; + row_unscaling_factors_ = scaler.row_scales(); + col_unscaling_factors_ = scaler.col_scales(); + + // It is possible the scaler didn't do anything. + // we still allocate the vector though since we don't test that below. 
+ row_unscaling_factors_.resize(lp->num_constraints(), 1.0); + col_unscaling_factors_.resize(lp->num_variables(), 1.0); +} + +void LpScalingHelper::ConfigureFromFactors( + absl::Span row_factors, + absl::Span col_factors) { + matrix_is_scaled_ = true; + const RowIndex num_rows(row_factors.size()); + row_unscaling_factors_.resize(num_rows, 1.0); + for (RowIndex row(0); row < num_rows; ++row) { + row_unscaling_factors_[row] = 1.0 / row_factors[row.value()]; + } + + const ColIndex num_cols(col_factors.size()); + col_unscaling_factors_.resize(num_cols, 1.0); + for (ColIndex col(0); col < num_cols; ++col) { + col_unscaling_factors_[col] = 1.0 / col_factors[col.value()]; + } } void LpScalingHelper::Clear() { - scaler_.Clear(); + matrix_is_scaled_ = false; bound_scaling_factor_ = 1.0; objective_scaling_factor_ = 1.0; } @@ -99,71 +130,81 @@ void LpScalingHelper::Clear() { Fractional LpScalingHelper::VariableScalingFactor(ColIndex col) const { // During scaling a col was multiplied by ColScalingFactor() and the variable // bounds divided by it. - return scaler_.ColUnscalingFactor(col) * bound_scaling_factor_; + return ColUnscalingFactor(col) * bound_scaling_factor_; +} + +Fractional LpScalingHelper::VariableScalingFactorWithSlack(ColIndex col) const { + if (!matrix_is_scaled_) return bound_scaling_factor_; + const ColIndex num_cols = col_unscaling_factors_.size(); + if (col < num_cols) { + return col_unscaling_factors_[col] * bound_scaling_factor_; + } + return row_unscaling_factors_[ColToRowIndex(col - num_cols)] * + bound_scaling_factor_; } Fractional LpScalingHelper::ScaleVariableValue(ColIndex col, Fractional value) const { - return value * scaler_.ColUnscalingFactor(col) * bound_scaling_factor_; + return value * ColUnscalingFactor(col) * bound_scaling_factor_; } Fractional LpScalingHelper::ScaleReducedCost(ColIndex col, Fractional value) const { // The reduced cost move like the objective and the col scale. 
- return value / scaler_.ColUnscalingFactor(col) * objective_scaling_factor_; + return value / ColUnscalingFactor(col) * objective_scaling_factor_; } Fractional LpScalingHelper::ScaleDualValue(RowIndex row, Fractional value) const { // The dual value move like the objective and the inverse of the row scale. - return value * (scaler_.RowUnscalingFactor(row) * objective_scaling_factor_); + return value * (RowUnscalingFactor(row) * objective_scaling_factor_); } Fractional LpScalingHelper::ScaleConstraintActivity(RowIndex row, Fractional value) const { // The activity move with the row_scale and the bound_scaling_factor. - return value / scaler_.RowUnscalingFactor(row) * bound_scaling_factor_; + return value / RowUnscalingFactor(row) * bound_scaling_factor_; } Fractional LpScalingHelper::UnscaleVariableValue(ColIndex col, Fractional value) const { // Just the opposite of ScaleVariableValue(). - return value / (scaler_.ColUnscalingFactor(col) * bound_scaling_factor_); + return value / (ColUnscalingFactor(col) * bound_scaling_factor_); } Fractional LpScalingHelper::UnscaleReducedCost(ColIndex col, Fractional value) const { // The reduced cost move like the objective and the col scale. - return value * scaler_.ColUnscalingFactor(col) / objective_scaling_factor_; + return value * ColUnscalingFactor(col) / objective_scaling_factor_; } Fractional LpScalingHelper::UnscaleDualValue(RowIndex row, Fractional value) const { // The dual value move like the objective and the inverse of the row scale. - return value / (scaler_.RowUnscalingFactor(row) * objective_scaling_factor_); + return value / (RowUnscalingFactor(row) * objective_scaling_factor_); } Fractional LpScalingHelper::UnscaleConstraintActivity(RowIndex row, Fractional value) const { // The activity move with the row_scale and the bound_scaling_factor. 
- return value * scaler_.RowUnscalingFactor(row) / bound_scaling_factor_; + return value * RowUnscalingFactor(row) / bound_scaling_factor_; } void LpScalingHelper::UnscaleUnitRowLeftSolve( ColIndex basis_col, ScatteredRow* left_inverse) const { - const Fractional global_factor = scaler_.ColUnscalingFactor(basis_col); + const Fractional global_factor = ColUnscalingFactor(basis_col); // We have left_inverse * [RowScale * B * ColScale] = unit_row. if (left_inverse->non_zeros.empty()) { const ColIndex num_rows = left_inverse->values.size(); for (ColIndex col(0); col < num_rows; ++col) { left_inverse->values[col] /= - scaler_.RowUnscalingFactor(ColToRowIndex(col)) * global_factor; + RowUnscalingFactor(ColToRowIndex(col)) * global_factor; } } else { for (const ColIndex col : left_inverse->non_zeros) { left_inverse->values[col] /= - scaler_.RowUnscalingFactor(ColToRowIndex(col)) * global_factor; + RowUnscalingFactor(ColToRowIndex(col)) * global_factor; } } } @@ -171,7 +212,7 @@ void LpScalingHelper::UnscaleUnitRowLeftSolve( void LpScalingHelper::UnscaleColumnRightSolve( const RowToColMapping& basis, ColIndex col, ScatteredColumn* right_inverse) const { - const Fractional global_factor = scaler_.ColScalingFactor(col); + const Fractional global_factor = 1.0 / ColUnscalingFactor(col); // [RowScale * B * BColScale] * inverse = RowScale * column * ColScale. // That is B * (BColScale * inverse) = column * ColScale[col]. 
@@ -179,15 +220,72 @@ void LpScalingHelper::UnscaleColumnRightSolve( const RowIndex num_rows = right_inverse->values.size(); for (RowIndex row(0); row < num_rows; ++row) { right_inverse->values[row] /= - scaler_.ColUnscalingFactor(basis[row]) * global_factor; + ColUnscalingFactor(basis[row]) * global_factor; } } else { for (const RowIndex row : right_inverse->non_zeros) { right_inverse->values[row] /= - scaler_.ColUnscalingFactor(basis[row]) * global_factor; + ColUnscalingFactor(basis[row]) * global_factor; } } } +void LpScalingHelper::AverageCostScaling(DenseRow* objective) { + Fractional sum = 0.0; + int num_terms = 0; + for (const Fractional f : *objective) { + if (f == 0) continue; + ++num_terms; + sum += std::abs(f); + } + if (num_terms == 0) { + objective_scaling_factor_ = 1.0; + return; + } + + const Fractional average = sum / static_cast(num_terms); + objective_scaling_factor_ = 1.0 / average; + for (Fractional& f : *objective) { + f *= objective_scaling_factor_; + } +} + +void LpScalingHelper::ContainOneBoundScaling(DenseRow* upper_bounds, + DenseRow* lower_bounds) { + const double infinity = std::numeric_limits::infinity(); + Fractional min_magnitude = infinity; + Fractional max_magnitude = 0.0; + for (const Fractional f : *lower_bounds) { + const Fractional m = std::abs(f); + if (m == 0 || m == infinity) continue; + min_magnitude = std::min(min_magnitude, m); + max_magnitude = std::max(max_magnitude, m); + } + for (const Fractional f : *upper_bounds) { + const Fractional m = std::abs(f); + if (m == 0 || m == infinity) continue; + min_magnitude = std::min(min_magnitude, m); + max_magnitude = std::max(max_magnitude, m); + } + + bound_scaling_factor_ = 1.0; + if (min_magnitude != infinity) { + CHECK_LE(min_magnitude, max_magnitude); + if (min_magnitude > 1.0) { + bound_scaling_factor_ = 1.0 / min_magnitude; + } else if (max_magnitude < 1.0) { + bound_scaling_factor_ = 1.0 / max_magnitude; + } + } + + if (bound_scaling_factor_ == 1.0) return; + for 
(Fractional& f : *lower_bounds) { + f *= bound_scaling_factor_; + } + for (Fractional& f : *upper_bounds) { + f *= bound_scaling_factor_; + } +} + } // namespace glop } // namespace operations_research diff --git a/ortools/lp_data/lp_data_utils.h b/ortools/lp_data/lp_data_utils.h index d63eb52a3e..37e94249ce 100644 --- a/ortools/lp_data/lp_data_utils.h +++ b/ortools/lp_data/lp_data_utils.h @@ -51,12 +51,14 @@ void Scale(LinearProgram* lp, SparseMatrixScaler* scaler); // sense to have a single place where all the scaling formulas are kept. class LpScalingHelper { public: + // Clear all scaling coefficients. + void Clear(); + // Scale the given LP. void Scale(LinearProgram* lp); void Scale(const GlopParameters& params, LinearProgram* lp); - - // Clear all scaling coefficients. - void Clear(); + void ConfigureFromFactors(absl::Span row_factors, + absl::Span col_factors); // Transforms value from unscaled domain to the scaled one. Fractional ScaleVariableValue(ColIndex col, Fractional value) const; @@ -83,18 +85,37 @@ class LpScalingHelper { // to be in the scaled domain. Fractional VariableScalingFactor(ColIndex col) const; - // Visible for testing. All objective coefficients of the original LP where - // multiplied by this factor. Nothing else changed. - Fractional BoundsScalingFactor() const { return bound_scaling_factor_; } + // Same as VariableScalingFactor() except that ColIndex greater than the + // number of columns will be interpreted as "slack" variable whose scaling + // factor depends on the row. + Fractional VariableScalingFactorWithSlack(ColIndex col) const; + + // Extra scaling function, to scale objective/bounds. + void AverageCostScaling(DenseRow* objective); + void ContainOneBoundScaling(DenseRow* upper_bounds, DenseRow* lower_bounds); // Visible for testing. All variable/constraint bounds of the original LP // where multiplied by this factor. Nothing else changed. 
+ Fractional BoundsScalingFactor() const { return bound_scaling_factor_; } + + // Visible for testing. All objective coefficients of the original LP where + // multiplied by this factor. Nothing else changed. Fractional ObjectiveScalingFactor() const { return objective_scaling_factor_; } private: - SparseMatrixScaler scaler_; + Fractional RowUnscalingFactor(RowIndex row) const { + return matrix_is_scaled_ ? row_unscaling_factors_[row] : 1.0; + } + Fractional ColUnscalingFactor(ColIndex col) const { + return matrix_is_scaled_ ? col_unscaling_factors_[col] : 1.0; + } + + bool matrix_is_scaled_ = false; + DenseColumn row_unscaling_factors_; + DenseRow col_unscaling_factors_; + Fractional bound_scaling_factor_ = 1.0; Fractional objective_scaling_factor_ = 1.0; }; diff --git a/ortools/lp_data/sparse.cc b/ortools/lp_data/sparse.cc index 1928aa7d55..585744ecd9 100644 --- a/ortools/lp_data/sparse.cc +++ b/ortools/lp_data/sparse.cc @@ -577,6 +577,17 @@ void TriangularMatrix::Reset(RowIndex num_rows, ColIndex col_capacity) { starts_[ColIndex(0)] = 0; } +void CompactSparseMatrix::AddEntryToCurrentColumn(RowIndex row, + Fractional coeff) { + rows_.push_back(row); + coefficients_.push_back(coeff); +} + +void CompactSparseMatrix::CloseCurrentColumn() { + starts_.push_back(rows_.size()); + ++num_cols_; +} + ColIndex CompactSparseMatrix::AddDenseColumn(const DenseColumn& dense_column) { return AddDenseColumnPrefix(dense_column.const_view(), RowIndex(0)); } @@ -832,6 +843,7 @@ void TriangularMatrix::UpperSolveInternal(DenseColumn::View rhs) const { const auto entry_rows = rows_.view(); const auto entry_coefficients = coefficients_.view(); const auto diagonal_coefficients = diagonal_coefficients_.view(); + const auto starts = starts_.view(); for (ColIndex col(diagonal_coefficients.size() - 1); col >= end; --col) { const Fractional value = rhs[ColToRowIndex(col)]; if (value == 0.0) continue; @@ -844,8 +856,8 @@ void TriangularMatrix::UpperSolveInternal(DenseColumn::View rhs) const { 
// It is faster to iterate this way (instead of i : Column(col)) because of // cache locality. Note that the floating-point computations are exactly the // same in both cases. - const EntryIndex i_end = starts_[col]; - for (EntryIndex i(starts_[col + 1] - 1); i >= i_end; --i) { + const EntryIndex i_end = starts[col]; + for (EntryIndex i(starts[col + 1] - 1); i >= i_end; --i) { rhs[entry_rows[i]] -= coeff * entry_coefficients[i]; } } diff --git a/ortools/lp_data/sparse.h b/ortools/lp_data/sparse.h index 96c4571444..19053dba32 100644 --- a/ortools/lp_data/sparse.h +++ b/ortools/lp_data/sparse.h @@ -369,6 +369,10 @@ class CompactSparseMatrix { // Add*() functions below. void Reset(RowIndex num_rows); + // Api to add columns one at the time. + void AddEntryToCurrentColumn(RowIndex row, Fractional coeff); + void CloseCurrentColumn(); + // Adds a dense column to the CompactSparseMatrix (only the non-zero will be // actually stored). This work in O(input.size()) and returns the index of the // added column. 
diff --git a/ortools/sat/2d_rectangle_presolve_test.cc b/ortools/sat/2d_rectangle_presolve_test.cc index 899dc9cd37..20700a826d 100644 --- a/ortools/sat/2d_rectangle_presolve_test.cc +++ b/ortools/sat/2d_rectangle_presolve_test.cc @@ -48,13 +48,14 @@ using ::testing::IsEmpty; std::vector BuildFromAsciiArt(std::string_view input) { std::vector rectangles; std::vector lines = absl::StrSplit(input, '\n'); + const int num_lines = lines.size(); for (int i = 0; i < lines.size(); i++) { for (int j = 0; j < lines[i].size(); j++) { if (lines[i][j] != ' ') { rectangles.push_back({.x_min = j, .x_max = j + 1, - .y_min = 2 * lines.size() - 2 * i, - .y_max = 2 * lines.size() - 2 * i + 2}); + .y_min = 2 * num_lines - 2 * i, + .y_max = 2 * num_lines - 2 * i + 2}); } } } diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 5e1b5ba3d7..e4d9c5c1f9 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -2146,6 +2146,7 @@ cc_library( "//ortools/lp_data:base", "//ortools/lp_data:lp_data_utils", "//ortools/lp_data:scattered_vector", + "//ortools/lp_data:sparse", "//ortools/lp_data:sparse_column", "//ortools/util:bitset", "//ortools/util:rev", @@ -2156,6 +2157,7 @@ cc_library( "@com_google_absl//absl/log:check", "@com_google_absl//absl/numeric:int128", "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/types:span", ], ) @@ -3404,6 +3406,26 @@ cc_library( ], ) +cc_test( + name = "work_assignment_test", + srcs = ["work_assignment_test.cc"], + deps = [ + ":cp_model", + ":cp_model_cc_proto", + ":cp_model_checker", + ":cp_model_loader", + ":cp_model_solver", + ":integer", + ":model", + ":sat_parameters_cc_proto", + ":synchronization", + ":work_assignment", + "//ortools/base:gmock_main", + "//ortools/base:parse_text_proto", + "@com_google_absl//absl/strings:string_view", + ], +) + cc_test( name = "inclusion_test", size = "small", diff --git a/ortools/sat/cp_model_checker.cc b/ortools/sat/cp_model_checker.cc 
index 926d58d04a..8645013cd0 100644 --- a/ortools/sat/cp_model_checker.cc +++ b/ortools/sat/cp_model_checker.cc @@ -854,9 +854,9 @@ std::string ValidateSolutionHint(const CpModelProto& model) { if (hint.vars().size() != hint.values().size()) { return "Invalid solution hint: vars and values do not have the same size."; } - for (const int ref : hint.vars()) { - if (!VariableReferenceIsValid(model, ref)) { - return absl::StrCat("Invalid variable reference in solution hint: ", ref); + for (const int var : hint.vars()) { + if (!VariableIndexIsValid(model, var)) { + return absl::StrCat("Invalid variable in solution hint: ", var); } } diff --git a/ortools/sat/cp_model_expand.cc b/ortools/sat/cp_model_expand.cc index e9cceb9d4d..eaa97cdc80 100644 --- a/ortools/sat/cp_model_expand.cc +++ b/ortools/sat/cp_model_expand.cc @@ -1768,7 +1768,8 @@ void CompressAndExpandPositiveTable(ConstraintProto* ct, } } - VLOG(2) << "Table compression" << " var=" << vars.size() + VLOG(2) << "Table compression" + << " var=" << vars.size() << " cost=" << domain_sizes.size() - vars.size() << " tuples= " << num_tuples_before_compression << " -> " << num_tuples_after_first_compression << " -> " diff --git a/ortools/sat/cp_model_lns.cc b/ortools/sat/cp_model_lns.cc index a3ccabe003..48404abdfd 100644 --- a/ortools/sat/cp_model_lns.cc +++ b/ortools/sat/cp_model_lns.cc @@ -448,7 +448,7 @@ std::vector NeighborhoodGeneratorHelper::GetActiveIntervals( initial_solution); } -std::vector> +std::vector NeighborhoodGeneratorHelper::GetActiveRectangles( const CpSolverResponse& initial_solution) const { const std::vector active_intervals = @@ -456,7 +456,7 @@ NeighborhoodGeneratorHelper::GetActiveRectangles( const absl::flat_hash_set active_intervals_set(active_intervals.begin(), active_intervals.end()); - std::vector> active_rectangles; + absl::flat_hash_map, std::vector> active_rectangles; for (const int ct_index : TypeToConstraints(ConstraintProto::kNoOverlap2D)) { const NoOverlap2DConstraintProto& ct = 
model_proto_.constraints(ct_index).no_overlap_2d(); @@ -465,12 +465,20 @@ NeighborhoodGeneratorHelper::GetActiveRectangles( const int y_i = ct.y_intervals(i); if (active_intervals_set.contains(x_i) || active_intervals_set.contains(y_i)) { - active_rectangles.push_back({x_i, y_i}); + active_rectangles[{x_i, y_i}].push_back(ct_index); } } } - return active_rectangles; + std::vector results; + for (const auto& [rectangle, no_overlap_2d_constraints] : active_rectangles) { + ActiveRectangle& result = results.emplace_back(); + result.x_interval = rectangle.first; + result.y_interval = rectangle.second; + result.no_overlap_2d_constraints = {no_overlap_2d_constraints.begin(), + no_overlap_2d_constraints.end()}; + } + return results; } std::vector> @@ -1718,8 +1726,8 @@ Neighborhood DecompositionGraphNeighborhoodGenerator::Generate( VLOG(2) << "#relaxed " << relaxed_variables.size() << " #zero_score " << num_zero_score << " max_width " << max_width << " (size,min_width)_after_100 (" << size_at_min_width_after_100 - << "," << min_width_after_100 << ") " << " final_width " - << pq.Size(); + << "," << min_width_after_100 << ") " + << " final_width " << pq.Size(); } return helper_.RelaxGivenVariables(initial_solution, relaxed_variables); @@ -2256,14 +2264,125 @@ Neighborhood SchedulingResourceWindowsNeighborhoodGenerator::Generate( Neighborhood RandomRectanglesPackingNeighborhoodGenerator::Generate( const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { - std::vector> rectangles_to_freeze = + std::vector rectangles_to_freeze = helper_.GetActiveRectangles(initial_solution); GetRandomSubset(1.0 - data.difficulty, &rectangles_to_freeze, random); absl::flat_hash_set variables_to_freeze; - for (const auto& [x, y] : rectangles_to_freeze) { - InsertVariablesFromConstraint(helper_.ModelProto(), x, variables_to_freeze); - InsertVariablesFromConstraint(helper_.ModelProto(), y, variables_to_freeze); + for (const ActiveRectangle& rectangle : 
rectangles_to_freeze) { + InsertVariablesFromConstraint(helper_.ModelProto(), rectangle.x_interval, + variables_to_freeze); + InsertVariablesFromConstraint(helper_.ModelProto(), rectangle.y_interval, + variables_to_freeze); + } + + return helper_.FixGivenVariables(initial_solution, variables_to_freeze); +} + +Neighborhood RectanglesPackingRelaxTwoNeighborhoodsGenerator::Generate( + const CpSolverResponse& initial_solution, SolveData& data, + absl::BitGenRef random) { + // First pick a pair of rectangles. + std::vector all_active_rectangles = + helper_.GetActiveRectangles(initial_solution); + if (all_active_rectangles.size() <= 2) return helper_.FullNeighborhood(); + + const int first_idx = + absl::Uniform(random, 0, all_active_rectangles.size()); + int second_idx = + absl::Uniform(random, 0, all_active_rectangles.size() - 1); + if (second_idx >= first_idx) { + second_idx++; + } + + const ActiveRectangle& chosen_rectangle_1 = all_active_rectangles[first_idx]; + const ActiveRectangle& chosen_rectangle_2 = all_active_rectangles[second_idx]; + + const auto get_rectangle = [&initial_solution, helper = &helper_]( + const ActiveRectangle& rectangle) { + const int x_interval_idx = rectangle.x_interval; + const int y_interval_idx = rectangle.y_interval; + const ConstraintProto& x_interval_ct = + helper->ModelProto().constraints(x_interval_idx); + const ConstraintProto& y_interval_ct = + helper->ModelProto().constraints(y_interval_idx); + return Rectangle{.x_min = GetLinearExpressionValue( + x_interval_ct.interval().start(), initial_solution), + .x_max = GetLinearExpressionValue( + x_interval_ct.interval().end(), initial_solution), + .y_min = GetLinearExpressionValue( + y_interval_ct.interval().start(), initial_solution), + .y_max = GetLinearExpressionValue( + y_interval_ct.interval().end(), initial_solution)}; + }; + + // TODO(user): This computes the distance between the center of the + // rectangles. 
We could use the real distance between the closest points, but + // not sure it is worth the extra complexity. + const auto compute_rectangle_distance = [](const Rectangle& rect1, + const Rectangle& rect2) { + return (static_cast(rect1.x_min.value()) + rect1.x_max.value() - + rect2.x_min.value() - rect2.x_max.value()) * + (static_cast(rect1.y_min.value()) + rect1.y_max.value() - + rect2.y_min.value() - rect2.y_max.value()); + }; + const Rectangle rect1 = get_rectangle(chosen_rectangle_1); + const Rectangle rect2 = get_rectangle(chosen_rectangle_2); + + // Now compute a neighborhood around each rectangle. Note that we only + // consider two rectangles as potential neighbors if they are part of the same + // no_overlap_2d constraint. + absl::flat_hash_set variables_to_freeze; + std::vector> distances1; + std::vector> distances2; + distances1.reserve(all_active_rectangles.size()); + distances2.reserve(all_active_rectangles.size()); + for (int i = 0; i < all_active_rectangles.size(); ++i) { + const ActiveRectangle& rectangle = all_active_rectangles[i]; + InsertVariablesFromConstraint(helper_.ModelProto(), rectangle.x_interval, + variables_to_freeze); + InsertVariablesFromConstraint(helper_.ModelProto(), rectangle.y_interval, + variables_to_freeze); + + const Rectangle rect = get_rectangle(rectangle); + const bool same_no_overlap_as_rect1 = + absl::c_any_of(chosen_rectangle_1.no_overlap_2d_constraints, + [&rectangle](const int c) { + return rectangle.no_overlap_2d_constraints.contains(c); + }); + const bool same_no_overlap_as_rect2 = + absl::c_any_of(chosen_rectangle_2.no_overlap_2d_constraints, + [&rectangle](const int c) { + return rectangle.no_overlap_2d_constraints.contains(c); + }); + if (same_no_overlap_as_rect1) { + distances1.push_back({i, compute_rectangle_distance(rect1, rect)}); + } + if (same_no_overlap_as_rect2) { + distances2.push_back({i, compute_rectangle_distance(rect2, rect)}); + } + } + const int num_to_sample_each = + data.difficulty * 
all_active_rectangles.size() / 2; + std::sort(distances1.begin(), distances1.end(), + [](const auto& a, const auto& b) { return a.second < b.second; }); + std::sort(distances2.begin(), distances2.end(), + [](const auto& a, const auto& b) { return a.second < b.second; }); + absl::flat_hash_set variables_to_relax; + for (auto& samples : {distances1, distances2}) { + const int num_potential_samples = samples.size(); + for (int i = 0; i < std::min(num_potential_samples, num_to_sample_each); + ++i) { + const int rectangle_idx = samples[i].first; + const ActiveRectangle& rectangle = all_active_rectangles[rectangle_idx]; + InsertVariablesFromConstraint(helper_.ModelProto(), rectangle.x_interval, + variables_to_relax); + InsertVariablesFromConstraint(helper_.ModelProto(), rectangle.y_interval, + variables_to_relax); + } + } + for (const int v : variables_to_relax) { + variables_to_freeze.erase(v); } return helper_.FixGivenVariables(initial_solution, variables_to_freeze); @@ -2272,13 +2391,13 @@ Neighborhood RandomRectanglesPackingNeighborhoodGenerator::Generate( Neighborhood RandomPrecedencesPackingNeighborhoodGenerator::Generate( const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { - std::vector> rectangles_to_relax = + std::vector rectangles_to_relax = helper_.GetActiveRectangles(initial_solution); GetRandomSubset(data.difficulty, &rectangles_to_relax, random); std::vector intervals_to_relax; - for (const auto& [x, y] : rectangles_to_relax) { - intervals_to_relax.push_back(x); - intervals_to_relax.push_back(y); + for (const ActiveRectangle& rect : rectangles_to_relax) { + intervals_to_relax.push_back(rect.x_interval); + intervals_to_relax.push_back(rect.y_interval); } gtl::STLSortAndRemoveDuplicates(&intervals_to_relax); @@ -2289,13 +2408,14 @@ Neighborhood RandomPrecedencesPackingNeighborhoodGenerator::Generate( Neighborhood SlicePackingNeighborhoodGenerator::Generate( const CpSolverResponse& initial_solution, SolveData& data, 
absl::BitGenRef random) { - const std::vector> active_rectangles = + const std::vector active_rectangles = helper_.GetActiveRectangles(initial_solution); const bool use_first_dimension = absl::Bernoulli(random, 0.5); std::vector projected_intervals; projected_intervals.reserve(active_rectangles.size()); - for (const auto& [x, y] : active_rectangles) { - projected_intervals.push_back(use_first_dimension ? x : y); + for (const ActiveRectangle& rect : active_rectangles) { + projected_intervals.push_back(use_first_dimension ? rect.x_interval + : rect.y_interval); } const TimePartition partition = PartitionIndicesAroundRandomTimeWindow( @@ -2310,10 +2430,10 @@ Neighborhood SlicePackingNeighborhoodGenerator::Generate( for (int index = 0; index < active_rectangles.size(); ++index) { if (indices_to_fix[index]) { InsertVariablesFromConstraint(helper_.ModelProto(), - active_rectangles[index].first, + active_rectangles[index].x_interval, variables_to_freeze); InsertVariablesFromConstraint(helper_.ModelProto(), - active_rectangles[index].second, + active_rectangles[index].y_interval, variables_to_freeze); } } diff --git a/ortools/sat/cp_model_lns.h b/ortools/sat/cp_model_lns.h index dd92ff04d3..00856cbe1c 100644 --- a/ortools/sat/cp_model_lns.h +++ b/ortools/sat/cp_model_lns.h @@ -219,7 +219,14 @@ class NeighborhoodGeneratorHelper : public SubSolver { // lns_focus_on_performed_intervals. If true, this method returns the list of // performed rectangles in the solution. If false, it returns all rectangles // of the model. - std::vector> GetActiveRectangles( + struct ActiveRectangle { + int x_interval; + int y_interval; + // The set of no_overlap_2d constraints that both x_interval and y_interval + // are participating in. 
+ absl::flat_hash_set no_overlap_2d_constraints; + }; + std::vector GetActiveRectangles( const CpSolverResponse& initial_solution) const; // Returns the set of unique intervals list appearing in a no_overlap, @@ -356,6 +363,8 @@ class NeighborhoodGenerator { : name_(name), helper_(*helper), difficulty_(0.5) {} virtual ~NeighborhoodGenerator() = default; + using ActiveRectangle = NeighborhoodGeneratorHelper::ActiveRectangle; + // Adds solve data about one "solved" neighborhood. struct SolveData { // The status of the sub-solve. @@ -706,6 +715,21 @@ class RandomRectanglesPackingNeighborhoodGenerator SolveData& data, absl::BitGenRef random) final; }; +// Only make sense for problems with no_overlap_2d constraints. This selects two +// random rectangles and relax them alongside the closest rectangles to each one +// of them. The idea is that this will find a better solution when there is a +// cost function that would be improved by swapping the two rectangles. +class RectanglesPackingRelaxTwoNeighborhoodsGenerator + : public NeighborhoodGenerator { + public: + explicit RectanglesPackingRelaxTwoNeighborhoodsGenerator( + NeighborhoodGeneratorHelper const* helper, absl::string_view name) + : NeighborhoodGenerator(name, helper) {} + + Neighborhood Generate(const CpSolverResponse& initial_solution, + SolveData& data, absl::BitGenRef random) final; +}; + // Only make sense for problems with no_overlap_2d constraints. This select a // random set of rectangles (i.e. a pair of intervals) of the problem according // to the difficulty. 
Then add all implied precedences from the current diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 4b6720e106..1a1eeb358d 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -1704,6 +1704,60 @@ bool CpModelPresolver::PresolveIntProd(ConstraintProto* ct) { } } + if (ct->int_prod().exprs().size() == 2) { + const auto is_boolean_affine = + [context = context_](const LinearExpressionProto& expr) { + return expr.vars().size() == 1 && context->MinOf(expr.vars(0)) == 0 && + context->MaxOf(expr.vars(0)) == 1; + }; + const LinearExpressionProto* boolean_linear = nullptr; + const LinearExpressionProto* other_linear = nullptr; + if (is_boolean_affine(ct->int_prod().exprs(0))) { + boolean_linear = &ct->int_prod().exprs(0); + other_linear = &ct->int_prod().exprs(1); + } else if (is_boolean_affine(ct->int_prod().exprs(1))) { + boolean_linear = &ct->int_prod().exprs(1); + other_linear = &ct->int_prod().exprs(0); + } + if (boolean_linear) { + // We have: + // (u + b * v) * other_expr = B, where `b` is a boolean variable. 
+ // + // We can rewrite this as: + // u * other_expr = B, if b = false; + // (u + v) * other_expr = B, if b = true + ConstraintProto* constraint_for_false = + context_->working_model->add_constraints(); + ConstraintProto* constraint_for_true = + context_->working_model->add_constraints(); + constraint_for_true->add_enforcement_literal(boolean_linear->vars(0)); + constraint_for_false->add_enforcement_literal( + NegatedRef(boolean_linear->vars(0))); + LinearConstraintProto* linear_for_false = + constraint_for_false->mutable_linear(); + LinearConstraintProto* linear_for_true = + constraint_for_true->mutable_linear(); + + linear_for_false->add_domain(0); + linear_for_false->add_domain(0); + AddLinearExpressionToLinearConstraint( + *other_linear, boolean_linear->offset(), linear_for_false); + AddLinearExpressionToLinearConstraint(ct->int_prod().target(), -1, + linear_for_false); + + linear_for_true->add_domain(0); + linear_for_true->add_domain(0); + AddLinearExpressionToLinearConstraint( + *other_linear, boolean_linear->offset() + boolean_linear->coeffs(0), + linear_for_true); + AddLinearExpressionToLinearConstraint(ct->int_prod().target(), -1, + linear_for_true); + context_->UpdateRuleStats("int_prod: boolean affine term"); + context_->UpdateNewConstraintsVariableUsage(); + return RemoveConstraint(ct); + } + } + // For now, we only presolve the case where all variables are Booleans. const LinearExpressionProto target_expr = ct->int_prod().target(); int target; @@ -12472,6 +12526,27 @@ void CopyEverythingExceptVariablesAndConstraintsFieldsIntoContext( } if (in_model.has_solution_hint()) { *context->working_model->mutable_solution_hint() = in_model.solution_hint(); + + // We make sure the hint is within the variables domain. + // + // This allows to avoid overflow because we know evaluating constraints on + // the variables domains should be safe thanks to the initial validation. 
+ const int num_terms = in_model.solution_hint().vars().size(); + for (int i = 0; i < num_terms; ++i) { + const int var = in_model.solution_hint().vars(i); + const int64_t value = in_model.solution_hint().values(i); + const auto& domain = in_model.variables(var).domain(); + if (domain.empty()) continue; // UNSAT. + const int64_t min = domain[0]; + const int64_t max = domain[domain.size() - 1]; + if (value < min) { + context->UpdateRuleStats("hint: moved var hint within its domain."); + context->working_model->mutable_solution_hint()->set_values(i, min); + } else if (value > max) { + context->working_model->mutable_solution_hint()->set_values(i, max); + context->UpdateRuleStats("hint: moved var hint within its domain."); + } + } } } diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 728524ed36..dd60f67fa3 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -1663,6 +1663,12 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) { helper, name_filter.LastName()), lns_params, helper, shared)); } + if (name_filter.Keep("packing_swap_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } if (name_filter.Keep("packing_precedences_lns")) { reentrant_interleaved_subsolvers.push_back(std::make_unique( std::make_unique( diff --git a/ortools/sat/feasibility_jump.cc b/ortools/sat/feasibility_jump.cc index 8219baf393..1083b3e6e0 100644 --- a/ortools/sat/feasibility_jump.cc +++ b/ortools/sat/feasibility_jump.cc @@ -86,7 +86,8 @@ bool JumpTable::JumpIsUpToDate(int var) const { if (abs(score - scores_[var]) / std::max(abs(score), 1.0) > 1e-2) { score_ok = false; LOG(ERROR) << "Incorrect score for var " << var << ": " << scores_[var] - << " (should be " << score << ") " << " delta = " << delta; + << " (should be " << score << ") " + << " delta = " << delta; } return delta == deltas_[var] && 
score_ok; } diff --git a/ortools/sat/feasibility_pump.cc b/ortools/sat/feasibility_pump.cc index 5ff2c7dd85..9a0cb4a761 100644 --- a/ortools/sat/feasibility_pump.cc +++ b/ortools/sat/feasibility_pump.cc @@ -360,14 +360,14 @@ void FeasibilityPump::L1DistanceMinimize() { const ColIndex norm_lhs_slack_variable = lp_data_.GetSlackVariable(norm_lhs_constraints_[col]); const double lhs_scaling_factor = - scaler_.VariableScalingFactor(norm_lhs_slack_variable); + scaler_.VariableScalingFactorWithSlack(norm_lhs_slack_variable); lp_data_.SetVariableBounds( norm_lhs_slack_variable, -glop::kInfinity, lhs_scaling_factor * integer_solution_[col.value()]); const ColIndex norm_rhs_slack_variable = lp_data_.GetSlackVariable(norm_rhs_constraints_[col]); const double rhs_scaling_factor = - scaler_.VariableScalingFactor(norm_rhs_slack_variable); + scaler_.VariableScalingFactorWithSlack(norm_rhs_slack_variable); lp_data_.SetVariableBounds( norm_rhs_slack_variable, -glop::kInfinity, -rhs_scaling_factor * integer_solution_[col.value()]); diff --git a/ortools/sat/linear_constraint_manager.cc b/ortools/sat/linear_constraint_manager.cc index 1f5288284e..ec2d67e595 100644 --- a/ortools/sat/linear_constraint_manager.cc +++ b/ortools/sat/linear_constraint_manager.cc @@ -92,6 +92,15 @@ bool LinearConstraintManager::MaybeRemoveSomeInactiveConstraints( int new_size = 0; for (int i = 0; i < num_rows; ++i) { const ConstraintIndex constraint_index = lp_constraints_[i]; + if (constraint_infos_[constraint_index].constraint.num_terms == 0) { + // Remove empty constraint. + // + // TODO(user): If the constraint is infeasible we could detect unsat + // right away, but hopefully this is a case where the propagation part + // of the solver can detect that too. + constraint_infos_[constraint_index].is_in_lp = false; + continue; + } // Constraints that are not tight in the current solution have a basic // status. 
We remove the ones that have been inactive in the last recent @@ -245,7 +254,8 @@ bool LinearConstraintManager::AddCut(LinearConstraint ct, std::string type_name, // Only add cut with sufficient efficacy. if (violation / l2_norm < 1e-4) { - VLOG(3) << "BAD Cut '" << type_name << "'" << " size=" << ct.num_terms + VLOG(3) << "BAD Cut '" << type_name << "'" + << " size=" << ct.num_terms << " max_magnitude=" << ComputeInfinityNorm(ct) << " norm=" << l2_norm << " violation=" << violation << " eff=" << violation / l2_norm << " " << extra_info; @@ -764,6 +774,8 @@ bool LinearConstraintManager::ChangeLp(glop::BasisState* solution_state, void LinearConstraintManager::AddAllConstraintsToLp() { for (ConstraintIndex i(0); i < constraint_infos_.size(); ++i) { if (constraint_infos_[i].is_in_lp) continue; + if (constraint_infos_[i].constraint.num_terms == 0) continue; + constraint_infos_[i].is_in_lp = true; lp_constraints_.push_back(i); } diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index 63237ee2f7..d308b1ae9d 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -31,6 +31,7 @@ #include "absl/log/check.h" #include "absl/numeric/int128.h" #include "absl/strings/str_cat.h" +#include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "ortools/algorithms/binary_search.h" @@ -45,6 +46,7 @@ #include "ortools/lp_data/lp_data_utils.h" #include "ortools/lp_data/lp_types.h" #include "ortools/lp_data/scattered_vector.h" +#include "ortools/lp_data/sparse.h" #include "ortools/lp_data/sparse_column.h" #include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/cuts.h" @@ -373,8 +375,6 @@ void LinearProgrammingConstraint::SetObjectiveCoefficient(IntegerVariable ivar, // for TSP for instance where the number of edges is large, but only a small // fraction will be used in the optimal solution. 
bool LinearProgrammingConstraint::CreateLpFromConstraintManager() { - simplex_.NotifyThatMatrixIsChangedForNextSolve(); - // Fill integer_lp_. integer_lp_.clear(); integer_lp_cols_.clear(); @@ -384,6 +384,10 @@ bool LinearProgrammingConstraint::CreateLpFromConstraintManager() { const auto& all_constraints = constraint_manager_.AllConstraints(); for (const auto index : constraint_manager_.LpConstraints()) { const LinearConstraint& ct = all_constraints[index].constraint; + if (ct.lb > ct.ub) { + VLOG(1) << "Trivial infeasible bound in an LP constraint"; + return false; + } integer_lp_.push_back(LinearConstraintInternal()); LinearConstraintInternal& new_ct = integer_lp_.back(); @@ -391,16 +395,15 @@ bool LinearProgrammingConstraint::CreateLpFromConstraintManager() { new_ct.ub = ct.ub; new_ct.lb_is_trivial = all_constraints[index].lb_is_trivial; new_ct.ub_is_trivial = all_constraints[index].ub_is_trivial; - const int size = ct.num_terms; - if (ct.lb > ct.ub) { - VLOG(1) << "Trivial infeasible bound in an LP constraint"; - return false; - } IntegerValue infinity_norm = 0; infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.lb)); infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.ub)); new_ct.start_in_buffer = integer_lp_cols_.size(); + + // TODO(user): Make sure we don't have empty constraint! + // this currently can happen in some corner cases. + const int size = ct.num_terms; new_ct.num_terms = size; for (int i = 0; i < size; ++i) { // We only use positive variable inside this class. @@ -418,12 +421,6 @@ bool LinearProgrammingConstraint::CreateLpFromConstraintManager() { integer_lp_cols_.data() + new_ct.start_in_buffer + new_ct.num_terms)); } - // Copy the integer_lp_ into lp_data_. - lp_data_.Clear(); - for (int i = 0; i < integer_variables_.size(); ++i) { - CHECK_EQ(glop::ColIndex(i), lp_data_.CreateNewVariable()); - } - // We remove fixed variables from the objective. This should help the LP // scaling, but also our integer reason computation. 
int new_size = 0; @@ -438,49 +435,26 @@ bool LinearProgrammingConstraint::CreateLpFromConstraintManager() { objective_infinity_norm_ = std::max(objective_infinity_norm_, IntTypeAbs(entry.second)); integer_objective_[new_size++] = entry; - lp_data_.SetObjectiveCoefficient(entry.first, ToDouble(entry.second)); } + integer_objective_.resize(new_size); objective_infinity_norm_ = std::max(objective_infinity_norm_, IntTypeAbs(integer_objective_offset_)); - integer_objective_.resize(new_size); - lp_data_.SetObjectiveOffset(ToDouble(integer_objective_offset_)); - - for (const LinearConstraintInternal& ct : integer_lp_) { - const ConstraintIndex row = lp_data_.CreateNewConstraint(); - - // TODO(user): Using trivial bound might be good for things like - // sum bool <= 1 since setting the slack in [0, 1] can lead to bound flip in - // the simplex. However if the bound is large, maybe it make more sense to - // use +/- infinity. - const double infinity = std::numeric_limits::infinity(); - lp_data_.SetConstraintBounds( - row, ct.lb_is_trivial ? -infinity : ToDouble(ct.lb), - ct.ub_is_trivial ? +infinity : ToDouble(ct.ub)); - for (int i = 0; i < ct.num_terms; ++i) { - const int index = ct.start_in_buffer + i; - lp_data_.SetCoefficient(row, integer_lp_cols_[index], - ToDouble(integer_lp_coeffs_[index])); - } - } - lp_data_.NotifyThatColumnsAreClean(); - - // We scale the LP using the level zero bounds that we later override - // with the current ones. - // - // TODO(user): As part of the scaling, we may also want to shift the initial - // variable bounds so that each variable contain the value zero in their - // domain. Maybe just once and for all at the beginning. 
- const int num_vars = integer_variables_.size(); - for (int i = 0; i < num_vars; i++) { - const IntegerVariable cp_var = integer_variables_[i]; - const double lb = ToDouble(integer_trail_->LevelZeroLowerBound(cp_var)); - const double ub = ToDouble(integer_trail_->LevelZeroUpperBound(cp_var)); - lp_data_.SetVariableBounds(glop::ColIndex(i), lb, ub); - } + // Scale everything. // TODO(user): As we have an idea of the LP optimal after the first solves, // maybe we can adapt the scaling accordingly. - scaler_.Scale(simplex_params_, &lp_data_); + ComputeIntegerLpScalingFactors(); + + // Tricky: we use level zero bounds here for the second scaling step below. + FillLpData(); + + // Fills the helper. + scaler_.ConfigureFromFactors(row_factors_, col_factors_); + scaler_.AverageCostScaling(&obj_with_slack_); + scaler_.ContainOneBoundScaling(simplex_.MutableLowerBounds(), + simplex_.MutableUpperBounds()); + + // Since we used level zero bounds above, fix them. UpdateBoundsOfLpVariables(); // Set the information for the step to polish the LP basis. All our variables @@ -488,6 +462,7 @@ bool LinearProgrammingConstraint::CreateLpFromConstraintManager() { // binary variables. if (parameters_.polish_lp_solution()) { simplex_.ClearIntegralityScales(); + const int num_vars = integer_variables_.size(); for (int i = 0; i < num_vars; ++i) { const IntegerVariable cp_var = integer_variables_[i]; const IntegerValue lb = integer_trail_->LevelZeroLowerBound(cp_var); @@ -499,13 +474,184 @@ bool LinearProgrammingConstraint::CreateLpFromConstraintManager() { } } - lp_data_.NotifyThatColumnsAreClean(); - VLOG(3) << "LP relaxation: " << lp_data_.GetDimensionString() << ". " + VLOG(3) << "LP relaxation: " << integer_lp_.size() << " x " + << integer_variables_.size() << ". " << constraint_manager_.AllConstraints().size() << " Managed constraints."; return true; } +// TODO(user): This is a duplicate of glop scaling code, but it allows to +// work directly on our representation... 
+void LinearProgrammingConstraint::ComputeIntegerLpScalingFactors() { + const int num_rows = integer_lp_.size(); + const int num_cols = integer_variables_.size(); + + // Assign vectors. + const double infinity = std::numeric_limits::infinity(); + row_factors_.assign(num_rows, 1.0); + col_factors_.assign(num_cols, 1.0); + + // Cache pointers to avoid refetching them. + IntegerValue* coeffs = integer_lp_coeffs_.data(); + glop::ColIndex* cols = integer_lp_cols_.data(); + double* row_factors = row_factors_.data(); + double* col_factors = col_factors_.data(); + + col_min_.assign(num_cols, infinity); + col_max_.assign(num_cols, 0.0); + double* col_min = col_min_.data(); + double* col_max = col_max_.data(); + + for (int i = 0; i < 4; ++i) { + // Scale row geometrically. + for (int row = 0; row < num_rows; ++row) { + double min_scaled = +infinity; + double max_scaled = 0.0; + const LinearConstraintInternal& ct = integer_lp_[RowIndex(row)]; + for (int i = 0; i < ct.num_terms; ++i) { + const int index = ct.start_in_buffer + i; + const int col = cols[index].value(); + const double coeff = static_cast(coeffs[index].value()); + const double scaled_magnitude = col_factors[col] * std::abs(coeff); + min_scaled = std::min(min_scaled, scaled_magnitude); + max_scaled = std::max(max_scaled, scaled_magnitude); + } + + if (ct.num_terms == 0) continue; + const Fractional factor(std::sqrt(max_scaled * min_scaled)); + row_factors[row] = 1.0 / factor; + } + + // Scale columns geometrically. 
+ for (int row = 0; row < num_rows; ++row) { + const double row_factor = row_factors[row]; + const LinearConstraintInternal& ct = integer_lp_[RowIndex(row)]; + for (int i = 0; i < ct.num_terms; ++i) { + const int index = ct.start_in_buffer + i; + const int col = cols[index].value(); + const double coeff = static_cast(coeffs[index].value()); + const double scaled_magnitude = row_factor * std::abs(coeff); + col_min[col] = std::min(col_min[col], scaled_magnitude); + col_max[col] = std::max(col_max[col], scaled_magnitude); + } + } + for (int col = 0; col < num_cols; ++col) { + if (col_min[col] == infinity) continue; // Empty. + col_factors[col] = 1.0 / std::sqrt(col_min[col] * col_max[col]); + + // Reset, in case we have many fixed variable, faster than assign again. + col_min[col] = infinity; + col_max[col] = 0; + } + } + + // Now we equilibrate (i.e. just divide by the max) the row + for (int row = 0; row < num_rows; ++row) { + double max_scaled = 0.0; + const LinearConstraintInternal& ct = integer_lp_[RowIndex(row)]; + for (int i = 0; i < ct.num_terms; ++i) { + const int index = ct.start_in_buffer + i; + const int col = cols[index].value(); + const double coeff = static_cast(coeffs[index].value()); + const double scaled_magnitude = col_factors[col] * std::abs(coeff); + max_scaled = std::max(max_scaled, scaled_magnitude); + } + if (ct.num_terms == 0) continue; + row_factors[row] = 1.0 / max_scaled; + } + + // And finally the columns. 
+ for (int row = 0; row < num_rows; ++row) { + const double row_factor = row_factors[row]; + const LinearConstraintInternal& ct = integer_lp_[RowIndex(row)]; + for (int i = 0; i < ct.num_terms; ++i) { + const int index = ct.start_in_buffer + i; + const int col = cols[index].value(); + const double coeff = static_cast(coeffs[index].value()); + const double scaled_magnitude = row_factor * std::abs(coeff); + col_max[col] = std::max(col_max[col], scaled_magnitude); + } + } + for (int col = 0; col < num_cols; ++col) { + if (col_max[col] == 0) continue; // Empty. + col_factors[col] = 1.0 / col_max[col]; + } +} + +void LinearProgrammingConstraint::FillLpData() { + const int num_rows = integer_lp_.size(); + const int num_cols = integer_variables_.size(); + IntegerValue* coeffs = integer_lp_coeffs_.data(); + glop::ColIndex* cols = integer_lp_cols_.data(); + double* row_factors = row_factors_.data(); + double* col_factors = col_factors_.data(); + + // Now fill the tranposed matrix + glop::CompactSparseMatrix* data = simplex_.MutableTransposedMatrixWithSlack(); + data->Reset(glop::RowIndex(num_cols + num_rows)); + for (int row = 0; row < num_rows; ++row) { + const LinearConstraintInternal& ct = integer_lp_[RowIndex(row)]; + const double row_factor = row_factors[row]; + for (int i = 0; i < ct.num_terms; ++i) { + const int index = ct.start_in_buffer + i; + const int col = cols[index].value(); + const double coeff = static_cast(coeffs[index].value()); + const double scaled_coeff = row_factor * col_factors[col] * coeff; + data->AddEntryToCurrentColumn(RowIndex(col), scaled_coeff); + } + + // Add slack. + data->AddEntryToCurrentColumn(RowIndex(num_cols + row), 1.0); + + // Close column. + data->CloseCurrentColumn(); + } + + // Fill and scale the objective. 
+ const glop::ColIndex num_cols_with_slacks(num_rows + num_cols); + obj_with_slack_.assign(num_cols_with_slacks, 0.0); + for (const auto [col, value] : integer_objective_) { + obj_with_slack_[col] = ToDouble(value) * col_factors[col.value()]; + } + + // Fill and scales the bound. + simplex_.MutableLowerBounds()->resize(num_cols_with_slacks); + simplex_.MutableUpperBounds()->resize(num_cols_with_slacks); + Fractional* lb_with_slack = simplex_.MutableLowerBounds()->data(); + Fractional* ub_with_slack = simplex_.MutableUpperBounds()->data(); + const double infinity = std::numeric_limits::infinity(); + for (int row = 0; row < integer_lp_.size(); ++row) { + const LinearConstraintInternal& ct = integer_lp_[glop::RowIndex(row)]; + + // TODO(user): Using trivial bound might be good for things like + // sum bool <= 1 since setting the slack in [0, 1] can lead to bound flip in + // the simplex. However if the bound is large, maybe it make more sense to + // use +/- infinity. + const double factor = row_factors[row]; + lb_with_slack[num_cols + row] = + ct.ub_is_trivial ? -infinity : ToDouble(-ct.ub) * factor; + ub_with_slack[num_cols + row] = + ct.lb_is_trivial ? +infinity : ToDouble(-ct.lb) * factor; + } + + // We scale the LP using the level zero bounds that we later override + // with the current ones. + // + // TODO(user): As part of the scaling, we may also want to shift the initial + // variable bounds so that each variable contain the value zero in their + // domain. Maybe just once and for all at the beginning. 
+ const int num_vars = integer_variables_.size(); + for (int i = 0; i < num_vars; i++) { + const IntegerVariable cp_var = integer_variables_[i]; + const double factor = col_factors[i]; + lb_with_slack[i] = + ToDouble(integer_trail_->LevelZeroLowerBound(cp_var)) * factor; + ub_with_slack[i] = + ToDouble(integer_trail_->LevelZeroUpperBound(cp_var)) * factor; + } +} + void LinearProgrammingConstraint::FillReducedCostReasonIn( const glop::DenseRow& reduced_costs, std::vector* integer_reason) { @@ -682,12 +828,17 @@ double LinearProgrammingConstraint::GetSolutionReducedCost( void LinearProgrammingConstraint::UpdateBoundsOfLpVariables() { const int num_vars = integer_variables_.size(); + Fractional* lb_with_slack = simplex_.MutableLowerBounds()->data(); + Fractional* ub_with_slack = simplex_.MutableUpperBounds()->data(); for (int i = 0; i < num_vars; i++) { const IntegerVariable cp_var = integer_variables_[i]; - const double lb = ToDouble(integer_trail_->LowerBound(cp_var)); - const double ub = ToDouble(integer_trail_->UpperBound(cp_var)); + const double lb = + static_cast(integer_trail_->LowerBound(cp_var).value()); + const double ub = + static_cast(integer_trail_->UpperBound(cp_var).value()); const double factor = scaler_.VariableScalingFactor(glop::ColIndex(i)); - lp_data_.SetVariableBounds(glop::ColIndex(i), lb * factor, ub * factor); + lb_with_slack[i] = lb * factor; + ub_with_slack[i] = ub * factor; } } @@ -697,7 +848,12 @@ bool LinearProgrammingConstraint::SolveLp() { lp_at_level_zero_is_final_ = false; } - const auto status = simplex_.Solve(lp_data_, time_limit_); + const double unscaling_factor = 1.0 / scaler_.ObjectiveScalingFactor(); + const double offset_before_unscaling = + ToDouble(integer_objective_offset_) * scaler_.ObjectiveScalingFactor(); + const auto status = simplex_.MinimizeFromTransposedMatrixWithSlack( + obj_with_slack_, unscaling_factor, offset_before_unscaling, time_limit_); + state_ = simplex_.GetState(); total_num_simplex_iterations_ += 
simplex_.GetNumberOfIterations(); if (!status.ok()) { @@ -711,19 +867,14 @@ bool LinearProgrammingConstraint::SolveLp() { << average_degeneracy_.CurrentAverage(); } - // By default we assume the matrix is unchanged. - // This will be reset by CreateLpFromConstraintManager(). - simplex_.NotifyThatMatrixIsUnchangedForNextSolve(); - const int status_as_int = static_cast(simplex_.GetProblemStatus()); if (status_as_int >= num_solves_by_status_.size()) { num_solves_by_status_.resize(status_as_int + 1); } num_solves_++; num_solves_by_status_[status_as_int]++; - VLOG(2) << lp_data_.GetDimensionString() - << " lvl:" << trail_->CurrentDecisionLevel() << " " - << simplex_.GetProblemStatus() + VLOG(2) << DimensionString() << " lvl:" << trail_->CurrentDecisionLevel() + << " " << simplex_.GetProblemStatus() << " iter:" << simplex_.GetNumberOfIterations() << " obj:" << simplex_.GetObjectiveValue() << " scaled:" << objective_definition_->ScaleObjective( @@ -1285,11 +1436,17 @@ void LinearProgrammingConstraint::AddCGCuts() { const bool old_gomory = true; // Note that the index is permuted and do not correspond to a row. - const RowIndex num_rows = lp_data_.num_constraints(); + const RowIndex num_rows(integer_lp_.size()); for (RowIndex index(0); index < num_rows; ++index) { if (time_limit_->LimitReached()) break; const ColIndex basis_col = simplex_.GetBasis(index); + + // If this variable is a slack, we ignore it. This is because the + // corresponding row is not tight under the given lp values. + if (old_gomory && basis_col >= integer_variables_.size()) continue; + + // TODO(user): If the variable is a slack, the unscaling is wrong! const Fractional lp_value = GetVariableValueAtCpScale(basis_col); // Only consider fractional basis element. We ignore element that are close @@ -1300,10 +1457,6 @@ void LinearProgrammingConstraint::AddCGCuts() { // also be just under it. if (std::abs(lp_value - std::round(lp_value)) < 0.01) continue; - // If this variable is a slack, we ignore it. 
This is because the - // corresponding row is not tight under the given lp values. - if (old_gomory && basis_col >= integer_variables_.size()) continue; - // TODO(user): Avoid code duplication between the sparse/dense path. tmp_lp_multipliers_.clear(); const glop::ScatteredRow& lambda = simplex_.GetUnitRowLeftInverse(index); @@ -1464,9 +1617,12 @@ void LinearProgrammingConstraint::AddMirCuts() { // We compute all the rows that are tight, these will be used as the base row // for the MIR_n procedure below. - const int num_rows = lp_data_.num_constraints().value(); + const int num_cols = integer_variables_.size(); + const int num_rows = integer_lp_.size(); std::vector> base_rows; util_intops::StrongVector row_weights(num_rows, 0.0); + Fractional* lb_with_slack = simplex_.MutableLowerBounds()->data(); + Fractional* ub_with_slack = simplex_.MutableUpperBounds()->data(); for (RowIndex row(0); row < num_rows; ++row) { // We only consider tight rows. // We use both the status and activity to have as much options as possible. @@ -1476,12 +1632,14 @@ void LinearProgrammingConstraint::AddMirCuts() { // cannot be good. 
const auto status = simplex_.GetConstraintStatus(row); const double activity = simplex_.GetConstraintActivity(row); - if (activity > lp_data_.constraint_upper_bounds()[row] - 1e-4 || + const double ct_lb = -ub_with_slack[num_cols + row.value()]; + const double ct_ub = -lb_with_slack[num_cols + row.value()]; + if (activity > ct_ub - 1e-4 || status == glop::ConstraintStatus::AT_UPPER_BOUND || status == glop::ConstraintStatus::FIXED_VALUE) { base_rows.push_back({row, IntegerValue(1)}); } - if (activity < lp_data_.constraint_lower_bounds()[row] + 1e-4 || + if (activity < ct_lb + 1e-4 || status == glop::ConstraintStatus::AT_LOWER_BOUND || status == glop::ConstraintStatus::FIXED_VALUE) { base_rows.push_back({row, IntegerValue(-1)}); @@ -1514,6 +1672,7 @@ void LinearProgrammingConstraint::AddMirCuts() { std::vector weights; util_intops::StrongVector used_rows; std::vector> integer_multipliers; + const auto matrix = simplex_.MatrixWithSlack().view(); for (const std::pair& entry : base_rows) { if (time_limit_->LimitReached()) break; if (dtime_num_entries > 1e7) break; @@ -1567,8 +1726,7 @@ void LinearProgrammingConstraint::AddMirCuts() { if (dense_cut[col] == 0) continue; max_magnitude = std::max(max_magnitude, IntTypeAbs(dense_cut[col])); - const int col_degree = - lp_data_.GetSparseColumn(col).num_entries().value(); + const int col_degree = matrix.ColumnNumEntries(col).value(); if (col_degree <= 1) continue; if (simplex_.GetVariableStatus(col) != glop::VariableStatus::BASIC) { continue; @@ -1592,8 +1750,9 @@ void LinearProgrammingConstraint::AddMirCuts() { // What rows can we add to eliminate var_to_eliminate? 
std::vector possible_rows; weights.clear(); - for (const auto entry : lp_data_.GetSparseColumn(var_to_eliminate)) { - const RowIndex row = entry.row(); + for (const auto entry_index : matrix.Column(var_to_eliminate)) { + const RowIndex row = matrix.EntryRow(entry_index); + const glop::Fractional coeff = matrix.EntryCoefficient(entry_index); // We disallow all the rows that contain a variable that we already // eliminated (or are about to). This mean that we choose rows that @@ -1608,14 +1767,14 @@ void LinearProgrammingConstraint::AddMirCuts() { // still be chosen after the tight-one in most situation. bool add_row = false; if (!integer_lp_[row].ub_is_trivial) { - if (entry.coefficient() > 0.0) { + if (coeff > 0.0) { if (dense_cut[var_to_eliminate] < 0) add_row = true; } else { if (dense_cut[var_to_eliminate] > 0) add_row = true; } } if (!integer_lp_[row].lb_is_trivial) { - if (entry.coefficient() > 0.0) { + if (coeff > 0.0) { if (dense_cut[var_to_eliminate] > 0) add_row = true; } else { if (dense_cut[var_to_eliminate] < 0) add_row = true; @@ -2409,11 +2568,8 @@ void LinearProgrammingConstraint::ReducedCostStrengtheningDeductions( double cp_objective_delta) { deductions_.clear(); - // TRICKY: while simplex_.GetObjectiveValue() use the objective scaling factor - // stored in the lp_data_, all the other functions like GetReducedCost() or - // GetVariableValue() do not. 
const double lp_objective_delta = - cp_objective_delta / lp_data_.objective_scaling_factor(); + cp_objective_delta / scaler_.ObjectiveScalingFactor(); const int num_vars = integer_variables_.size(); for (int i = 0; i < num_vars; i++) { const IntegerVariable cp_var = integer_variables_[i]; @@ -2596,5 +2752,10 @@ absl::Span LinearProgrammingConstraint::IntegerLpRowCoeffs( return {integer_lp_coeffs_.data() + start, num_terms}; } +std::string LinearProgrammingConstraint::DimensionString() const { + return absl::StrFormat("%d rows, %d columns, %d entries", integer_lp_.size(), + integer_variables_.size(), integer_lp_coeffs_.size()); +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index ae98877708..bdf34fc686 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -190,7 +190,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, const std::vector& integer_variables() const { return integer_variables_; } - std::string DimensionString() const { return lp_data_.GetDimensionString(); } + std::string DimensionString() const; // Returns a IntegerLiteral guided by the underlying LP constraints. // @@ -408,6 +408,15 @@ class LinearProgrammingConstraint : public PropagatorInterface, absl::Span IntegerLpRowCols(glop::RowIndex row) const; absl::Span IntegerLpRowCoeffs(glop::RowIndex row) const; + void ComputeIntegerLpScalingFactors(); + void FillLpData(); + + // For ComputeIntegerLpScalingFactors(). + std::vector row_factors_; + std::vector col_factors_; + std::vector col_max_; + std::vector col_min_; + // This epsilon is related to the precision of the value/reduced_cost returned // by the LP once they have been scaled back into the CP domain. So for large // domain or cost coefficient, we may have some issues. 
@@ -456,8 +465,9 @@ class LinearProgrammingConstraint : public PropagatorInterface, // Underlying LP solver API. glop::GlopParameters simplex_params_; glop::BasisState state_; - glop::LinearProgram lp_data_; + glop::DenseRow obj_with_slack_; glop::RevisedSimplex simplex_; + int64_t next_simplex_iter_ = 500; // For the scaling. diff --git a/ortools/sat/lp_utils.cc b/ortools/sat/lp_utils.cc index 847d64cf08..a95b8b70b1 100644 --- a/ortools/sat/lp_utils.cc +++ b/ortools/sat/lp_utils.cc @@ -1507,8 +1507,8 @@ bool ConvertBinaryMPModelProtoToBooleanProblem(const MPModelProto& mp_model, // Abort if the variable is not binary. if (!is_binary) { LOG(WARNING) << "The variable #" << var_id << " with name " - << mp_var.name() << " is not binary. " << "lb: " << lb - << " ub: " << ub; + << mp_var.name() << " is not binary. " + << "lb: " << lb << " ub: " << ub; return false; } } diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index 34b1559a0d..18b2a363d0 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -647,8 +647,9 @@ void PrecedencesPropagator::AddArc( // A self-arc is either plain SAT or plain UNSAT or it forces something on // the given offset_var or presence_literal_index. In any case it could be // presolved in something more efficient. - VLOG(1) << "Self arc! This could be presolved. " << "var:" << tail - << " offset:" << offset << " offset_var:" << offset_var + VLOG(1) << "Self arc! This could be presolved. " + << "var:" << tail << " offset:" << offset + << " offset_var:" << offset_var << " conditioned_by:" << presence_literals; } diff --git a/ortools/sat/probing.cc b/ortools/sat/probing.cc index 941c78c830..37d9b7d9d3 100644 --- a/ortools/sat/probing.cc +++ b/ortools/sat/probing.cc @@ -890,8 +890,9 @@ bool FailedLiteralProbingRound(ProbingOptions options, Model* model) { const bool limit_reached = time_limit->LimitReached() || time_limit->GetElapsedDeterministicTime() > limit; LOG_IF(INFO, options.log_info) - << "Probing. 
" << " num_probed: " << num_probed << " num_fixed: +" - << num_newly_fixed << " (" << num_fixed << "/" << num_variables << ")" + << "Probing. " + << " num_probed: " << num_probed << " num_fixed: +" << num_newly_fixed + << " (" << num_fixed << "/" << num_variables << ")" << " explicit_fix:" << num_explicit_fix << " num_conflicts:" << num_conflicts << " new_binary_clauses: " << num_new_binary diff --git a/ortools/sat/sat_base.h b/ortools/sat/sat_base.h index c342af7105..ced510384b 100644 --- a/ortools/sat/sat_base.h +++ b/ortools/sat/sat_base.h @@ -625,8 +625,9 @@ inline bool SatPropagator::PropagatePreconditionsAreSatisfied( if (propagation_trail_index_ < trail.Index() && trail.Info(trail[propagation_trail_index_].Variable()).level != trail.CurrentDecisionLevel()) { - LOG(INFO) << "Issue in '" << name_ << "':" << " propagation_trail_index_=" - << propagation_trail_index_ << " trail_.Index()=" << trail.Index() + LOG(INFO) << "Issue in '" << name_ << "':" + << " propagation_trail_index_=" << propagation_trail_index_ + << " trail_.Index()=" << trail.Index() << " level_at_propagation_index=" << trail.Info(trail[propagation_trail_index_].Variable()).level << " current_decision_level=" << trail.CurrentDecisionLevel(); diff --git a/ortools/sat/sat_inprocessing.cc b/ortools/sat/sat_inprocessing.cc index 1d6dd85cd2..24a7de25b8 100644 --- a/ortools/sat/sat_inprocessing.cc +++ b/ortools/sat/sat_inprocessing.cc @@ -704,8 +704,8 @@ bool StampingSimplifier::ComputeStampsForNextRound(bool log_info) { // TODO(user): compute some dtime, it is always zero currently. time_limit_->AdvanceDeterministicTime(dtime_); - LOG_IF(INFO, log_info) << "Prestamping." << " num_fixed: " << num_fixed_ - << " dtime: " << dtime_ + LOG_IF(INFO, log_info) << "Prestamping." 
+ << " num_fixed: " << num_fixed_ << " dtime: " << dtime_ << " wtime: " << wall_timer.Get(); return true; } @@ -1259,7 +1259,8 @@ bool BoundedVariableElimination::DoOneRound(bool log_info) { dtime_ += 1e-8 * num_inspected_literals_; time_limit_->AdvanceDeterministicTime(dtime_); log_info |= VLOG_IS_ON(1); - LOG_IF(INFO, log_info) << "BVE." << " num_fixed: " + LOG_IF(INFO, log_info) << "BVE." + << " num_fixed: " << trail_->Index() - saved_trail_index << " num_simplified_literals: " << num_simplifications_ << " num_blocked_clauses_: " << num_blocked_clauses_ diff --git a/ortools/sat/var_domination.cc b/ortools/sat/var_domination.cc index f4368db2d5..ae589ac3e7 100644 --- a/ortools/sat/var_domination.cc +++ b/ortools/sat/var_domination.cc @@ -1304,8 +1304,8 @@ void ScanModelForDominanceDetection(PresolveContext& context, } } if (num_unconstrained_refs == 0 && num_dominated_refs == 0) return; - VLOG(1) << "Dominance:" << " num_unconstrained_refs=" - << num_unconstrained_refs + VLOG(1) << "Dominance:" + << " num_unconstrained_refs=" << num_unconstrained_refs << " num_dominated_refs=" << num_dominated_refs << " num_dominance_relations=" << num_dominance_relations; } diff --git a/ortools/sat/work_assignment_test.cc b/ortools/sat/work_assignment_test.cc new file mode 100644 index 0000000000..ba4a6d9533 --- /dev/null +++ b/ortools/sat/work_assignment_test.cc @@ -0,0 +1,545 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/work_assignment.h" + +#include + +#include "absl/strings/string_view.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/parse_text_proto.h" +#include "ortools/sat/cp_model.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_checker.h" +#include "ortools/sat/cp_model_loader.h" +#include "ortools/sat/cp_model_solver.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/synchronization.h" + +namespace operations_research { +namespace sat { +namespace { + +TEST(ProtoTrailTest, PushLevel) { + ProtoTrail p; + p.PushLevel({0, 0}, 0, 1); + + EXPECT_EQ(p.MaxLevel(), 1); + EXPECT_EQ(p.Decision(1), ProtoLiteral(0, 0)); + EXPECT_EQ(p.ObjectiveLb(1), 0); +} + +TEST(ProtoTrailTest, AddImplications) { + ProtoTrail p; + p.PushLevel({0, 0}, 0, 1); + p.PushLevel({1, 0}, 1, 2); + p.PushLevel({2, 0}, 2, 3); + p.PushLevel({3, 0}, 2, 4); + + p.AddImplication(2, {5, 0}); + p.AddImplication(3, {6, 0}); + + EXPECT_THAT(p.Implications(2), testing::ElementsAre(ProtoLiteral(5, 0))); + EXPECT_THAT(p.Implications(3), testing::ElementsAre(ProtoLiteral(6, 0))); + p.SetLevelImplied(3); + EXPECT_THAT(p.Implications(2), + testing::UnorderedElementsAre( + ProtoLiteral(5, 0), ProtoLiteral(2, 0), ProtoLiteral(6, 0))); +} + +TEST(ProtoTrailTest, SetLevel1Implied) { + ProtoTrail p; + p.PushLevel({0, 0}, 0, 1); + p.PushLevel({1, 0}, 1, 2); + p.PushLevel({2, 0}, 2, 3); + + p.SetLevelImplied(1); + + EXPECT_THAT(p.NodeIds(0), testing::ElementsAre(1)); + EXPECT_THAT(p.NodeIds(1), testing::ElementsAre(2)); + EXPECT_THAT(p.NodeIds(2), testing::ElementsAre(3)); + EXPECT_EQ(p.MaxLevel(), 2); + EXPECT_EQ(p.Decision(1), ProtoLiteral(1, 0)); + EXPECT_EQ(p.Decision(2), ProtoLiteral(2, 0)); + EXPECT_EQ(p.ObjectiveLb(1), 1); + EXPECT_EQ(p.ObjectiveLb(2), 2); +} + +TEST(ProtoTrailTest, SetMidLevelImplied) { + ProtoTrail p; + p.PushLevel({0, 0}, 0, 1); + 
p.PushLevel({1, 0}, 1, 2); + p.PushLevel({2, 0}, 2, 3); + + p.SetLevelImplied(2); + + EXPECT_THAT(p.NodeIds(0), testing::IsEmpty()); + EXPECT_THAT(p.NodeIds(1), testing::ElementsAre(1, 2)); + EXPECT_THAT(p.NodeIds(2), testing::ElementsAre(3)); + EXPECT_EQ(p.MaxLevel(), 2); + EXPECT_EQ(p.Decision(1), ProtoLiteral(0, 0)); + EXPECT_EQ(p.Decision(2), ProtoLiteral(2, 0)); + EXPECT_EQ(p.ObjectiveLb(1), 1); + EXPECT_EQ(p.ObjectiveLb(2), 2); +} + +TEST(ProtoTrailTest, SetFinalLevelImplied) { + ProtoTrail p; + p.PushLevel({0, 0}, 0, 1); + p.PushLevel({1, 0}, 1, 2); + p.PushLevel({2, 0}, 2, 3); + + p.SetLevelImplied(3); + + EXPECT_THAT(p.NodeIds(0), testing::IsEmpty()); + EXPECT_THAT(p.NodeIds(1), testing::ElementsAre(1)); + EXPECT_THAT(p.NodeIds(2), testing::ElementsAre(2, 3)); + EXPECT_EQ(p.MaxLevel(), 2); + EXPECT_EQ(p.Decision(1), ProtoLiteral(0, 0)); + EXPECT_EQ(p.Decision(2), ProtoLiteral(1, 0)); + EXPECT_EQ(p.ObjectiveLb(1), 0); + EXPECT_EQ(p.ObjectiveLb(2), 2); +} + +TEST(ProtoTrailTest, SetMultiLevelImplied) { + ProtoTrail p; + p.PushLevel({0, 0}, 0, 1); + p.PushLevel({1, 0}, 1, 2); + p.PushLevel({2, 0}, 2, 3); + + p.SetLevelImplied(3); + p.SetLevelImplied(1); + + EXPECT_EQ(p.MaxLevel(), 1); + EXPECT_EQ(p.Decision(1), ProtoLiteral(1, 0)); + EXPECT_EQ(p.ObjectiveLb(1), 2); +} + +TEST(ProtoTrailTest, Clear) { + ProtoTrail p; + p.PushLevel({0, 0}, 0, 1); + p.PushLevel({1, 0}, 1, 2); + p.PushLevel({2, 0}, 2, 3); + + p.Clear(); + + EXPECT_EQ(p.MaxLevel(), 0); +} + +class SharedTreeSolveTest : public testing::TestWithParam { + public: + SatParameters GetParams() { + SatParameters params; + params.set_num_workers(4); + params.set_shared_tree_num_workers(4); + params.set_cp_model_presolve(false); + params.MergeFrom( + google::protobuf::contrib::parse_proto::ParseTextProtoOrDie( + GetParam())); + return params; + } +}; +INSTANTIATE_TEST_SUITE_P( + SharedTreeParams, SharedTreeSolveTest, + testing::Values("shared_tree_worker_enable_trail_sharing:false", + 
"shared_tree_worker_enable_trail_sharing:true")); + +TEST_P(SharedTreeSolveTest, SmokeTest) { + CpModelBuilder model_builder; + auto bool_var = model_builder.NewBoolVar(); + auto int_var = model_builder.NewIntVar({0, 7}); + model_builder.AddLessOrEqual(int_var, 3).OnlyEnforceIf(bool_var); + model_builder.Maximize(int_var + 5 * bool_var); + Model model; + SatParameters params = GetParams(); + model.Add(NewSatParameters(params)); + + CpSolverResponse response = SolveCpModel(model_builder.Build(), &model); + + EXPECT_EQ(model.GetOrCreate()->NumWorkers(), + params.shared_tree_num_workers()); + ASSERT_EQ(response.status(), OPTIMAL) + << "Validation: " << ValidateCpModel(model_builder.Build()); + EXPECT_EQ(response.objective_value(), 5 + 3); + EXPECT_EQ(SolutionBooleanValue(response, bool_var), true); + EXPECT_EQ(SolutionIntegerValue(response, int_var), 3); +} + +TEST_P(SharedTreeSolveTest, FeasiblePidgeonHoleSmokeTest) { + CpModelBuilder model_builder; + const int pidgeons = 10; + const int holes = 10; + std::vector count_per_hole(holes); + IntVar max_pidgeon_hole_product = + model_builder.NewIntVar({0, pidgeons * holes}); + for (int i = 0; i < pidgeons; ++i) { + LinearExpr count_per_pidgeon; + for (int j = 0; j < holes; ++j) { + auto var = model_builder.NewBoolVar(); + count_per_hole[j] += LinearExpr(var); + count_per_pidgeon += LinearExpr(var); + model_builder + .AddGreaterOrEqual(max_pidgeon_hole_product, (i + 1) * (j + 1)) + .OnlyEnforceIf(var); + } + model_builder.AddEquality(count_per_pidgeon, 1); + } + for (const auto& count : count_per_hole) { + model_builder.AddLessOrEqual(count, 1); + } + Model model; + SatParameters params = GetParams(); + model.Add(NewSatParameters(params)); + + CpSolverResponse response = SolveCpModel(model_builder.Build(), &model); + + EXPECT_EQ(model.GetOrCreate()->NumWorkers(), 4); + EXPECT_EQ(response.status(), OPTIMAL); +} + +TEST_P(SharedTreeSolveTest, InfeasiblePidgeonHoleSmokeTest) { + CpModelBuilder model_builder; + const int 
pidgeons = 10; + const int holes = 9; + std::vector count_per_hole(holes); + IntVar max_pidgeon_hole_product = + model_builder.NewIntVar({0, pidgeons * holes}); + for (int i = 0; i < pidgeons; ++i) { + LinearExpr count_per_pidgeon; + for (int j = 0; j < holes; ++j) { + auto var = model_builder.NewBoolVar(); + count_per_hole[j] += LinearExpr(var); + count_per_pidgeon += LinearExpr(var); + model_builder + .AddGreaterOrEqual(max_pidgeon_hole_product, (i + 1) * (j + 1)) + .OnlyEnforceIf(var); + } + model_builder.AddEquality(count_per_pidgeon, 1); + } + for (const auto& count : count_per_hole) { + model_builder.AddLessOrEqual(count, 1); + } + Model model; + SatParameters params = GetParams(); + model.Add(NewSatParameters(params)); + + CpSolverResponse response = SolveCpModel(model_builder.Build(), &model); + + EXPECT_EQ(model.GetOrCreate()->NumWorkers(), 4); + EXPECT_EQ(response.status(), INFEASIBLE); +} + +TEST(SharedTreeManagerTest, SplitTest) { + CpModelBuilder model_builder; + auto bool_var = model_builder.NewBoolVar(); + auto int_var = model_builder.NewIntVar({0, 7}); + model_builder.AddLessOrEqual(int_var, 3).OnlyEnforceIf(bool_var); + model_builder.Maximize(int_var); + Model model; + SatParameters params; + params.set_num_workers(4); + params.set_shared_tree_num_workers(4); + params.set_cp_model_presolve(false); + model.Add(NewSatParameters(params)); + LoadVariables(model_builder.Build(), false, &model); + auto* shared_tree_manager = model.GetOrCreate(); + ProtoTrail shared_trail; + + shared_tree_manager->ProposeSplit(shared_trail, {-1, 0}); + + EXPECT_EQ(shared_trail.MaxLevel(), 1); +} + +TEST(SharedTreeManagerTest, RestartTest) { + CpModelBuilder model_builder; + auto bool_var = model_builder.NewBoolVar(); + auto int_var = model_builder.NewIntVar({0, 7}); + model_builder.AddLessOrEqual(int_var, 3).OnlyEnforceIf(bool_var); + model_builder.Maximize(int_var); + Model model; + SatParameters params; + params.set_num_workers(4); + 
params.set_shared_tree_num_workers(4); + params.set_cp_model_presolve(false); + model.Add(NewSatParameters(params)); + LoadVariables(model_builder.Build(), false, &model); + auto* shared_tree_manager = model.GetOrCreate(); + ProtoTrail shared_trail; + + shared_tree_manager->ProposeSplit(shared_trail, {-1, 0}); + shared_tree_manager->Restart(); + shared_tree_manager->SyncTree(shared_trail); + + EXPECT_EQ(shared_trail.MaxLevel(), 0); +} + +TEST(SharedTreeManagerTest, RestartTestWithLevelZeroImplications) { + CpModelBuilder model_builder; + auto bool_var = model_builder.NewBoolVar(); + auto int_var = model_builder.NewIntVar({0, 7}); + model_builder.AddLessOrEqual(int_var, 3).OnlyEnforceIf(bool_var); + model_builder.Maximize(int_var); + Model model; + SatParameters params; + params.set_num_workers(4); + params.set_shared_tree_num_workers(4); + params.set_cp_model_presolve(false); + model.Add(NewSatParameters(params)); + LoadVariables(model_builder.Build(), false, &model); + auto* shared_tree_manager = model.GetOrCreate(); + ProtoTrail shared_trail; + + shared_tree_manager->ProposeSplit(shared_trail, {-1, 0}); + shared_tree_manager->CloseTree(shared_trail, 1); + shared_tree_manager->SyncTree(shared_trail); + shared_tree_manager->ReplaceTree(shared_trail); + shared_tree_manager->Restart(); + shared_tree_manager->SyncTree(shared_trail); + + EXPECT_EQ(shared_trail.NodeIds(0).size(), 0); + EXPECT_EQ(shared_trail.MaxLevel(), 0); +} + +TEST(SharedTreeManagerTest, SharedBranchingTest) { + CpModelBuilder model_builder; + auto bool_var = model_builder.NewBoolVar(); + auto int_var = model_builder.NewIntVar({0, 7}); + model_builder.AddLessOrEqual(int_var, 3).OnlyEnforceIf(bool_var); + model_builder.Maximize(int_var); + Model model; + SatParameters params; + params.set_num_workers(2); + params.set_shared_tree_num_workers(2); + params.set_cp_model_presolve(false); + model.Add(NewSatParameters(params)); + LoadVariables(model_builder.Build(), false, &model); + auto* 
shared_tree_manager = model.GetOrCreate(); + ProtoTrail trail1, trail2; + + shared_tree_manager->ProposeSplit(trail1, {-1, 0}); + shared_tree_manager->ReplaceTree(trail2); + + EXPECT_EQ(trail1.MaxLevel(), 1); + EXPECT_EQ(trail2.MaxLevel(), 1); + EXPECT_EQ(trail1.Decision(1), trail2.Decision(1).Negated()); +} + +TEST(SharedTreeManagerTest, ObjectiveLbSplitTest) { + CpModelBuilder model_builder; + auto bool_var = model_builder.NewBoolVar(); + auto int_var = model_builder.NewIntVar({0, 7}); + model_builder.AddLessOrEqual(int_var, 3).OnlyEnforceIf(bool_var); + model_builder.Maximize(int_var); + Model model; + SatParameters params; + params.set_num_workers(4); + params.set_shared_tree_num_workers(4); + params.set_cp_model_presolve(false); + params.set_shared_tree_split_strategy( + SatParameters::SPLIT_STRATEGY_OBJECTIVE_LB); + model.Add(NewSatParameters(params)); + LoadVariables(model_builder.Build(), false, &model); + auto* response_manager = model.GetOrCreate(); + response_manager->InitializeObjective(model_builder.Build()); + auto* shared_tree_manager = model.GetOrCreate(); + ProtoTrail trail1, trail2; + + shared_tree_manager->ProposeSplit(trail1, {-1, 0}); + ASSERT_EQ(trail1.MaxLevel(), 1); + trail1.SetObjectiveLb(1, 2); + shared_tree_manager->SyncTree(trail1); + shared_tree_manager->ReplaceTree(trail2); + ASSERT_EQ(trail2.MaxLevel(), 1); + trail2.SetObjectiveLb(1, 1); + shared_tree_manager->SyncTree(trail2); + // Reject this split because it is not at the global lower bound. 
+ shared_tree_manager->ProposeSplit(trail1, {int_var.index(), 3}); + + EXPECT_EQ(response_manager->GetInnerObjectiveLowerBound(), 1); + EXPECT_EQ(shared_tree_manager->NumNodes(), 3); +} + +TEST(SharedTreeManagerTest, DiscrepancySplitTestOneLeafPerWorker) { + CpModelBuilder model_builder; + auto bool_var = model_builder.NewBoolVar(); + auto int_var = model_builder.NewIntVar({0, 7}); + model_builder.AddLessOrEqual(int_var, 3).OnlyEnforceIf(bool_var); + model_builder.Maximize(int_var); + Model model; + SatParameters params; + params.set_num_workers(4); + params.set_shared_tree_num_workers(4); + params.set_shared_tree_open_leaves_per_worker(1); + params.set_cp_model_presolve(false); + params.set_shared_tree_split_strategy( + SatParameters::SPLIT_STRATEGY_DISCREPANCY); + model.Add(NewSatParameters(params)); + LoadVariables(model_builder.Build(), false, &model); + auto* response_manager = model.GetOrCreate(); + response_manager->InitializeObjective(model_builder.Build()); + auto* shared_tree_manager = model.GetOrCreate(); + ProtoTrail trail1, trail2; + + shared_tree_manager->ProposeSplit(trail1, {-1, 0}); + shared_tree_manager->SyncTree(trail1); + shared_tree_manager->ReplaceTree(trail2); + shared_tree_manager->ProposeSplit(trail2, {int_var.index(), 3}); + shared_tree_manager->ProposeSplit(trail1, {int_var.index(), 3}); + // Reject this split: 2 depth + 1 discrepancy is not minimal. + shared_tree_manager->ProposeSplit(trail2, {int_var.index(), 5}); + // Reject this split: 2 depth + 0 discrepancy is not minimal. 
+ shared_tree_manager->ProposeSplit(trail1, {int_var.index(), 5}); + + EXPECT_EQ(trail1.MaxLevel(), 2); + EXPECT_EQ(trail2.MaxLevel(), 2); + EXPECT_EQ(shared_tree_manager->NumNodes(), 7); +} + +TEST(SharedTreeManagerTest, DiscrepancySplitTest) { + CpModelBuilder model_builder; + auto bool_var = model_builder.NewBoolVar(); + auto int_var = model_builder.NewIntVar({0, 7}); + model_builder.AddLessOrEqual(int_var, 3).OnlyEnforceIf(bool_var); + model_builder.Maximize(int_var); + Model model; + SatParameters params; + params.set_num_workers(2); + params.set_shared_tree_num_workers(2); + params.set_shared_tree_open_leaves_per_worker(2); + params.set_cp_model_presolve(false); + params.set_shared_tree_split_strategy( + SatParameters::SPLIT_STRATEGY_DISCREPANCY); + model.Add(NewSatParameters(params)); + LoadVariables(model_builder.Build(), false, &model); + auto* response_manager = model.GetOrCreate(); + response_manager->InitializeObjective(model_builder.Build()); + auto* shared_tree_manager = model.GetOrCreate(); + ProtoTrail trail1, trail2; + + shared_tree_manager->ProposeSplit(trail1, {-1, 0}); + shared_tree_manager->SyncTree(trail1); + shared_tree_manager->ReplaceTree(trail2); + shared_tree_manager->ProposeSplit(trail2, {int_var.index(), 3}); + shared_tree_manager->ProposeSplit(trail1, {int_var.index(), 3}); + // Reject this split: 2 depth + 1 discrepancy is not minimal. + shared_tree_manager->ProposeSplit(trail2, {int_var.index(), 5}); + // Reject this split: 2 depth + 0 discrepancy is not minimal. 
+ shared_tree_manager->ProposeSplit(trail1, {int_var.index(), 5}); + + EXPECT_EQ(trail1.MaxLevel(), 2); + EXPECT_EQ(trail2.MaxLevel(), 2); + EXPECT_EQ(shared_tree_manager->NumNodes(), 7); +} + +TEST(SharedTreeManagerTest, BalancedSplitTestOneLeafPerWorker) { + CpModelBuilder model_builder; + auto bool_var = model_builder.NewBoolVar(); + auto int_var = model_builder.NewIntVar({0, 7}); + model_builder.AddLessOrEqual(int_var, 3).OnlyEnforceIf(bool_var); + model_builder.Maximize(int_var); + Model model; + SatParameters params; + params.set_num_workers(5); + params.set_shared_tree_num_workers(5); + params.set_shared_tree_open_leaves_per_worker(1); + params.set_cp_model_presolve(false); + params.set_shared_tree_split_strategy( + SatParameters::SPLIT_STRATEGY_BALANCED_TREE); + model.Add(NewSatParameters(params)); + LoadVariables(model_builder.Build(), false, &model); + auto* response_manager = model.GetOrCreate(); + response_manager->InitializeObjective(model_builder.Build()); + auto* shared_tree_manager = model.GetOrCreate(); + ProtoTrail trail1, trail2; + + shared_tree_manager->ProposeSplit(trail1, {-1, 0}); + shared_tree_manager->SyncTree(trail1); + shared_tree_manager->ReplaceTree(trail2); + shared_tree_manager->SyncTree(trail2); + shared_tree_manager->ProposeSplit(trail1, {int_var.index(), 3}); + // Reject this split because it creates an unbalanced tree + shared_tree_manager->ProposeSplit(trail1, {int_var.index(), 5}); + shared_tree_manager->ProposeSplit(trail2, {int_var.index(), 3}); + + EXPECT_EQ(shared_tree_manager->NumNodes(), 7); + EXPECT_EQ(trail1.MaxLevel(), 2); + EXPECT_EQ(trail2.MaxLevel(), 2); +} + +TEST(SharedTreeManagerTest, BalancedSplitTest) { + CpModelBuilder model_builder; + auto bool_var = model_builder.NewBoolVar(); + auto int_var = model_builder.NewIntVar({0, 7}); + model_builder.AddLessOrEqual(int_var, 3).OnlyEnforceIf(bool_var); + model_builder.Maximize(int_var); + Model model; + SatParameters params; + params.set_num_workers(3); + 
params.set_shared_tree_num_workers(3); + params.set_shared_tree_open_leaves_per_worker(2); + params.set_cp_model_presolve(false); + params.set_shared_tree_split_strategy( + SatParameters::SPLIT_STRATEGY_BALANCED_TREE); + model.Add(NewSatParameters(params)); + LoadVariables(model_builder.Build(), false, &model); + auto* response_manager = model.GetOrCreate(); + response_manager->InitializeObjective(model_builder.Build()); + auto* shared_tree_manager = model.GetOrCreate(); + ProtoTrail trail1, trail2; + + shared_tree_manager->ProposeSplit(trail1, {-1, 0}); + shared_tree_manager->SyncTree(trail1); + shared_tree_manager->ReplaceTree(trail2); + shared_tree_manager->SyncTree(trail2); + shared_tree_manager->ProposeSplit(trail1, {int_var.index(), 3}); + // Reject this split because it creates an unbalanced tree + shared_tree_manager->ProposeSplit(trail1, {int_var.index(), 5}); + shared_tree_manager->ProposeSplit(trail2, {int_var.index(), 3}); + + EXPECT_EQ(shared_tree_manager->NumNodes(), 7); + EXPECT_EQ(trail1.MaxLevel(), 2); + EXPECT_EQ(trail2.MaxLevel(), 2); +} + +TEST(SharedTreeManagerTest, CloseTreeTest) { + CpModelBuilder model_builder; + auto bool_var = model_builder.NewBoolVar(); + auto int_var = model_builder.NewIntVar({0, 7}); + model_builder.AddLessOrEqual(int_var, 3).OnlyEnforceIf(bool_var); + model_builder.Maximize(int_var); + Model model; + SatParameters params; + params.set_num_workers(4); + params.set_shared_tree_num_workers(4); + params.set_cp_model_presolve(false); + model.Add(NewSatParameters(params)); + LoadVariables(model_builder.Build(), false, &model); + auto* shared_tree_manager = model.GetOrCreate(); + ProtoTrail trail1, trail2, trail3; + shared_tree_manager->ProposeSplit(trail1, {-1, 0}); + shared_tree_manager->ReplaceTree(trail2); + shared_tree_manager->ProposeSplit(trail1, {1, 0}); + shared_tree_manager->CloseTree(trail1, 1); + shared_tree_manager->ReplaceTree(trail1); + + EXPECT_EQ(trail1.MaxLevel(), 0); + EXPECT_EQ(trail2.MaxLevel(), 1); + 
EXPECT_EQ(trail2.Decision(1), ProtoLiteral(0, 1)); +} +// TODO(user): Test objective propagation. +} // namespace +} // namespace sat +} // namespace operations_research From a02548f1787b105c8c4806f92dbcf65d57536436 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Mon, 7 Oct 2024 02:46:27 +0200 Subject: [PATCH 047/105] cmake: Rework ortools_cxx_test() --- cmake/cpp.cmake | 61 ++++++++++++++++++----------------- examples/tests/CMakeLists.txt | 11 +++++-- ortools/sat/CMakeLists.txt | 12 ++++--- 3 files changed, 49 insertions(+), 35 deletions(-) diff --git a/cmake/cpp.cmake b/cmake/cpp.cmake index 4e86448b10..d586dbcdea 100644 --- a/cmake/cpp.cmake +++ b/cmake/cpp.cmake @@ -147,70 +147,73 @@ endif() # ortools_cxx_test() # CMake function to generate and build C++ test. # Parameters: -# FILE_NAME: the C++ filename -# COMPONENT_NAME: name of the ortools/ subdir where the test is located -# note: automatically determined if located in ortools// +# NAME: CMake target name +# SOURCES: List of source files +# [COMPILE_DEFINITIONS]: List of private compile definitions +# [COMPILE_OPTIONS]: List of private compile options +# [LINK_LIBRARIES]: List of private libraries to use when linking +# note: ortools::ortools is always linked to the target +# [LINK_OPTIONS]: List of private link options # e.g.: # ortools_cxx_test( -# FILE_NAME -# ${PROJECT_SOURCE_DIR}/ortools/foo/foo_test.cc -# COMPONENT_NAME -# foo -# DEPS +# NAME +# foo_bar_test +# SOURCES +# bar_test.cc +# ${PROJECT_SOURCE_DIR}/ortools/foo/bar_test.cc +# LINK_LIBRARIES # GTest::gmock # GTest::gtest_main # ) function(ortools_cxx_test) set(options "") - set(oneValueArgs "FILE_NAME;COMPONENT_NAME") - set(multiValueArgs "DEPS") + set(oneValueArgs "NAME") + set(multiValueArgs + "SOURCES;COMPILE_DEFINITIONS;COMPILE_OPTIONS;LINK_LIBRARIES;LINK_OPTIONS") cmake_parse_arguments(TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} ) -if(NOT TEST_FILE_NAME) - message(FATAL_ERROR "no FILE_NAME provided") + if(NOT 
TEST_NAME) + message(FATAL_ERROR "no NAME provided") endif() - get_filename_component(TEST_NAME ${TEST_FILE_NAME} NAME_WE) - - message(STATUS "Configuring test ${TEST_FILE_NAME} ...") - - if(NOT TEST_COMPONENT_NAME) - # test is located in ortools// - get_filename_component(COMPONENT_DIR ${TEST_FILE_NAME} DIRECTORY) - get_filename_component(COMPONENT_NAME ${COMPONENT_DIR} NAME) - else() - set(COMPONENT_NAME ${TEST_COMPONENT_NAME}) + if(NOT TEST_SOURCES) + message(FATAL_ERROR "no SOURCES provided") endif() + message(STATUS "Configuring test ${TEST_NAME} ...") - add_executable(${TEST_NAME} ${TEST_FILE_NAME}) + add_executable(${TEST_NAME} "") + target_sources(${TEST_NAME} PRIVATE ${TEST_SOURCES}) target_include_directories(${TEST_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) + target_compile_definitions(${TEST_NAME} PRIVATE ${TEST_COMPILE_DEFINITIONS}) target_compile_features(${TEST_NAME} PRIVATE cxx_std_17) + target_compile_options(${TEST_NAME} PRIVATE ${TEST_COMPILE_OPTIONS}) target_link_libraries(${TEST_NAME} PRIVATE ${PROJECT_NAMESPACE}::ortools - ${TEST_DEPS} + ${TEST_LINK_LIBRARIES} ) + target_link_options(${TEST_NAME} PRIVATE ${TEST_LINK_OPTIONS}) include(GNUInstallDirs) if(APPLE) - set_target_properties(${TEST_NAME} PROPERTIES INSTALL_RPATH - "@loader_path/../${CMAKE_INSTALL_LIBDIR};@loader_path") + set_target_properties(${TEST_NAME} PROPERTIES + INSTALL_RPATH "@loader_path/../${CMAKE_INSTALL_LIBDIR};@loader_path") elseif(UNIX) cmake_path(RELATIVE_PATH CMAKE_INSTALL_FULL_LIBDIR BASE_DIRECTORY ${CMAKE_INSTALL_FULL_BINDIR} OUTPUT_VARIABLE libdir_relative_path) set_target_properties(${TEST_NAME} PROPERTIES - INSTALL_RPATH "$ORIGIN/${libdir_relative_path}") + INSTALL_RPATH "$ORIGIN/${libdir_relative_path}:$ORIGIN") endif() if(BUILD_TESTING) add_test( - NAME cxx_${COMPONENT_NAME}_${TEST_NAME} + NAME cxx_${TEST_NAME} COMMAND ${TEST_NAME}) endif() - message(STATUS "Configuring test ${TEST_FILE_NAME} ...DONE") + message(STATUS "Configuring test ${TEST_NAME} ...DONE") 
endfunction() ################## diff --git a/examples/tests/CMakeLists.txt b/examples/tests/CMakeLists.txt index 5d49f347e9..9ba53952a1 100644 --- a/examples/tests/CMakeLists.txt +++ b/examples/tests/CMakeLists.txt @@ -17,8 +17,15 @@ endif() if(BUILD_CXX_EXAMPLES) file(GLOB CXX_SRCS "*.cc") - foreach(FILE_NAME IN LISTS CXX_SRCS) - ortools_cxx_test(FILE_NAME ${FILE_NAME}) + foreach(_FULL_FILE_NAME IN LISTS CXX_SRCS) + get_filename_component(_NAME ${_FULL_FILE_NAME} NAME_WE) + get_filename_component(_FILE_NAME ${_FULL_FILE_NAME} NAME) + ortools_cxx_test( + NAME + tests_${_NAME} + SOURCES + ${_FULL_FILE_NAME} + ) endforeach() endif() diff --git a/ortools/sat/CMakeLists.txt b/ortools/sat/CMakeLists.txt index c2a1d46fa2..ec3a1b22c3 100644 --- a/ortools/sat/CMakeLists.txt +++ b/ortools/sat/CMakeLists.txt @@ -42,11 +42,15 @@ target_link_libraries(${NAME} PRIVATE if(BUILD_TESTING) file(GLOB _TEST_SRCS "*_test.cc") - foreach(FILE_NAME IN LISTS _TEST_SRCS) + foreach(_FULL_FILE_NAME IN LISTS _TEST_SRCS) + get_filename_component(_NAME ${_FULL_FILE_NAME} NAME_WE) + get_filename_component(_FILE_NAME ${_FULL_FILE_NAME} NAME) ortools_cxx_test( - FILE_NAME - ${FILE_NAME} - DEPS + NAME + sat_${_NAME} + SOURCES + ${_FILE_NAME} + LINK_LIBRARIES benchmark::benchmark GTest::gmock GTest::gtest_main From ae517c0e12433f5f0aeb5d5d3573d4584616836d Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 7 Oct 2024 13:51:58 +0200 Subject: [PATCH 048/105] cmake: Add algorithms/ and graph/ C++ tests --- cmake/Makefile | 10 +- ortools/algorithms/BUILD.bazel | 9 + ortools/algorithms/CMakeLists.txt | 22 +- ortools/algorithms/duplicate_remover_test.cc | 184 -------- ortools/algorithms/set_cover_orlib_test.cc | 428 ------------------- ortools/graph/CMakeLists.txt | 18 + 6 files changed, 53 insertions(+), 618 deletions(-) delete mode 100644 ortools/algorithms/duplicate_remover_test.cc delete mode 100644 ortools/algorithms/set_cover_orlib_test.cc diff --git a/cmake/Makefile b/cmake/Makefile index 
a2e09550c2..d985ec0be8 100644 --- a/cmake/Makefile +++ b/cmake/Makefile @@ -632,7 +632,7 @@ TOOLCHAIN_STAGES := env devel toolchain build test define toolchain-stage-target = #$$(info STAGE: $1) #$$(info Create targets: toolchain_$1 $(addprefix toolchain_, $(addsuffix _$1, $(TOOLCHAIN_TARGETS))).) -targets_toolchain_$1 = $(addprefix toolchain_, $(addsuffix _$1, $(TOOLCHAIN_TARGETS))) +targets_toolchain_$1 := $(addprefix toolchain_, $(addsuffix _$1, $(TOOLCHAIN_TARGETS))) .PHONY: toolchain_$1 $$(targets_toolchain_$1) toolchain_$1: $$(targets_toolchain_$1) $$(targets_toolchain_$1): toolchain_%_$1: docker/toolchain/Dockerfile @@ -645,7 +645,7 @@ $$(targets_toolchain_$1): toolchain_%_$1: docker/toolchain/Dockerfile .. #$$(info Create targets: save_toolchain_$1 $(addprefix save_toolchain_, $(addsuffix _$1, $(TOOLCHAIN_TARGETS))) (debug).) -save_targets_toolchain_$1 = $(addprefix save_toolchain_, $(addsuffix _$1, $(TOOLCHAIN_TARGETS))) +save_targets_toolchain_$1 := $(addprefix save_toolchain_, $(addsuffix _$1, $(TOOLCHAIN_TARGETS))) .PHONY: save_toolchain_$1 $$(save_targets_toolchain_$1) save_toolchain_$1: $$(save_targets_toolchain_$1) $$(save_targets_toolchain_$1): save_toolchain_%_$1: cache/%/docker_$1.tar @@ -727,7 +727,7 @@ VAGRANT_VMS := \ define make-vagrant-target = #$$(info VMS: $1) #$$(info Create target: $1_.) -$1_targets = $(addprefix $1_, $(LANGUAGES)) +$1_targets := $(addprefix $1_, $(LANGUAGES)) .PHONY: $1 $$($1_targets) $1: $$($1_targets) $$($1_targets): $1_%: vagrant/$1/%/Vagrantfile @@ -736,14 +736,14 @@ $$($1_targets): $1_%: vagrant/$1/%/Vagrantfile cd vagrant/$1/$$* && vagrant up #$$(info Create targets: sh_$1_ vagrant machine (debug).) 
-sh_$1_targets = $(addprefix sh_$1_, $(LANGUAGES)) +sh_$1_targets := $(addprefix sh_$1_, $(LANGUAGES)) .PHONY: $$(sh_$1_targets) $$(sh_$1_targets): sh_$1_%: cd vagrant/$1/$$* && vagrant up cd vagrant/$1/$$* && vagrant ssh #$$(info Create targets: clean_$1) -clean_$1_targets = $(addprefix clean_$1_, $(LANGUAGES)) +clean_$1_targets := $(addprefix clean_$1_, $(LANGUAGES)) .PHONY: clean_$1 $(clean_$1_targets) clean_$1: $$(clean_$1_targets) $$(clean_$1_targets): clean_$1_%: diff --git a/ortools/algorithms/BUILD.bazel b/ortools/algorithms/BUILD.bazel index 3d7f2284f3..970599bb97 100644 --- a/ortools/algorithms/BUILD.bazel +++ b/ortools/algorithms/BUILD.bazel @@ -385,6 +385,15 @@ cc_library( ], ) +cc_test( + name = "dense_doubly_linked_list_test", + srcs = ["dense_doubly_linked_list_test.cc"], + deps = [ + ":dense_doubly_linked_list", + "//ortools/base:gmock_main", + ], +) + cc_library( name = "dynamic_partition", srcs = ["dynamic_partition.cc"], diff --git a/ortools/algorithms/CMakeLists.txt b/ortools/algorithms/CMakeLists.txt index 8f23481d2f..8419297312 100644 --- a/ortools/algorithms/CMakeLists.txt +++ b/ortools/algorithms/CMakeLists.txt @@ -12,7 +12,7 @@ # limitations under the License. 
file(GLOB _SRCS "*.h" "*.cc") -list(FILTER _SRCS EXCLUDE REGEX "/[^/]*_test\\.cc$") +list(FILTER _SRCS EXCLUDE REGEX ".*/.*_test.cc") set(NAME ${PROJECT_NAME}_algorithms) @@ -31,3 +31,23 @@ target_link_libraries(${NAME} PRIVATE protobuf::libprotobuf ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::algorithms ALIAS ${NAME}) + +if(BUILD_TESTING) + file(GLOB _TEST_SRCS "*_test.cc") + list(FILTER _TEST_SRCS EXCLUDE REGEX ".*_stress_test.cc") + list(FILTER _TEST_SRCS EXCLUDE REGEX "set_cover_test.cc") + foreach(_FULL_FILE_NAME IN LISTS _TEST_SRCS) + get_filename_component(_NAME ${_FULL_FILE_NAME} NAME_WE) + get_filename_component(_FILE_NAME ${_FULL_FILE_NAME} NAME) + ortools_cxx_test( + NAME + algorithms_${_NAME} + SOURCES + ${_FILE_NAME} + LINK_LIBRARIES + benchmark::benchmark + GTest::gmock + GTest::gtest_main + ) + endforeach() +endif() diff --git a/ortools/algorithms/duplicate_remover_test.cc b/ortools/algorithms/duplicate_remover_test.cc deleted file mode 100644 index b7c738b938..0000000000 --- a/ortools/algorithms/duplicate_remover_test.cc +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "ortools/algorithms/duplicate_remover.h" - -#include -#include - -#include "benchmark/benchmark.h" -#include "gtest/gtest.h" -#include "ortools/base/gmock.h" -#include "ortools/base/linked_hash_set.h" -#include "ortools/util/random_engine.h" -#include "util/tuple/dump_vars.h" - -namespace operations_research { -namespace { - -using ::testing::ElementsAre; -using ::testing::ElementsAreArray; -using ::testing::IsEmpty; - -TEST(DenseIntDuplicateRemoverTest, RemoveDuplicatesEmpty) { - std::vector v; - DenseIntDuplicateRemover deduper(10); - deduper.RemoveDuplicates(&v); - EXPECT_THAT(v, IsEmpty()); -} - -TEST(DenseIntDuplicateRemoverTest, RemoveDuplicatesNZeroAndEmpty) { - std::vector v; - DenseIntDuplicateRemover deduper(0); - deduper.RemoveDuplicates(&v); - EXPECT_THAT(v, IsEmpty()); -} - -TEST(DenseIntDuplicateRemoverTest, RemoveDuplicatesSimpleCaseWithDuplicates) { - std::vector v = {1, 8, 2, 2, 8, 4, 1, 2, 7, 0, 2}; - DenseIntDuplicateRemover deduper(9); - deduper.RemoveDuplicates(&v); - EXPECT_THAT(v, ElementsAre(1, 8, 2, 4, 7, 0)); -} - -TEST(DenseIntDuplicateRemoverTest, RemoveDuplicatesSimpleCaseWithNoDuplicates) { - std::vector v = {3, 2, 0, 5, 4, 1}; - const std::vector v_copy = v; - DenseIntDuplicateRemover deduper(6); - deduper.RemoveDuplicates(&v); - EXPECT_THAT(v, ElementsAreArray(v_copy)); -} - -TEST(DenseIntDuplicateRemoverTest, RemoveDuplicatesWithRepeatedField) { - const std::vector v = {1, 0, 1, 2, 1}; - google::protobuf::RepeatedField r(v.begin(), v.end()); - DenseIntDuplicateRemover deduper(3); - deduper.RemoveDuplicates(&r); - EXPECT_THAT(r, ElementsAre(1, 0, 2)); -} - -std::vector UniqueValues(absl::Span span) { - absl::flat_hash_set set; - std::vector out; - for (int x : span) - if (set.insert(x).second) out.push_back(x); - return out; -} - -TEST(DenseIntDuplicateRemoverTest, RemoveDuplicatesRandomizedStressTest) { - constexpr int kNumValues = 1003; - DenseIntDuplicateRemover deduper(kNumValues); - constexpr int kNumTests = 
1'000'000; - absl::BitGen random; - for (int t = 0; t < kNumTests; ++t) { - const int size = absl::LogUniform(random, 0, 16); - const int domain_size = - absl::Uniform(absl::IntervalClosed, random, 1, kNumValues); - std::vector v(size); - for (int& x : v) x = absl::Uniform(random, 0, domain_size); - const std::vector v_initial = v; - const std::vector unique_values = UniqueValues(v); - deduper.RemoveDuplicates(&v); - ASSERT_THAT(v, ElementsAreArray(unique_values)) << DUMP_VARS(t, v_initial); - } -} - -TEST(DenseIntDuplicateRemoverTest, - AppendAndLazilyRemoveDuplicatesRandomizedStressTest) { - constexpr int kNumValues = 103; - constexpr int kNumTests = 1'000; - std::mt19937 random; - gtl::linked_hash_set reference; - std::vector v; - int64_t num_extra_elements = 0; - int64_t num_unique_elements = 0; - for (int t = 0; t < kNumTests; ++t) { - const int num_inserts = absl::LogUniform(random, 2, 1 << 16); - const int domain_size = - absl::Uniform(absl::IntervalClosed, random, 1, kNumValues); - v.clear(); - reference.clear(); - DenseIntDuplicateRemover deduper(domain_size); - for (int i = 0; i < num_inserts; ++i) { - const int x = absl::Uniform(random, 0, domain_size); - deduper.AppendAndLazilyRemoveDuplicates(x, &v); - reference.insert(x); - } - ASSERT_LE(v.size(), domain_size * 2 + 15); - const int old_size = v.size(); - deduper.RemoveDuplicates(&v); - num_unique_elements += v.size(); - num_extra_elements += old_size - v.size(); - ASSERT_THAT(v, ElementsAreArray(reference)) - << DUMP_VARS(t, num_inserts, domain_size, old_size, v.size()); - } - EXPECT_LE(static_cast(num_extra_elements) / num_unique_elements, 0.5); -} - -template -void BM_AppendAndLazilyRemoveDuplicates(benchmark::State& state) { - const int num_inserts = state.range(0); - const int domain_size = state.range(1); - std::vector to_insert(num_inserts); - random_engine_t random; - for (int& x : to_insert) x = absl::Uniform(random, 0, domain_size); - DenseIntDuplicateRemover deduper(domain_size); - 
std::vector v; - absl::flat_hash_set set; - for (auto _ : state) { - v.clear(); - set.clear(); - for (int x : to_insert) { - if (use_flat_hash_set) { - set.insert(x); - } else { - deduper.AppendAndLazilyRemoveDuplicates(x, &v); - } - } - if (!use_flat_hash_set) deduper.RemoveDuplicates(&v); - benchmark::DoNotOptimize(v); - benchmark::DoNotOptimize(set); - } - state.SetItemsProcessed(state.iterations() * num_inserts); -} - -BENCHMARK(BM_AppendAndLazilyRemoveDuplicates) - ->ArgPair(1, 10) - ->ArgPair(10, 2) - ->ArgPair(10, 10) - ->ArgPair(100, 100) - ->ArgPair(100, 10) - ->ArgPair(10'000, 10'000) - ->ArgPair(10'000, 1'000) - ->ArgPair(10'000, 100) - ->ArgPair(10'000, 10) - ->ArgPair(1'000'000, 1'000'000) - ->ArgPair(1'000'000, 10'000) - ->ArgPair(1'000'000, 100); - -BENCHMARK(BM_AppendAndLazilyRemoveDuplicates) - ->ArgPair(1, 10) - ->ArgPair(10, 2) - ->ArgPair(10, 10) - ->ArgPair(100, 100) - ->ArgPair(100, 10) - ->ArgPair(10'000, 10'000) - ->ArgPair(10'000, 1'000) - ->ArgPair(10'000, 100) - ->ArgPair(10'000, 10) - ->ArgPair(1'000'000, 1'000'000) - ->ArgPair(1'000'000, 10'000) - ->ArgPair(1'000'000, 100); - -} // namespace -} // namespace operations_research diff --git a/ortools/algorithms/set_cover_orlib_test.cc b/ortools/algorithms/set_cover_orlib_test.cc deleted file mode 100644 index 849e2b76ad..0000000000 --- a/ortools/algorithms/set_cover_orlib_test.cc +++ /dev/null @@ -1,428 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include - -#include "absl/log/check.h" -#include "absl/strings/str_cat.h" -#include "absl/strings/str_join.h" -#include "absl/time/time.h" -#include "gtest/gtest.h" -#include "ortools/algorithms/set_cover_heuristics.h" -#include "ortools/algorithms/set_cover_invariant.h" -#include "ortools/algorithms/set_cover_lagrangian.h" -#include "ortools/algorithms/set_cover_mip.h" -#include "ortools/algorithms/set_cover_model.h" -#include "ortools/algorithms/set_cover_reader.h" -#include "ortools/base/logging.h" -#include "ortools/base/path.h" -#include "ortools/base/timer.h" - -namespace operations_research { - -void LogStats(std::string name, SetCoverModel* model) { - LOG(INFO) << ", " << name << ", num_elements, " << model->num_elements() - << ", num_subsets, " << model->num_subsets(); - LOG(INFO) << ", " << name << ", num_nonzeros, " << model->num_nonzeros() - << ", fill rate, " << model->FillRate(); - LOG(INFO) << ", " << name << ", cost, " - << model->ComputeCostStats().DebugString(); - - LOG(INFO) << ", " << name << ", num_rows, " << model->num_elements() - << ", rows sizes, " << model->ComputeRowStats().DebugString(); - LOG(INFO) << ", " << name << ", row size deciles, " - << absl::StrJoin(model->ComputeRowDeciles(), ", "); - LOG(INFO) << ", " << name << ", num_columns, " << model->num_subsets() - << ", columns sizes, " << model->ComputeColumnStats().DebugString(); - LOG(INFO) << ", " << name << ", column size deciles, " - << absl::StrJoin(model->ComputeColumnDeciles(), ", "); - SetCoverInvariant inv(model); - Preprocessor preprocessor(&inv); - preprocessor.NextSolution(); - LOG(INFO) << ", " << name << ", num_columns_fixed_by_singleton_row, " - << preprocessor.num_columns_fixed_by_singleton_row(); -} - -void LogCostAndTiming(std::string name, std::string algo, double cost, - absl::Duration duration) { - LOG(INFO) << ", " << name << ", " 
<< algo << "_cost, " << cost << ", " - << absl::ToInt64Microseconds(duration) << "e-6, s"; -} - -SetCoverInvariant RunChvatalAndSteepest(std::string name, - SetCoverModel* model) { - SetCoverInvariant inv(model); - GreedySolutionGenerator greedy(&inv); - WallTimer timer; - timer.Start(); - CHECK(greedy.NextSolution()); - DCHECK(inv.CheckConsistency()); - LogCostAndTiming(name, "GreedySolutionGenerator", inv.cost(), - timer.GetDuration()); - SteepestSearch steepest(&inv); - steepest.NextSolution(100000); - LogCostAndTiming(name, "GreedySteepestSearch", inv.cost(), - timer.GetDuration()); - DCHECK(inv.CheckConsistency()); - return inv; -} - -SetCoverInvariant RunChvatalAndGLS(std::string name, SetCoverModel* model) { - SetCoverInvariant inv(model); - GreedySolutionGenerator greedy(&inv); - WallTimer timer; - timer.Start(); - CHECK(greedy.NextSolution()); - DCHECK(inv.CheckConsistency()); - LogCostAndTiming(name, "GreedySolutionGenerator", inv.cost(), - timer.GetDuration()); - GuidedLocalSearch gls(&inv); - gls.NextSolution(100'000); - LogCostAndTiming(name, "GLS", inv.cost(), timer.GetDuration()); - DCHECK(inv.CheckConsistency()); - return inv; -} - -SetCoverInvariant RunElementDegreeGreedyAndSteepest(std::string name, - SetCoverModel* model) { - SetCoverInvariant inv(model); - ElementDegreeSolutionGenerator element_degree(&inv); - WallTimer timer; - timer.Start(); - CHECK(element_degree.NextSolution()); - DCHECK(inv.CheckConsistency()); - LogCostAndTiming(name, "ElementDegreeSolutionGenerator", inv.cost(), - timer.GetDuration()); - SteepestSearch steepest(&inv); - steepest.NextSolution(100000); - LogCostAndTiming(name, "ElementDegreeSteepestSearch", inv.cost(), - timer.GetDuration()); - DCHECK(inv.CheckConsistency()); - return inv; -} - -void IterateClearAndMip(std::string name, SetCoverInvariant* inv) { - WallTimer timer; - timer.Start(); - std::vector focus = inv->model()->all_subsets(); - double best_cost = inv->cost(); - SubsetBoolVector best_choices = 
inv->is_selected(); - for (int i = 0; i < 10; ++i) { - std::vector range = - ClearMostCoveredElements(std::min(100UL, focus.size()), inv); - SetCoverMip mip(inv); - mip.NextSolution(range, true, 0.02); - DCHECK(inv->CheckConsistency()); - if (inv->cost() < best_cost) { - best_cost = inv->cost(); - best_choices = inv->is_selected(); - } - } - timer.Stop(); - LogCostAndTiming(name, "IterateClearAndMip", best_cost, timer.GetDuration()); -} - -SetCoverInvariant ComputeLPLowerBound(std::string name, SetCoverModel* model) { - SetCoverInvariant inv(model); - WallTimer timer; - timer.Start(); - SetCoverMip mip(&inv, SetCoverMipSolver::SCIP); // Use Gurobi for large pbs. - mip.NextSolution(false, .3); // Use 300s or more for large problems. - LogCostAndTiming(name, "LPLowerBound", mip.lower_bound(), - timer.GetDuration()); - return inv; -} - -void ComputeLagrangianLowerBound(std::string name, SetCoverInvariant* inv) { - const SetCoverModel* model = inv->model(); - WallTimer timer; - timer.Start(); - SetCoverLagrangian lagrangian(inv, /*num_threads=*/8); - const auto [lower_bound, reduced_costs, multipliers] = - lagrangian.ComputeLowerBound(model->subset_costs(), inv->cost()); - LogCostAndTiming(name, "LagrangianLowerBound", lower_bound, - timer.GetDuration()); -} - -SetCoverInvariant RunMip(std::string name, SetCoverModel* model) { - SetCoverInvariant inv(model); - WallTimer timer; - timer.Start(); - SetCoverMip mip(&inv, SetCoverMipSolver::SCIP); // Use Gurobi for large pbs. - mip.NextSolution(true, .5); // Use 300s or more for large problems. 
- timer.Stop(); - LogCostAndTiming(name, "MIP", inv.cost(), timer.GetDuration()); - return inv; -} - -void IterateClearElementDegreeAndSteepest(std::string name, - SetCoverInvariant* inv) { - WallTimer timer; - timer.Start(); - double best_cost = inv->cost(); - SubsetBoolVector best_choices = inv->is_selected(); - ElementDegreeSolutionGenerator element_degree(inv); - SteepestSearch steepest(inv); - for (int i = 0; i < 1000; ++i) { - std::vector range = - ClearRandomSubsets(0.1 * inv->trace().size(), inv); - CHECK(element_degree.NextSolution()); - steepest.NextSolution(range, 100000); - DCHECK(inv->CheckConsistency()); - if (inv->cost() < best_cost) { - best_cost = inv->cost(); - best_choices = inv->is_selected(); - } - } - timer.Stop(); - LogCostAndTiming(name, "IterateClearElementDegreeAndSteepest", best_cost, - timer.GetDuration()); -} - -double RunSolver(std::string name, SetCoverModel* model) { - LogStats(name, model); - WallTimer global_timer; - global_timer.Start(); - RunChvatalAndSteepest(name, model); - // SetCoverInvariant inv = ComputeLPLowerBound(name, model); - // RunMip(name, model); - RunChvatalAndGLS(name, model); - SetCoverInvariant inv = RunElementDegreeGreedyAndSteepest(name, model); - ComputeLagrangianLowerBound(name, &inv); - // IterateClearAndMip(name, inv); - IterateClearElementDegreeAndSteepest(name, &inv); - return inv.cost(); -} - -// We break down the ORLIB set covering problems by their expected runtime with -// our solver (as of July 2023). -enum ProblemSize { - SUBMILLI, // < 1ms - FEWMILLIS, // < 3ms - SUBHUNDREDTH, // < 10ms - FEWHUNDREDTHS, // < 30ms - SUBTENTH, // < 100ms - FEWTENTHS, // < 300ms - SUBSECOND, // < 1s - FEWSECONDS, // < 3s - MANYSECONDS, // >= 3s - UNKNOWN = 999, // Not known (i.e. not benchmarked). -}; - -// These two macros provide indirection which allows the __LINE__ macro -// to be pasted, giving the tests useful names. 
-#define APPEND(x, y) x##y -#define APPEND_AND_EVAL(x, y) APPEND(x, y) - -const char data_dir[] = - "operations_research_data/operations_research_data/" - "SET_COVERING"; - -// In the following, the lower bounds are taken from: -// [1] Caprara, Alberto, Matteo Fischetti, and Paolo Toth. 1999. “A Heuristic -// Method for the Set Covering Problem.” Operations Research 47 (5): 730–43. -// https://www.jstor.org/stable/223097 , and -// [2] Yagiura, Mutsunori, Masahiro Kishida, and Toshihide Ibaraki. 2006. -// “A 3-Flip Neighborhood Local Search for the Set Covering Problem.” European -// Journal of Operational Research 172 (2): 472–99. -// https://www.sciencedirect.com/science/article/pii/S0377221704008264 - -// This macro makes it possible to declare each test below with a one-liner. -// 'best_objective' denotes the best objective costs found in literature. -// These are the proven optimal values. This can be achieved with MIP. -// For the rail instances, they are the best solution found in the literature -// [1] and [2]. They are not achievable though local search or MIP or a -// combination of the two. -// 'expected_objective' are the costs currently reached by the solver. -// TODO(user): find and add values for the unit cost (aka unicost) case. 
- -#define ORLIB_TEST(name, best_objective, expected_objective, size, function) \ - TEST(OrlibTest, APPEND_AND_EVAL(TestOnLine, __LINE__)) { \ - auto filespec = \ - file::JoinPathRespectAbsolute(::testing::SrcDir(), data_dir, name); \ - LOG(INFO) << "Reading " << name; \ - operations_research::SetCoverModel model = function(filespec); \ - double cost = RunSolver(name, &model); \ - (void)cost; \ - } - -#define ORLIB_UNICOST_TEST(name, best_objective, expected_objective, size, \ - function) \ - TEST(OrlibUnicostTest, APPEND_AND_EVAL(TestOnLine, __LINE__)) { \ - auto filespec = \ - file::JoinPathRespectAbsolute(::testing::SrcDir(), data_dir, name); \ - LOG(INFO) << "Reading " << name; \ - operations_research::SetCoverModel model = function(filespec); \ - for (SubsetIndex i : model.SubsetRange()) { \ - model.SetSubsetCost(i, 1.0); \ - } \ - double cost = RunSolver(absl::StrCat(name, "_unicost"), &model); \ - (void)cost; \ - } - -#define SCP_TEST(name, best_objective, expected_objective, size) \ - ORLIB_TEST(name, best_objective, expected_objective, size, \ - operations_research::ReadBeasleySetCoverProblem) \ - ORLIB_UNICOST_TEST(name, best_objective, expected_objective, size, \ - operations_research::ReadBeasleySetCoverProblem) - -#define RAIL_TEST(name, best_objective, expected_objective, size) \ - ORLIB_TEST(name, best_objective, expected_objective, size, \ - operations_research::ReadRailSetCoverProblem) \ - ORLIB_UNICOST_TEST(name, best_objective, expected_objective, size, \ - operations_research::ReadRailSetCoverProblem) - -#define BASIC_SCP -#define EXTRA_SCP -#define RAIL - -#ifdef BASIC_SCP -SCP_TEST("scp41.txt", 429, 442, FEWMILLIS); -SCP_TEST("scp42.txt", 512, 555, FEWMILLIS); -SCP_TEST("scp43.txt", 516, 557, FEWMILLIS); -SCP_TEST("scp44.txt", 494, 516, FEWMILLIS); -SCP_TEST("scp45.txt", 512, 530, FEWMILLIS); -SCP_TEST("scp46.txt", 560, 594, FEWMILLIS); -SCP_TEST("scp47.txt", 430, 451, FEWMILLIS); -SCP_TEST("scp48.txt", 492, 502, FEWMILLIS); 
-SCP_TEST("scp49.txt", 641, 693, FEWMILLIS); -SCP_TEST("scp410.txt", 514, 525, FEWMILLIS); - -SCP_TEST("scp51.txt", 253, 274, FEWMILLIS); -SCP_TEST("scp52.txt", 302, 329, FEWMILLIS); -SCP_TEST("scp53.txt", 226, 233, FEWMILLIS); -SCP_TEST("scp54.txt", 242, 255, FEWMILLIS); -SCP_TEST("scp55.txt", 211, 222, FEWMILLIS); -SCP_TEST("scp56.txt", 213, 234, FEWMILLIS); -SCP_TEST("scp57.txt", 293, 313, FEWMILLIS); -SCP_TEST("scp58.txt", 288, 309, FEWMILLIS); -SCP_TEST("scp59.txt", 279, 292, FEWMILLIS); -SCP_TEST("scp510.txt", 265, 276, FEWMILLIS); - -SCP_TEST("scp61.txt", 138, 151, FEWMILLIS); -SCP_TEST("scp62.txt", 146, 173, FEWMILLIS); -SCP_TEST("scp63.txt", 145, 154, FEWMILLIS); -SCP_TEST("scp64.txt", 131, 137, FEWMILLIS); -SCP_TEST("scp65.txt", 161, 181, FEWMILLIS); - -SCP_TEST("scpa1.txt", 253, 275, FEWHUNDREDTHS); -SCP_TEST("scpa2.txt", 252, 268, FEWHUNDREDTHS); -SCP_TEST("scpa3.txt", 232, 244, FEWHUNDREDTHS); -SCP_TEST("scpa4.txt", 234, 253, FEWHUNDREDTHS); -SCP_TEST("scpa5.txt", 236, 249, FEWHUNDREDTHS); - -SCP_TEST("scpb1.txt", 69, 74, FEWTENTHS); -SCP_TEST("scpb2.txt", 76, 78, FEWTENTHS); -SCP_TEST("scpb3.txt", 80, 85, FEWTENTHS); -SCP_TEST("scpb4.txt", 79, 85, FEWTENTHS); -SCP_TEST("scpb5.txt", 72, 77, FEWTENTHS); - -SCP_TEST("scpc1.txt", 227, 251, FEWHUNDREDTHS); -SCP_TEST("scpc2.txt", 219, 238, FEWHUNDREDTHS); -SCP_TEST("scpc3.txt", 243, 259, FEWHUNDREDTHS); -SCP_TEST("scpc4.txt", 219, 246, FEWHUNDREDTHS); -SCP_TEST("scpc5.txt", 214, 228, FEWHUNDREDTHS); - -SCP_TEST("scpd1.txt", 60, 68, FEWHUNDREDTHS); -SCP_TEST("scpd2.txt", 66, 70, FEWHUNDREDTHS); -SCP_TEST("scpd3.txt", 72, 78, FEWHUNDREDTHS); -SCP_TEST("scpd4.txt", 62, 67, FEWHUNDREDTHS); -SCP_TEST("scpd5.txt", 61, 72, FEWHUNDREDTHS); - -SCP_TEST("scpe1.txt", 5, 5, FEWMILLIS); -SCP_TEST("scpe2.txt", 5, 6, FEWMILLIS); -SCP_TEST("scpe3.txt", 5, 5, FEWMILLIS); -SCP_TEST("scpe4.txt", 5, 6, FEWMILLIS); -SCP_TEST("scpe5.txt", 5, 5, FEWMILLIS); - -SCP_TEST("scpnre1.txt", 29, 31, SUBTENTH); -SCP_TEST("scpnre2.txt", 
30, 34, SUBTENTH); -SCP_TEST("scpnre3.txt", 27, 32, SUBTENTH); -SCP_TEST("scpnre4.txt", 28, 32, SUBTENTH); -SCP_TEST("scpnre5.txt", 28, 31, SUBTENTH); - -SCP_TEST("scpnrf1.txt", 14, 17, SUBTENTH); -SCP_TEST("scpnrf2.txt", 15, 16, SUBTENTH); -SCP_TEST("scpnrf3.txt", 14, 16, SUBTENTH); -SCP_TEST("scpnrf4.txt", 14, 15, SUBTENTH); -SCP_TEST("scpnrf5.txt", 13, 15, SUBTENTH); - -SCP_TEST("scpnrg1.txt", 176, 196, SUBTENTH); -SCP_TEST("scpnrg2.txt", 154, 171, SUBTENTH); -SCP_TEST("scpnrg3.txt", 166, 182, SUBTENTH); -SCP_TEST("scpnrg4.txt", 168, 187, SUBTENTH); -SCP_TEST("scpnrg5.txt", 168, 183, SUBTENTH); - -SCP_TEST("scpnrh1.txt", 63, 71, FEWTENTHS); -SCP_TEST("scpnrh2.txt", 63, 70, FEWTENTHS); -SCP_TEST("scpnrh3.txt", 59, 65, FEWTENTHS); -SCP_TEST("scpnrh4.txt", 58, 66, FEWTENTHS); -SCP_TEST("scpnrh5.txt", 55, 62, FEWTENTHS); -#endif - -#ifdef EXTRA_SCP -SCP_TEST("scpclr10.txt", 0, 32, FEWMILLIS); -SCP_TEST("scpclr11.txt", 0, 30, FEWMILLIS); -SCP_TEST("scpclr12.txt", 0, 31, FEWMILLIS); -SCP_TEST("scpclr13.txt", 0, 33, FEWMILLIS); - -SCP_TEST("scpcyc06.txt", 0, 60, FEWMILLIS); -SCP_TEST("scpcyc07.txt", 0, 144, FEWMILLIS); -SCP_TEST("scpcyc08.txt", 0, 360, FEWMILLIS); -SCP_TEST("scpcyc09.txt", 0, 816, SUBHUNDREDTH); -SCP_TEST("scpcyc10.txt", 0, 1920, FEWHUNDREDTHS); -SCP_TEST("scpcyc11.txt", 0, 4284, SUBTENTH); -#endif - -#ifdef RAIL -RAIL_TEST("rail507.txt", 174, 218, FEWTENTHS); -RAIL_TEST("rail516.txt", 182, 204, FEWTENTHS); -RAIL_TEST("rail582.txt", 211, 250, FEWTENTHS); -RAIL_TEST("rail2536.txt", 691, 889, MANYSECONDS); -RAIL_TEST("rail2586.txt", 952, 1139, MANYSECONDS); -RAIL_TEST("rail4284.txt", 1065, 1362, MANYSECONDS); -RAIL_TEST("rail4872.txt", 1527, 1861, MANYSECONDS); // [2] -#endif - -#undef BASIC_SCP -#undef EXTRA_SCP -#undef RAIL - -#undef ORLIB_TEST -#undef ORLIB_UNICOST_TEST -#undef APPEND -#undef APPEND_AND_EVAL -#undef SCP_TEST -#undef RAIL_TEST - -TEST(SetCoverHugeTest, GenerateProblem) { - SetCoverModel seed_model = - 
ReadRailSetCoverProblem(file::JoinPathRespectAbsolute( - ::testing::SrcDir(), data_dir, "rail4284.txt")); - seed_model.CreateSparseRowView(); - const BaseInt num_wanted_subsets(100'000'000); - const BaseInt num_wanted_elements(40'000); - const double row_scale = 1.1; - const double column_scale = 1.1; - const double cost_scale = 10.0; - SetCoverModel model = SetCoverModel::GenerateRandomModelFrom( - seed_model, num_wanted_elements, num_wanted_subsets, row_scale, - column_scale, cost_scale); - SetCoverInvariant inv = - RunElementDegreeGreedyAndSteepest("rail4284_huge.txt", &model); - LOG(INFO) << "Cost: " << inv.cost(); -} - -} // namespace operations_research diff --git a/ortools/graph/CMakeLists.txt b/ortools/graph/CMakeLists.txt index f76659bd0c..fd86eacac3 100644 --- a/ortools/graph/CMakeLists.txt +++ b/ortools/graph/CMakeLists.txt @@ -39,3 +39,21 @@ target_link_libraries(${NAME} PRIVATE ${PROJECT_NAMESPACE}::ortools_proto $<$:Coin::Cbc>) #add_library(${PROJECT_NAMESPACE}::graph ALIAS ${NAME}) + +if(BUILD_TESTING) + file(GLOB _TEST_SRCS "*_test.cc") + foreach(_FULL_FILE_NAME IN LISTS _TEST_SRCS) + get_filename_component(_NAME ${_FULL_FILE_NAME} NAME_WE) + get_filename_component(_FILE_NAME ${_FULL_FILE_NAME} NAME) + ortools_cxx_test( + NAME + graph_${_NAME} + SOURCES + ${_FILE_NAME} + LINK_LIBRARIES + benchmark::benchmark + GTest::gmock + GTest::gtest_main + ) + endforeach() +endif() From 5912937a67753d450b662e6948147d3963550879 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 7 Oct 2024 15:23:51 +0200 Subject: [PATCH 049/105] Add WriteModelToMpsFile to MPSolver in Java/Python/.NET --- ortools/linear_solver/csharp/linear_solver.i | 11 +++++++++++ ortools/linear_solver/java/linear_solver.i | 12 ++++++++++++ ortools/linear_solver/python/linear_solver.i | 9 +++++++++ 3 files changed, 32 insertions(+) diff --git a/ortools/linear_solver/csharp/linear_solver.i b/ortools/linear_solver/csharp/linear_solver.i index 23e425b840..58d621e010 100644 --- 
a/ortools/linear_solver/csharp/linear_solver.i +++ b/ortools/linear_solver/csharp/linear_solver.i @@ -166,6 +166,8 @@ CONVERT_VECTOR(operations_research::MPVariable, MPVariable) // Extend code. %unignore operations_research::MPSolver::ExportModelAsLpFormat(bool); %unignore operations_research::MPSolver::ExportModelAsMpsFormat(bool, bool); +%unignore operations_research::MPSolver::WriteModelToMpsFile( + const std::string& filename, bool, bool); %unignore operations_research::MPSolver::SetHint( const std::vector&, const std::vector&); @@ -187,6 +189,15 @@ CONVERT_VECTOR(operations_research::MPVariable, MPVariable) return ExportModelAsMpsFormat(model, options).value_or(""); } + bool WriteModelToMpsFile(const std::string& filename, bool fixed_format, + bool obfuscated) { + operations_research::MPModelExportOptions options; + options.obfuscate = obfuscated; + operations_research::MPModelProto model; + $self->ExportModelToProto(&model); + return WriteModelToMpsFile(filename, model, options).ok(); + } + void SetHint(const std::vector& variables, const std::vector& values) { if (variables.size() != values.size()) { diff --git a/ortools/linear_solver/java/linear_solver.i b/ortools/linear_solver/java/linear_solver.i index 855fa43291..0192b20188 100644 --- a/ortools/linear_solver/java/linear_solver.i +++ b/ortools/linear_solver/java/linear_solver.i @@ -207,6 +207,18 @@ PROTO2_RETURN( return ExportModelAsMpsFormat(model, options).value_or(""); } + /** + * Write the model to file in MPS format. + */ + bool writeModelToMpsFile(const std::string& filename, bool fixed_format, + bool obfuscated) { + operations_research::MPModelExportOptions options; + options.obfuscate = obfuscated; + operations_research::MPModelProto model; + $self->ExportModelToProto(&model); + return WriteModelToMpsFile(filename, model, options).ok(); + } + /** * Sets a hint for solution. 
* diff --git a/ortools/linear_solver/python/linear_solver.i b/ortools/linear_solver/python/linear_solver.i index c73d11807c..e1d1c38426 100644 --- a/ortools/linear_solver/python/linear_solver.i +++ b/ortools/linear_solver/python/linear_solver.i @@ -143,6 +143,15 @@ from ortools.linear_solver.python.linear_solver_natural_api import VariableExpr return ExportModelAsMpsFormat(model, options).value_or(""); } + bool WriteModelToMpsFile(const std::string& filename, bool fixed_format, + bool obfuscated) { + operations_research::MPModelExportOptions options; + options.obfuscate = obfuscated; + operations_research::MPModelProto model; + $self->ExportModelToProto(&model); + return WriteModelToMpsFile(filename, model, options).ok(); + } + /// Set a hint for solution. /// /// If a feasible or almost-feasible solution to the problem is already known, From f269264849b7ae551c8913eed97757c7543eef61 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 7 Oct 2024 15:54:04 +0200 Subject: [PATCH 050/105] support hinting literals in CP-SAT Python --- ortools/sat/python/cp_model.py | 16 +++++++++++++--- ortools/sat/python/cp_model_test.py | 15 +++++++++++++++ 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index cea258dca5..0f1b93ea07 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -2939,10 +2939,20 @@ class CpModel: """ return swig_helper.CpSatHelper.write_model_to_file(self.__model, file) - def add_hint(self, var: IntVar, value: int) -> None: + @overload + def add_hint(self, var: IntVar, value: int) -> None: ... + + @overload + def add_hint(self, literal: BoolVarT, value: bool) -> None: ... 
+ + def add_hint(self, var, value) -> None: """Adds 'var == value' as a hint to the solver.""" - self.__model.solution_hint.vars.append(self.get_or_make_index(var)) - self.__model.solution_hint.values.append(value) + if var.index >= 0: + self.__model.solution_hint.vars.append(self.get_or_make_index(var)) + self.__model.solution_hint.values.append(int(value)) + else: + self.__model.solution_hint.vars.append(self.negated(var.index)) + self.__model.solution_hint.values.append(int(not value)) def clear_hints(self): """Removes any solution hint from the model.""" diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index 8bd2aae00b..09235c4e0b 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -1428,6 +1428,21 @@ class CpModelTest(absltest.TestCase): self.assertEqual(2, solver.value(x)) self.assertEqual(4, solver.value(y)) + def testSolutionHintingWithBooleans(self): + print("testSolutionHintingWithBooleans") + model = cp_model.CpModel() + x = model.new_bool_var("x") + y = model.new_bool_var("y") + model.add_linear_constraint(x + y, 1, 1) + model.add_hint(x, True) + model.add_hint(~y, True) + solver = cp_model.CpSolver() + solver.parameters.cp_model_presolve = False + status = solver.solve(model) + self.assertEqual(cp_model.OPTIMAL, status) + self.assertTrue(solver.boolean_value(x)) + self.assertFalse(solver.boolean_value(y)) + def testStats(self): print("testStats") model = cp_model.CpModel() From 7ded00dd97c4f7f30f7db893fe4c17e97771bb22 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 7 Oct 2024 16:16:36 +0200 Subject: [PATCH 051/105] bump bazel python requirements --- bazel/notebook_requirements.in | 12 ++++++------ bazel/notebook_requirements.txt | 8 ++++---- bazel/ortools_requirements.in | 14 +++++++------- bazel/ortools_requirements.txt | 10 +++++----- 4 files changed, 22 insertions(+), 22 deletions(-) diff --git a/bazel/notebook_requirements.in b/bazel/notebook_requirements.in 
index 6ef8bd685e..b4285bbff7 100644 --- a/bazel/notebook_requirements.in +++ b/bazel/notebook_requirements.in @@ -1,19 +1,19 @@ # OR-Tools code dependencies -absl-py==2.0.0 +absl-py==2.1.0 immutabledict==3.0.0 -numpy==1.26.4 -protobuf==5.27.3 +numpy==2.1.1 +protobuf==5.27.5 requests==2.32.0 -scipy==1.11.3 +scipy==1.14.1 # OR-Tools build dependencies mypy==1.6.1 mypy-protobuf==3.5.0 virtualenv==20.24.6 -black==24.3.0 +black==24.8.0 # Example dependencies -pandas==2.1.2 +pandas==2.2.3 # Visualization dependencies svgwrite==1.4.3 diff --git a/bazel/notebook_requirements.txt b/bazel/notebook_requirements.txt index c5443bfca5..afcaede6ae 100644 --- a/bazel/notebook_requirements.txt +++ b/bazel/notebook_requirements.txt @@ -30,7 +30,7 @@ backcall==0.2.0 # via ipython beautifulsoup4==4.12.2 # via nbconvert -black==24.3.0 +black==24.8.0 # via -r bazel/notebook_requirements.in bleach==6.0.0 # via nbconvert @@ -176,7 +176,7 @@ notebook-shim==0.2.3 # via # jupyterlab # notebook -numpy==1.26.4 +numpy==2.1.0 # via # -r bazel/notebook_requirements.in # pandas @@ -192,7 +192,7 @@ packaging==23.1 # jupyterlab-server # nbconvert # plotly -pandas==2.1.2 +pandas==2.2.3 # via -r bazel/notebook_requirements.in pandocfilters==1.5.0 # via nbconvert @@ -271,7 +271,7 @@ rpds-py==0.10.2 # via # jsonschema # referencing -scipy==1.11.3 +scipy==1.14.1 # via -r bazel/notebook_requirements.in send2trash==1.8.2 # via jupyter-server diff --git a/bazel/ortools_requirements.in b/bazel/ortools_requirements.in index 9cd90604d7..02b8424d62 100644 --- a/bazel/ortools_requirements.in +++ b/bazel/ortools_requirements.in @@ -1,17 +1,17 @@ # OR-Tools code dependencies -absl-py==2.0.0 +absl-py==2.1.0 immutabledict==3.0.0 -numpy==1.26.4 -protobuf==5.27.3 -requests==2.32.0 -scipy==1.11.3 +numpy==2.1.1 +protobuf==5.27.5 +requests==2.32.3 +scipy==1.14.1 # OR-Tools build dependencies mypy==1.6.1 mypy-protobuf==3.5.0 virtualenv==20.24.6 -black==24.3.0 +black==24.8.0 # Example dependencies -pandas==2.1.2 
+pandas==2.2.3 svgwrite==1.4.3 diff --git a/bazel/ortools_requirements.txt b/bazel/ortools_requirements.txt index 4df6fa63d5..bbae051c6c 100644 --- a/bazel/ortools_requirements.txt +++ b/bazel/ortools_requirements.txt @@ -6,7 +6,7 @@ # absl-py==2.0.0 # via -r bazel/ortools_requirements.in -black==24.3.0 +black==24.8.0 # via -r bazel/ortools_requirements.in certifi==2024.7.4 # via requests @@ -30,14 +30,14 @@ mypy-extensions==1.0.0 # mypy mypy-protobuf==3.5.0 # via -r bazel/ortools_requirements.in -numpy==1.26.4 +numpy==2.1.0 # via # -r bazel/ortools_requirements.in # pandas # scipy packaging==23.1 # via black -pandas==2.1.2 +pandas==2.2.3 # via -r bazel/ortools_requirements.in pathspec==0.11.1 # via black @@ -53,9 +53,9 @@ python-dateutil==2.8.2 # via pandas pytz==2022.7.1 # via pandas -requests==2.32.0 +requests==2.32.3 # via -r bazel/ortools_requirements.in -scipy==1.11.3 +scipy==1.14.1 # via -r bazel/ortools_requirements.in six==1.16.0 # via python-dateutil From caad9bf1e086a142202947a0881b97fb51900583 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 7 Oct 2024 16:26:07 +0200 Subject: [PATCH 052/105] [CP-SAT] add more type annotations --- ortools/sat/python/cp_model_test.py | 240 ++++++++++++++-------------- 1 file changed, 120 insertions(+), 120 deletions(-) diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index 09235c4e0b..b568289ad6 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -24,46 +24,46 @@ from ortools.sat.python import cp_model class SolutionCounter(cp_model.CpSolverSolutionCallback): """Count solutions.""" - def __init__(self): + def __init__(self) -> None: cp_model.CpSolverSolutionCallback.__init__(self) self.__solution_count = 0 - def on_solution_callback(self): + def on_solution_callback(self) -> None: self.__solution_count += 1 @property - def solution_count(self): + def solution_count(self) -> None: return self.__solution_count class 
SolutionSum(cp_model.CpSolverSolutionCallback): """Record the sum of variables in the solution.""" - def __init__(self, variables): + def __init__(self, variables: list[cp_model.IntVar]) -> None: cp_model.CpSolverSolutionCallback.__init__(self) - self.__sum = 0 + self.__sum: int = 0 self.__vars = variables - def on_solution_callback(self): + def on_solution_callback(self) -> None: self.__sum = sum(self.value(x) for x in self.__vars) @property - def sum(self): + def sum(self) -> int: return self.__sum class SolutionObjective(cp_model.CpSolverSolutionCallback): """Record the objective value of the solution.""" - def __init__(self): + def __init__(self) -> None: cp_model.CpSolverSolutionCallback.__init__(self) self.__obj = 0 - def on_solution_callback(self): + def on_solution_callback(self) -> None: self.__obj = self.objective_value @property - def obj(self): + def obj(self) -> None: return self.__obj @@ -74,25 +74,25 @@ class RecordSolution(cp_model.CpSolverSolutionCallback): self, int_vars: list[cp_model.VariableT], bool_vars: list[cp_model.LiteralT], - ): + ) -> None: cp_model.CpSolverSolutionCallback.__init__(self) self.__int_vars = int_vars self.__bool_vars = bool_vars self.__int_var_values: list[int] = [] self.__bool_var_values: list[bool] = [] - def on_solution_callback(self): + def on_solution_callback(self) -> None: for int_var in self.__int_vars: self.__int_var_values.append(self.value(int_var)) for bool_var in self.__bool_vars: self.__bool_var_values.append(self.boolean_value(bool_var)) @property - def int_var_values(self): + def int_var_values(self) -> None: return self.__int_var_values @property - def bool_var_values(self): + def bool_var_values(self) -> None: return self.__bool_var_values @@ -149,7 +149,7 @@ class BestBoundTimeCallback: class CpModelTest(absltest.TestCase): - def testCreateIntegerVariable(self): + def testCreateIntegerVariable(self) -> None: print("testCreateIntegerVariable") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") 
@@ -173,7 +173,7 @@ class CpModelTest(absltest.TestCase): cst = model.new_constant(5) self.assertEqual("5", str(cst)) - def testNegation(self): + def testNegation(self) -> None: print("testNegation") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -186,7 +186,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(nb.index, -b.index - 1) self.assertRaises(TypeError, x.negated) - def testEqualityOverload(self): + def testEqualityOverload(self) -> None: print("testEqualityOverload") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -194,7 +194,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(x, x) self.assertNotEqual(x, y) - def testLinear(self): + def testLinear(self) -> None: print("testLinear") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -206,7 +206,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(10, solver.value(x)) self.assertEqual(-5, solver.value(y)) - def testLinearConstraint(self): + def testLinearConstraint(self) -> None: print("testLinear") model = cp_model.CpModel() model.add_linear_constraint(5, 0, 10) @@ -217,7 +217,7 @@ class CpModelTest(absltest.TestCase): self.assertTrue(model.proto.constraints[1].HasField("bool_or")) self.assertEmpty(model.proto.constraints[1].bool_or.literals) - def testLinearNonEqual(self): + def testLinearNonEqual(self) -> None: print("testLinearNonEqual") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -229,7 +229,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(4, ct.linear.domain[2]) self.assertEqual(cp_model.INT_MAX, ct.linear.domain[3]) - def testEq(self): + def testEq(self) -> None: print("testEq") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -240,7 +240,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(2, ct.linear.domain[0]) self.assertEqual(2, ct.linear.domain[1]) - def testGe(self): + def testGe(self) -> None: print("testGe") model = cp_model.CpModel() x = model.new_int_var(-10, 
10, "x") @@ -251,7 +251,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(2, ct.linear.domain[0]) self.assertEqual(cp_model.INT_MAX, ct.linear.domain[1]) - def testGt(self): + def testGt(self) -> None: print("testGt") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -262,7 +262,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(3, ct.linear.domain[0]) self.assertEqual(cp_model.INT_MAX, ct.linear.domain[1]) - def testLe(self): + def testLe(self) -> None: print("testLe") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -273,7 +273,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(cp_model.INT_MIN, ct.linear.domain[0]) self.assertEqual(2, ct.linear.domain[1]) - def testLt(self): + def testLt(self) -> None: print("testLt") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -284,7 +284,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(cp_model.INT_MIN, ct.linear.domain[0]) self.assertEqual(1, ct.linear.domain[1]) - def testEqVar(self): + def testEqVar(self) -> None: print("testEqVar") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -298,7 +298,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(2, ct.linear.domain[0]) self.assertEqual(2, ct.linear.domain[1]) - def testGeVar(self): + def testGeVar(self) -> None: print("testGeVar") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -313,7 +313,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(1, ct.linear.domain[0]) self.assertEqual(cp_model.INT_MAX, ct.linear.domain[1]) - def testGtVar(self): + def testGtVar(self) -> None: print("testGeVar") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -328,7 +328,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(2, ct.linear.domain[0]) self.assertEqual(cp_model.INT_MAX, ct.linear.domain[1]) - def testLeVar(self): + def testLeVar(self) -> None: print("testLeVar") model = cp_model.CpModel() x = model.new_int_var(-10, 10, 
"x") @@ -343,7 +343,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(cp_model.INT_MIN, ct.linear.domain[0]) self.assertEqual(1, ct.linear.domain[1]) - def testLtVar(self): + def testLtVar(self) -> None: print("testLtVar") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -358,7 +358,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(cp_model.INT_MIN, ct.linear.domain[0]) self.assertEqual(0, ct.linear.domain[1]) - def testSimplification1(self): + def testSimplification1(self) -> None: print("testSimplification1") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -366,7 +366,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(x, prod.expression()) self.assertEqual(4, prod.coefficient()) - def testSimplification2(self): + def testSimplification2(self) -> None: print("testSimplification2") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -374,7 +374,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(x, prod.expression()) self.assertEqual(4, prod.coefficient()) - def testSimplification3(self): + def testSimplification3(self) -> None: print("testSimplification3") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -382,7 +382,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(x, prod.expression()) self.assertEqual(4, prod.coefficient()) - def testSimplification4(self): + def testSimplification4(self) -> None: print("testSimplification4") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -390,7 +390,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(x, prod.expression()) self.assertEqual(4, prod.coefficient()) - def testLinearNonEqualWithConstant(self): + def testLinearNonEqualWithConstant(self) -> None: print("testLinearNonEqualWithConstant") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -403,7 +403,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(-1, ct.linear.domain[2]) self.assertEqual(cp_model.INT_MAX, 
ct.linear.domain[3]) - def testLinearWithEnforcement(self): + def testLinearWithEnforcement(self) -> None: print("testLinearWithEnforcement") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -423,7 +423,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(-4, model.proto.constraints[2].enforcement_literal[0]) self.assertEqual(2, model.proto.constraints[2].enforcement_literal[1]) - def testConstraintWithName(self): + def testConstraintWithName(self) -> None: print("testConstraintWithName") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -431,7 +431,7 @@ class CpModelTest(absltest.TestCase): ct = model.add_linear_constraint(x + 2 * y, 0, 10).with_name("test_constraint") self.assertEqual("test_constraint", ct.name) - def testNaturalApiMinimize(self): + def testNaturalApiMinimize(self) -> None: print("testNaturalApiMinimize") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -445,7 +445,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(6, solver.value(1 + x)) self.assertEqual(-10.0, solver.objective_value) - def testNaturalApiMaximizeFloat(self): + def testNaturalApiMaximizeFloat(self) -> None: print("testNaturalApiMaximizeFloat") model = cp_model.CpModel() x = model.new_bool_var("x") @@ -458,7 +458,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(-10, solver.value(-y)) self.assertEqual(16.1, solver.objective_value) - def testNaturalApiMaximizeComplex(self): + def testNaturalApiMaximizeComplex(self) -> None: print("testNaturalApiMaximizeFloat") model = cp_model.CpModel() x1 = model.new_bool_var("x1") @@ -483,7 +483,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(5, solver.value(5 * x4.negated())) self.assertEqual(8, solver.objective_value) - def testNaturalApiMaximize(self): + def testNaturalApiMaximize(self) -> None: print("testNaturalApiMaximize") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -496,7 +496,7 @@ class CpModelTest(absltest.TestCase): 
self.assertEqual(-9, solver.value(y)) self.assertEqual(17, solver.objective_value) - def testMinimizeConstant(self): + def testMinimizeConstant(self) -> None: print("testMinimizeConstant") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -506,7 +506,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual("OPTIMAL", solver.status_name(solver.solve(model))) self.assertEqual(10, solver.objective_value) - def testMaximizeConstant(self): + def testMaximizeConstant(self) -> None: print("testMinimizeConstant") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -516,7 +516,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual("OPTIMAL", solver.status_name(solver.solve(model))) self.assertEqual(5, solver.objective_value) - def testAddTrue(self): + def testAddTrue(self) -> None: print("testAddTrue") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -526,7 +526,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual("OPTIMAL", solver.status_name(solver.solve(model))) self.assertEqual(-10, solver.value(x)) - def testAddFalse(self): + def testAddFalse(self) -> None: print("testAddFalse") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -535,7 +535,7 @@ class CpModelTest(absltest.TestCase): solver = cp_model.CpSolver() self.assertEqual("INFEASIBLE", solver.status_name(solver.solve(model))) - def testSum(self): + def testSum(self) -> None: print("testSum") model = cp_model.CpModel() x = [model.new_int_var(0, 2, "x%i" % i) for i in range(100)] @@ -547,7 +547,7 @@ class CpModelTest(absltest.TestCase): for i in range(100): self.assertEqual(solver.value(x[i]), 1 if i == 99 else 0) - def testSumWithApi(self): + def testSumWithApi(self) -> None: print("testSumWithApi") model = cp_model.CpModel() x = [model.new_int_var(0, 2, "x%i" % i) for i in range(100)] @@ -559,7 +559,7 @@ class CpModelTest(absltest.TestCase): for i in range(100): self.assertEqual(solver.value(x[i]), 1 if i == 99 else 0) - def 
testWeightedSum(self): + def testWeightedSum(self) -> None: print("testWeightedSum") model = cp_model.CpModel() x = [model.new_int_var(0, 2, "x%i" % i) for i in range(100)] @@ -572,7 +572,7 @@ class CpModelTest(absltest.TestCase): for i in range(100): self.assertEqual(solver.value(x[i]), 1 if i == 99 else 0) - def testAllDifferent(self): + def testAllDifferent(self) -> None: print("testAllDifferent") model = cp_model.CpModel() x = [model.new_int_var(0, 4, "x%i" % i) for i in range(5)] @@ -581,7 +581,7 @@ class CpModelTest(absltest.TestCase): self.assertLen(model.proto.constraints, 1) self.assertLen(model.proto.constraints[0].all_diff.exprs, 5) - def testAllDifferentGen(self): + def testAllDifferentGen(self) -> None: print("testAllDifferentGen") model = cp_model.CpModel() model.add_all_different(model.new_int_var(0, 4, "x%i" % i) for i in range(5)) @@ -589,7 +589,7 @@ class CpModelTest(absltest.TestCase): self.assertLen(model.proto.constraints, 1) self.assertLen(model.proto.constraints[0].all_diff.exprs, 5) - def testAllDifferentList(self): + def testAllDifferentList(self) -> None: print("testAllDifferentList") model = cp_model.CpModel() x = [model.new_int_var(0, 4, "x%i" % i) for i in range(5)] @@ -598,7 +598,7 @@ class CpModelTest(absltest.TestCase): self.assertLen(model.proto.constraints, 1) self.assertLen(model.proto.constraints[0].all_diff.exprs, 5) - def testElement(self): + def testElement(self) -> None: print("testElement") model = cp_model.CpModel() x = [model.new_int_var(0, 4, "x%i" % i) for i in range(5)] @@ -610,7 +610,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(4, model.proto.constraints[0].element.target) self.assertRaises(ValueError, model.add_element, x[0], [], x[4]) - def testFixedElement(self): + def testFixedElement(self) -> None: print("testFixedElement") model = cp_model.CpModel() x = [model.new_int_var(0, 4, "x%i" % i) for i in range(4)] @@ -622,7 +622,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(1, 
model.proto.constraints[0].linear.coeffs[0]) self.assertEqual([2, 2], model.proto.constraints[0].linear.domain) - def testCircuit(self): + def testCircuit(self) -> None: print("testCircuit") model = cp_model.CpModel() x = [model.new_bool_var(f"x{i}") for i in range(5)] @@ -634,7 +634,7 @@ class CpModelTest(absltest.TestCase): self.assertLen(model.proto.constraints[0].circuit.literals, 5) self.assertRaises(ValueError, model.add_circuit, []) - def testMultipleCircuit(self): + def testMultipleCircuit(self) -> None: print("testMultipleCircuit") model = cp_model.CpModel() x = [model.new_bool_var(f"x{i}") for i in range(5)] @@ -646,7 +646,7 @@ class CpModelTest(absltest.TestCase): self.assertLen(model.proto.constraints[0].routes.literals, 5) self.assertRaises(ValueError, model.add_multiple_circuit, []) - def testAllowedAssignments(self): + def testAllowedAssignments(self) -> None: print("testAllowedAssignments") model = cp_model.CpModel() x = [model.new_int_var(0, 4, "x%i" % i) for i in range(5)] @@ -670,7 +670,7 @@ class CpModelTest(absltest.TestCase): [(0, 1, 2, 3, 4), (4, 3, 2, 1, 1), (0, 0, 0, 0)], ) - def testForbiddenAssignments(self): + def testForbiddenAssignments(self) -> None: print("testForbiddenAssignments") model = cp_model.CpModel() x = [model.new_int_var(0, 4, "x%i" % i) for i in range(5)] @@ -695,7 +695,7 @@ class CpModelTest(absltest.TestCase): [(0, 1, 2, 3, 4), (4, 3, 2, 1, 1), (0, 0, 0, 0)], ) - def testAutomaton(self): + def testAutomaton(self) -> None: print("testAutomaton") model = cp_model.CpModel() x = [model.new_int_var(0, 4, "x%i" % i) for i in range(5)] @@ -734,7 +734,7 @@ class CpModelTest(absltest.TestCase): ) self.assertRaises(ValueError, model.add_automaton, x, 0, [2, 3], []) - def testInverse(self): + def testInverse(self) -> None: print("testInverse") model = cp_model.CpModel() x = [model.new_int_var(0, 4, "x%i" % i) for i in range(5)] @@ -745,7 +745,7 @@ class CpModelTest(absltest.TestCase): 
self.assertLen(model.proto.constraints[0].inverse.f_direct, 5) self.assertLen(model.proto.constraints[0].inverse.f_inverse, 5) - def testMaxEquality(self): + def testMaxEquality(self) -> None: print("testMaxEquality") model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") @@ -757,7 +757,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(0, model.proto.constraints[0].lin_max.target.vars[0]) self.assertEqual(1, model.proto.constraints[0].lin_max.target.coeffs[0]) - def testMinEquality(self): + def testMinEquality(self) -> None: print("testMinEquality") model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") @@ -768,7 +768,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(0, model.proto.constraints[0].lin_max.target.vars[0]) self.assertEqual(-1, model.proto.constraints[0].lin_max.target.coeffs[0]) - def testMinEqualityList(self): + def testMinEqualityList(self) -> None: print("testMinEqualityList") model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") @@ -779,7 +779,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(0, model.proto.constraints[0].lin_max.target.vars[0]) self.assertEqual(-1, model.proto.constraints[0].lin_max.target.coeffs[0]) - def testMinEqualityTuple(self): + def testMinEqualityTuple(self) -> None: print("testMinEqualityTuple") model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") @@ -790,7 +790,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(0, model.proto.constraints[0].lin_max.target.vars[0]) self.assertEqual(-1, model.proto.constraints[0].lin_max.target.coeffs[0]) - def testMinEqualityGenerator(self): + def testMinEqualityGenerator(self) -> None: print("testMinEqualityGenerator") model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") @@ -801,7 +801,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(0, model.proto.constraints[0].lin_max.target.vars[0]) self.assertEqual(-1, model.proto.constraints[0].lin_max.target.coeffs[0]) - def testMinEqualityWithConstant(self): + def 
testMinEqualityWithConstant(self) -> None: print("testMinEqualityWithConstant") model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") @@ -818,7 +818,7 @@ class CpModelTest(absltest.TestCase): self.assertEmpty(lin_max.exprs[1].vars) self.assertEqual(-3, lin_max.exprs[1].offset) - def testAbs(self): + def testAbs(self) -> None: print("testAbs") model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") @@ -845,7 +845,7 @@ class CpModelTest(absltest.TestCase): ) self.assertTrue(passed) - def testDivision(self): + def testDivision(self) -> None: print("testDivision") model = cp_model.CpModel() x = model.new_int_var(0, 10, "x") @@ -872,7 +872,7 @@ class CpModelTest(absltest.TestCase): ) self.assertTrue(passed) - def testModulo(self): + def testModulo(self) -> None: print("testModulo") model = cp_model.CpModel() x = model.new_int_var(0, 10, "x") @@ -899,7 +899,7 @@ class CpModelTest(absltest.TestCase): ) self.assertTrue(passed) - def testMultiplicationEquality(self): + def testMultiplicationEquality(self) -> None: print("testMultiplicationEquality") model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") @@ -910,7 +910,7 @@ class CpModelTest(absltest.TestCase): self.assertLen(model.proto.constraints[0].int_prod.exprs, 5) self.assertEqual(0, model.proto.constraints[0].int_prod.target.vars[0]) - def testImplication(self): + def testImplication(self) -> None: print("testImplication") model = cp_model.CpModel() x = model.new_bool_var("x") @@ -923,7 +923,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(x.index, model.proto.constraints[0].enforcement_literal[0]) self.assertEqual(y.index, model.proto.constraints[0].bool_or.literals[0]) - def testBoolOr(self): + def testBoolOr(self) -> None: print("testBoolOr") model = cp_model.CpModel() x = [model.new_bool_var("x%i" % i) for i in range(5)] @@ -937,7 +937,7 @@ class CpModelTest(absltest.TestCase): y = model.new_int_var(0, 4, "y") self.assertRaises(TypeError, model.add_bool_or, [y, False]) - def 
testBoolOrListOrGet(self): + def testBoolOrListOrGet(self) -> None: print("testBoolOrListOrGet") model = cp_model.CpModel() x = [model.new_bool_var("x%i" % i) for i in range(5)] @@ -952,7 +952,7 @@ class CpModelTest(absltest.TestCase): self.assertLen(model.proto.constraints[2].bool_or.literals, 2) self.assertLen(model.proto.constraints[3].bool_or.literals, 4) - def testAtLeastOne(self): + def testAtLeastOne(self) -> None: print("testAtLeastOne") model = cp_model.CpModel() x = [model.new_bool_var("x%i" % i) for i in range(5)] @@ -966,7 +966,7 @@ class CpModelTest(absltest.TestCase): y = model.new_int_var(0, 4, "y") self.assertRaises(TypeError, model.add_at_least_one, [y, False]) - def testAtMostOne(self): + def testAtMostOne(self) -> None: print("testAtMostOne") model = cp_model.CpModel() x = [model.new_bool_var("x%i" % i) for i in range(5)] @@ -980,7 +980,7 @@ class CpModelTest(absltest.TestCase): y = model.new_int_var(0, 4, "y") self.assertRaises(TypeError, model.add_at_most_one, [y, False]) - def testExactlyOne(self): + def testExactlyOne(self) -> None: print("testExactlyOne") model = cp_model.CpModel() x = [model.new_bool_var("x%i" % i) for i in range(5)] @@ -994,7 +994,7 @@ class CpModelTest(absltest.TestCase): y = model.new_int_var(0, 4, "y") self.assertRaises(TypeError, model.add_exactly_one, [y, False]) - def testBoolAnd(self): + def testBoolAnd(self) -> None: print("testBoolAnd") model = cp_model.CpModel() x = [model.new_bool_var("x%i" % i) for i in range(5)] @@ -1007,7 +1007,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(-3, model.proto.constraints[1].bool_and.literals[1]) self.assertEqual(5, model.proto.constraints[1].bool_and.literals[2]) - def testBoolXOr(self): + def testBoolXOr(self) -> None: print("testBoolXOr") model = cp_model.CpModel() x = [model.new_bool_var("x%i" % i) for i in range(5)] @@ -1016,7 +1016,7 @@ class CpModelTest(absltest.TestCase): self.assertLen(model.proto.constraints, 1) 
self.assertLen(model.proto.constraints[0].bool_xor.literals, 5) - def testMapDomain(self): + def testMapDomain(self) -> None: print("testMapDomain") model = cp_model.CpModel() x = [model.new_bool_var("x%i" % i) for i in range(5)] @@ -1025,7 +1025,7 @@ class CpModelTest(absltest.TestCase): self.assertLen(model.proto.variables, 6) self.assertLen(model.proto.constraints, 10) - def testInterval(self): + def testInterval(self) -> None: print("testInterval") model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") @@ -1042,13 +1042,13 @@ class CpModelTest(absltest.TestCase): self.assertEqual(size_expr, 2) self.assertEqual(str(end_expr), "(x + 2)") - def testAbsentInterval(self): + def testAbsentInterval(self) -> None: print("testInterval") model = cp_model.CpModel() i = model.new_optional_interval_var(1, 0, 1, False, "") self.assertEqual(0, i.index) - def testOptionalInterval(self): + def testOptionalInterval(self) -> None: print("testOptionalInterval") model = cp_model.CpModel() b = model.new_bool_var("b") @@ -1070,7 +1070,7 @@ class CpModelTest(absltest.TestCase): TypeError, model.new_optional_interval_var, 1, 2, 3, b + 1, "x" ) - def testNoOverlap(self): + def testNoOverlap(self) -> None: print("testNoOverlap") model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") @@ -1084,7 +1084,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(0, ct.proto.no_overlap.intervals[0]) self.assertEqual(1, ct.proto.no_overlap.intervals[1]) - def testNoOverlap2D(self): + def testNoOverlap2D(self) -> None: print("testNoOverlap2D") model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") @@ -1101,7 +1101,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(1, ct.proto.no_overlap_2d.y_intervals[0]) self.assertEqual(0, ct.proto.no_overlap_2d.y_intervals[1]) - def testCumulative(self): + def testCumulative(self) -> None: print("testCumulative") model = cp_model.CpModel() intervals = [ @@ -1120,7 +1120,7 @@ class CpModelTest(absltest.TestCase): 
self.assertLen(ct.proto.cumulative.intervals, 10) self.assertRaises(TypeError, model.add_cumulative, [intervals[0], 3], [2, 3], 3) - def testGetOrMakeIndexFromConstant(self): + def testGetOrMakeIndexFromConstant(self) -> None: print("testGetOrMakeIndexFromConstant") model = cp_model.CpModel() self.assertEqual(0, model.get_or_make_index_from_constant(3)) @@ -1131,7 +1131,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(3, model_var.domain[0]) self.assertEqual(3, model_var.domain[1]) - def testStr(self): + def testStr(self) -> None: print("testStr") model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") @@ -1164,7 +1164,7 @@ class CpModelTest(absltest.TestCase): i = model.new_interval_var(x, 2, y, "i") self.assertEqual(str(i), "i") - def testRepr(self): + def testRepr(self) -> None: print("testRepr") model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") @@ -1195,19 +1195,19 @@ class CpModelTest(absltest.TestCase): repr(k), "k(start = x2, size = 2, end = y2, is_present = not(b))" ) - def testDisplayBounds(self): + def testDisplayBounds(self) -> None: print("testDisplayBounds") self.assertEqual("10..20", cp_model.display_bounds([10, 20])) self.assertEqual("10", cp_model.display_bounds([10, 10])) self.assertEqual("10..15, 20..30", cp_model.display_bounds([10, 15, 20, 30])) - def testShortName(self): + def testShortName(self) -> None: print("testShortName") model = cp_model.CpModel() model.proto.variables.add(domain=[5, 10]) self.assertEqual("[5..10]", cp_model.short_name(model.proto, 0)) - def testIntegerExpressionErrors(self): + def testIntegerExpressionErrors(self) -> None: print("testIntegerExpressionErrors") model = cp_model.CpModel() x = model.new_int_var(0, 1, "x") @@ -1227,14 +1227,14 @@ class CpModelTest(absltest.TestCase): self.assertRaises(TypeError, x.__add__, "dummy") self.assertRaises(TypeError, x.__mul__, "dummy") - def testModelErrors(self): + def testModelErrors(self) -> None: print("testModelErrors") model = cp_model.CpModel() 
self.assertRaises(TypeError, model.add, "dummy") self.assertRaises(TypeError, model.get_or_make_index, "dummy") self.assertRaises(TypeError, model.minimize, "dummy") - def testSolverErrors(self): + def testSolverErrors(self) -> None: print("testSolverErrors") model = cp_model.CpModel() x = model.new_int_var(0, 1, "x") @@ -1247,7 +1247,7 @@ class CpModelTest(absltest.TestCase): self.assertRaises(TypeError, solver.value, "not_a_variable") self.assertRaises(TypeError, model.add_bool_or, [x, y]) - def testHasObjectiveMinimize(self): + def testHasObjectiveMinimize(self) -> None: print("testHasObjectiveMinimizs") model = cp_model.CpModel() x = model.new_int_var(0, 1, "x") @@ -1257,7 +1257,7 @@ class CpModelTest(absltest.TestCase): model.minimize(y) self.assertTrue(model.has_objective()) - def testHasObjectiveMaximize(self): + def testHasObjectiveMaximize(self) -> None: print("testHasObjectiveMaximizs") model = cp_model.CpModel() x = model.new_int_var(0, 1, "x") @@ -1267,7 +1267,7 @@ class CpModelTest(absltest.TestCase): model.maximize(y) self.assertTrue(model.has_objective()) - def testSearchForAllSolutions(self): + def testSearchForAllSolutions(self) -> None: print("testSearchForAllSolutions") model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") @@ -1281,7 +1281,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(cp_model.OPTIMAL, status) self.assertEqual(5, solution_counter.solution_count) - def testSolveWithSolutionCallback(self): + def testSolveWithSolutionCallback(self) -> None: print("testSolveWithSolutionCallback") model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") @@ -1295,7 +1295,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(cp_model.OPTIMAL, status) self.assertEqual(6, solution_sum.sum) - def testBestBoundCallback(self): + def testBestBoundCallback(self) -> None: print("testBestBoundCallback") model = cp_model.CpModel() x0 = model.new_bool_var("x0") @@ -1314,7 +1314,7 @@ class CpModelTest(absltest.TestCase): 
self.assertEqual(cp_model.OPTIMAL, status) self.assertEqual(2.6, best_bound_callback.best_bound) - def testValue(self): + def testValue(self) -> None: print("testValue") model = cp_model.CpModel() x = model.new_int_var(0, 10, "x") @@ -1327,7 +1327,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(solver.value(y), 10) self.assertEqual(solver.value(2), 2) - def testBooleanValue(self): + def testBooleanValue(self) -> None: print("testBooleanValue") model = cp_model.CpModel() x = model.new_bool_var("x") @@ -1349,7 +1349,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(solver.boolean_value(2), True) self.assertEqual(solver.boolean_value(0), False) - def testUnsupportedOperators(self): + def testUnsupportedOperators(self) -> None: print("testUnsupportedOperators") model = cp_model.CpModel() x = model.new_int_var(0, 10, "x") @@ -1365,7 +1365,7 @@ class CpModelTest(absltest.TestCase): if x == 2: print("passed2") - def testIsLiteralTrueFalse(self): + def testIsLiteralTrueFalse(self) -> None: print("testIsLiteralTrueFalse") model = cp_model.CpModel() x = model.new_constant(0) @@ -1378,7 +1378,7 @@ class CpModelTest(absltest.TestCase): self.assertFalse(cp_model.object_is_a_true_literal(False)) self.assertFalse(cp_model.object_is_a_false_literal(True)) - def testSolveMinimizeWithSolutionCallback(self): + def testSolveMinimizeWithSolutionCallback(self) -> None: print("testSolveMinimizeWithSolutionCallback") model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") @@ -1393,7 +1393,7 @@ class CpModelTest(absltest.TestCase): print("obj = ", solution_obj.obj) self.assertEqual(11, solution_obj.obj) - def testSolutionValue(self): + def testSolutionValue(self) -> None: print("testSolutionValue") model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") @@ -1413,7 +1413,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual([3, 5, -4], solution_recorder.int_var_values) self.assertEqual([True, False, True], solution_recorder.bool_var_values) - def 
testSolutionHinting(self): + def testSolutionHinting(self) -> None: print("testSolutionHinting") model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") @@ -1428,7 +1428,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(2, solver.value(x)) self.assertEqual(4, solver.value(y)) - def testSolutionHintingWithBooleans(self): + def testSolutionHintingWithBooleans(self) -> None: print("testSolutionHintingWithBooleans") model = cp_model.CpModel() x = model.new_bool_var("x") @@ -1443,7 +1443,7 @@ class CpModelTest(absltest.TestCase): self.assertTrue(solver.boolean_value(x)) self.assertFalse(solver.boolean_value(y)) - def testStats(self): + def testStats(self) -> None: print("testStats") model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") @@ -1460,7 +1460,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(solver.num_branches, 0) self.assertGreater(solver.wall_time, 0.0) - def testSearchStrategy(self): + def testSearchStrategy(self) -> None: print("testSearchStrategy") model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") @@ -1486,7 +1486,7 @@ class CpModelTest(absltest.TestCase): ) self.assertEqual(cp_model.SELECT_MAX_VALUE, strategy.domain_reduction_strategy) - def testModelAndResponseStats(self): + def testModelAndResponseStats(self) -> None: print("testStats") model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") @@ -1499,7 +1499,7 @@ class CpModelTest(absltest.TestCase): solver.solve(model) self.assertTrue(solver.response_stats()) - def testValidateModel(self): + def testValidateModel(self) -> None: print("testValidateModel") model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") @@ -1508,7 +1508,7 @@ class CpModelTest(absltest.TestCase): model.maximize(x + 2 * y) self.assertFalse(model.validate()) - def testValidateModelWithOverflow(self): + def testValidateModelWithOverflow(self) -> None: print("testValidateModel") model = cp_model.CpModel() x = model.new_int_var(0, cp_model.INT_MAX, "x") @@ -1517,7 +1517,7 @@ class 
CpModelTest(absltest.TestCase): model.maximize(x + 2 * y) self.assertTrue(model.validate()) - def testCopyModel(self): + def testCopyModel(self) -> None: print("testCopyModel") model = cp_model.CpModel() b = model.new_bool_var("b") @@ -1555,7 +1555,7 @@ class CpModelTest(absltest.TestCase): interval_ct = new_model.proto.constraints[copy_i.index].interval self.assertEqual(12, interval_ct.size.offset) - def testCustomLog(self): + def testCustomLog(self) -> None: print("testCustomLog") model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") @@ -1574,7 +1574,7 @@ class CpModelTest(absltest.TestCase): self.assertRegex(log_callback.log, ".*log_to_stdout.*") - def testIssue2762(self): + def testIssue2762(self) -> None: print("testIssue2762") model = cp_model.CpModel() @@ -1582,7 +1582,7 @@ class CpModelTest(absltest.TestCase): with self.assertRaises(NotImplementedError): model.add((x[0] != 0) or (x[1] != 0)) - def testModelError(self): + def testModelError(self) -> None: print("TestModelError") model = cp_model.CpModel() x = [model.new_int_var(0, -2, "x%i" % i) for i in range(100)] @@ -1592,7 +1592,7 @@ class CpModelTest(absltest.TestCase): self.assertEqual(cp_model.MODEL_INVALID, solver.solve(model)) self.assertEqual(solver.solution_info(), 'var #0 has no domain(): name: "x0"') - def testIntVarSeries(self): + def testIntVarSeries(self) -> None: print("testIntVarSeries") df = pd.DataFrame([1, -1, 1], columns=["coeffs"]) model = cp_model.CpModel() @@ -1605,7 +1605,7 @@ class CpModelTest(absltest.TestCase): solution = solver.values(x) self.assertTrue((solution.values == [0, 5, 0]).all()) - def testBoolVarSeries(self): + def testBoolVarSeries(self) -> None: print("testBoolVarSeries") df = pd.DataFrame([1, -1, 1], columns=["coeffs"]) model = cp_model.CpModel() @@ -1616,7 +1616,7 @@ class CpModelTest(absltest.TestCase): solution = solver.boolean_values(x) self.assertTrue((solution.values == [False, True, False]).all()) - def testFixedSizeIntervalVarSeries(self): + def 
testFixedSizeIntervalVarSeries(self) -> None: print("testFixedSizeIntervalVarSeries") df = pd.DataFrame([2, 4, 6], columns=["size"]) model = cp_model.CpModel() @@ -1642,7 +1642,7 @@ class CpModelTest(absltest.TestCase): ) self.assertLen(model.proto.constraints, 7) - def testIntervalVarSeries(self): + def testIntervalVarSeries(self) -> None: print("testIntervalVarSeries") df = pd.DataFrame([2, 4, 6], columns=["size"]) model = cp_model.CpModel() @@ -1692,7 +1692,7 @@ class CpModelTest(absltest.TestCase): ) self.assertLen(model.proto.constraints, 13) - def testIssue4376SatModel(self): + def testIssue4376SatModel(self) -> None: print("testIssue4376SatModel") letters: str = "BCFLMRT" @@ -1803,7 +1803,7 @@ TRFM""" if status == cp_model.OPTIMAL: self.assertLess(time.time(), solution_callback.last_time + 5.0) - def testIssue4376MinimizeModel(self): + def testIssue4376MinimizeModel(self) -> None: print("testIssue4376MinimizeModel") model = cp_model.CpModel() From 0571f3fac200e6655382403660d9b3339fcafd70 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 7 Oct 2024 16:27:06 +0200 Subject: [PATCH 053/105] [CP-SAT] one more presolve on int_prod --- ortools/sat/cp_model_postsolve.cc | 20 +++++++ ortools/sat/cp_model_presolve.cc | 83 ++++++++++++++++++++-------- ortools/util/sorted_interval_list.cc | 77 ++++++++++++++++++++++++++ ortools/util/sorted_interval_list.h | 5 ++ 4 files changed, 161 insertions(+), 24 deletions(-) diff --git a/ortools/sat/cp_model_postsolve.cc b/ortools/sat/cp_model_postsolve.cc index 24cf1f3949..bca33c8ab2 100644 --- a/ortools/sat/cp_model_postsolve.cc +++ b/ortools/sat/cp_model_postsolve.cc @@ -331,6 +331,23 @@ void PostsolveIntMod(const ConstraintProto& ct, std::vector* domains) { (*domains)[target.vars(0)] = Domain(value); } +// We only support assigning to an affine target. 
+void PostsolveIntProd(const ConstraintProto& ct, std::vector* domains) { + int64_t target_value = 1; + for (const LinearExpressionProto& expr : ct.int_prod().exprs()) { + target_value *= EvaluateLinearExpression(expr, *domains); + } + + const LinearExpressionProto& target = ct.int_prod().target(); + CHECK_EQ(target.vars().size(), 1); + CHECK(RefIsPositive(target.vars(0))); + + target_value -= target.offset(); + CHECK_EQ(target_value % target.coeffs(0), 0); + target_value /= target.coeffs(0); + (*domains)[target.vars(0)] = Domain(target_value); +} + void PostsolveResponse(const int64_t num_variables_in_original_model, const CpModelProto& mapping_proto, const std::vector& postsolve_mapping, @@ -390,6 +407,9 @@ void PostsolveResponse(const int64_t num_variables_in_original_model, case ConstraintProto::kIntMod: PostsolveIntMod(ct, &domains); break; + case ConstraintProto::kIntProd: + PostsolveIntProd(ct, &domains); + break; default: // This should never happen as we control what kind of constraint we // add to the mapping_proto; diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 1a1eeb358d..8f1b09515d 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -1455,21 +1455,66 @@ bool CpModelPresolver::PropagateAndReduceIntAbs(ConstraintProto* ct) { return false; } +Domain EvaluateImpliedIntProdDomain(const LinearArgumentProto& expr, + const PresolveContext& context) { + if (expr.exprs().size() == 2) { + const LinearExpressionProto& expr0 = expr.exprs(0); + const LinearExpressionProto& expr1 = expr.exprs(1); + if (LinearExpressionProtosAreEqual(expr0, expr1)) { + return context.DomainSuperSetOf(expr0).SquareSuperset(); + } + if (expr0.vars().size() == 1 && expr1.vars().size() == 1 && + expr0.vars(0) == expr1.vars(0)) { + return context.DomainOf(expr0.vars(0)) + .QuadraticSuperset(expr0.coeffs(0), expr0.offset(), expr1.coeffs(0), + expr1.offset()); + } + } + + Domain implied(1); + for (const 
LinearExpressionProto& expr : expr.exprs()) { + implied = + implied.ContinuousMultiplicationBy(context.DomainSuperSetOf(expr)); + } + return implied; +} + bool CpModelPresolver::PresolveIntProd(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; if (HasEnforcementLiteral(*ct)) return false; // Start by restricting the domain of target. We will be more precise later. bool domain_modified = false; - { - Domain implied(1); - for (const LinearExpressionProto& expr : ct->int_prod().exprs()) { - implied = - implied.ContinuousMultiplicationBy(context_->DomainSuperSetOf(expr)); - } - if (!context_->IntersectDomainWith(ct->int_prod().target(), implied, - &domain_modified)) { - return false; + Domain implied_domain = + EvaluateImpliedIntProdDomain(ct->int_prod(), *context_); + if (!context_->IntersectDomainWith(ct->int_prod().target(), implied_domain, + &domain_modified)) { + return false; + } + + // Remove a constraint if the target only appears in the constraint. For this + // to be correct some conditions must be met: + // - The target is an affine linear with coefficient -1 or 1. + // - The target does not appear in the rhs (no x = (a*x + b) * ...). + // - The target domain covers all the possible range of the rhs. 
+ if (ExpressionContainsSingleRef(ct->int_prod().target()) && + context_->VariableIsUniqueAndRemovable(ct->int_prod().target().vars(0)) && + std::abs(ct->int_prod().target().coeffs(0)) == 1) { + const LinearExpressionProto& target = ct->int_prod().target(); + if (!absl::c_any_of(ct->int_prod().exprs(), + [&target](const LinearExpressionProto& expr) { + return absl::c_linear_search(expr.vars(), + target.vars(0)); + })) { + const Domain target_domain = + Domain(target.offset()) + .AdditionWith(context_->DomainOf(target.vars(0))); + if (implied_domain.IsIncludedIn(target_domain)) { + context_->MarkVariableAsRemoved(ct->int_prod().target().vars(0)); + context_->NewMappingConstraint(*ct, __FILE__, __LINE__); + context_->UpdateRuleStats("int_prod: unused affine target"); + return RemoveConstraint(ct); + } } } @@ -1651,21 +1696,11 @@ bool CpModelPresolver::PresolveIntProd(ConstraintProto* ct) { } // Restrict the target domain if possible. - Domain implied(1); - bool is_square = false; - if (ct->int_prod().exprs_size() == 2 && - LinearExpressionProtosAreEqual(ct->int_prod().exprs(0), - ct->int_prod().exprs(1))) { - is_square = true; - implied = - context_->DomainSuperSetOf(ct->int_prod().exprs(0)).SquareSuperset(); - } else { - for (const LinearExpressionProto& expr : ct->int_prod().exprs()) { - implied = - implied.ContinuousMultiplicationBy(context_->DomainSuperSetOf(expr)); - } - } - if (!context_->IntersectDomainWith(ct->int_prod().target(), implied, + implied_domain = EvaluateImpliedIntProdDomain(ct->int_prod(), *context_); + const bool is_square = ct->int_prod().exprs_size() == 2 && + LinearExpressionProtosAreEqual( + ct->int_prod().exprs(0), ct->int_prod().exprs(1)); + if (!context_->IntersectDomainWith(ct->int_prod().target(), implied_domain, &domain_modified)) { return false; } diff --git a/ortools/util/sorted_interval_list.cc b/ortools/util/sorted_interval_list.cc index 6343b19ab1..332cf664b9 100644 --- a/ortools/util/sorted_interval_list.cc +++ 
b/ortools/util/sorted_interval_list.cc @@ -24,6 +24,7 @@ #include #include "absl/container/inlined_vector.h" +#include "absl/numeric/int128.h" #include "absl/strings/str_format.h" #include "absl/types/span.h" #include "ortools/base/logging.h" @@ -634,6 +635,82 @@ Domain Domain::SquareSuperset() const { } } +namespace { +ClosedInterval EvaluateQuadraticProdInterval(int64_t a, int64_t b, int64_t c, + int64_t d, int64_t variable_min, + int64_t variable_max) { + // We have (a*x + b)(c*x + d) = a*c*x*x + (a*d + b*c)*x + b*d + // The minimum or maximum is at x = -(a*d + b*c)/(2*a*c) + // + // The minimum and maximum of the expression happens when x is one of the + // following: + // - variable_min; + // - variable_max; + // - the closest point to the parabola extreme, rounded down; + // - the closest point to the parabola extreme, rounded up. + + const absl::int128 nominator = + -absl::int128{a} * absl::int128{d} - absl::int128{b} * absl::int128{c}; + const absl::int128 denominator = absl::int128{a} * absl::int128{c}; + const absl::int128 evaluated_minimum_point = (nominator / denominator) / 2; + + const auto& evaluate = [&a, &b, &c, &d](const int64_t x) { + return CapProd(CapAdd(CapProd(a, x), b), CapAdd(CapProd(c, x), d)); + }; + + const int64_t at_min_x = evaluate(variable_min); + const int64_t at_max_x = evaluate(variable_max); + int64_t min_var = std::min(at_min_x, at_max_x); + int64_t max_var = std::max(at_min_x, at_max_x); + + if (evaluated_minimum_point > variable_min && + evaluated_minimum_point < variable_max) { + const int64_t point_at_minimum_64 = + static_cast(evaluated_minimum_point); + const int rounder = ((nominator > 0) == (denominator > 0) ? 
1 : -1); + const int64_t point1 = evaluate(point_at_minimum_64); + const int64_t point2 = evaluate(point_at_minimum_64 + rounder); + min_var = std::min(min_var, std::min(point1, point2)); + max_var = std::max(max_var, std::max(point1, point2)); + } + + return ClosedInterval(min_var, max_var); +} +} // namespace + +Domain Domain::QuadraticSuperset(int64_t a, int64_t b, int64_t c, + int64_t d) const { + if (IsEmpty()) return Domain(); + + if (Size() < kDomainComplexityLimit) { + std::vector values; + values.reserve(Size()); + for (const int64_t value : Values()) { + values.push_back( // + CapProd( // + CapAdd(CapProd(a, value), b), CapAdd(CapProd(c, value), d))); + } + return Domain::FromValues(std::move(values)); + } + + if (a == 0) { + return MultiplicationBy(CapProd(c, b)).AdditionWith(Domain(CapProd(d, b))); + } + if (c == 0) { + return MultiplicationBy(CapProd(a, d)).AdditionWith(Domain(CapProd(d, b))); + } + + Domain result; + result.intervals_.reserve(NumIntervals()); + for (const auto& interval : intervals_) { + result.intervals_.push_back(EvaluateQuadraticProdInterval( + a, b, c, d, interval.start, interval.end)); + } + std::sort(result.intervals_.begin(), result.intervals_.end()); + UnionOfSortedIntervals(&result.intervals_); + return result; +} + // It is a bit difficult to see, but this code is doing the same thing as // for all interval in this.UnionWith(implied_domain.Complement())): // - Take the two extreme points (min and max) in interval \inter implied. diff --git a/ortools/util/sorted_interval_list.h b/ortools/util/sorted_interval_list.h index 052d550ca1..d0b8cf53ba 100644 --- a/ortools/util/sorted_interval_list.h +++ b/ortools/util/sorted_interval_list.h @@ -421,6 +421,11 @@ class Domain { */ Domain SquareSuperset() const; + /** + * Returns a superset of {x ∈ Int64, ∃ y ∈ D, x = (a*y + b)*(c*y + d) }. + */ + Domain QuadraticSuperset(int64_t a, int64_t b, int64_t c, int64_t d) const; + /** * Advanced usage. 
Given some \e implied information on this domain that is * assumed to be always true (i.e. only values in the intersection with From 391c782dd9b93d3d9ce2f02113c25671d87e8c07 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 7 Oct 2024 15:21:20 +0200 Subject: [PATCH 054/105] dotnet: Improve internal doc --- ortools/dotnet/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ortools/dotnet/README.md b/ortools/dotnet/README.md index 01d828af89..f11ce54848 100644 --- a/ortools/dotnet/README.md +++ b/ortools/dotnet/README.md @@ -315,6 +315,9 @@ ref: [Mono `pinvoke#libraryname`](https://www.mono-project.com/docs/advanced/pin ### Issues +`dotnet` seems to use a previous package version ?
+You can clear your local cache using: `dotnet nuget locals all --clear` + Some issue related to this process * [`PackageReference` only support `TargetFramework` condition](https://docs.microsoft.com/en-us/nuget/consume-packages/package-references-in-project-files#adding-a-packagereference-condition) * [Nuget needs to support dependencies specific to target runtime #1660](https://github.com/NuGet/Home/issues/1660) From 4b0aabeb4aba2f006c8b5d8e2107a0ad4ef6fac1 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 8 Oct 2024 16:02:16 +0200 Subject: [PATCH 055/105] fix pdptw example --- examples/cpp/pdptw.cc | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/examples/cpp/pdptw.cc b/examples/cpp/pdptw.cc index 1e28a98700..2a3a8bb265 100644 --- a/examples/cpp/pdptw.cc +++ b/examples/cpp/pdptw.cc @@ -82,6 +82,7 @@ ABSL_FLAG(std::string, routing_model_parameters, "", "override the DefaultRoutingModelParameters()"); namespace operations_research { +namespace routing { namespace { // Returns the list of variables to use for the Tabu metaheuristic. @@ -91,7 +92,7 @@ namespace { // - Total schedule duration. // TODO(user): add total waiting time. 
std::vector GetTabuVars(std::vector existing_vars, - operations_research::RoutingModel* routing) { + RoutingModel* routing) { Solver* const solver = routing->solver(); std::vector vars(std::move(existing_vars)); vars.push_back(routing->CostVar()); @@ -128,7 +129,7 @@ double ComputeScalingFactorFromCallback(const C& callback, int size) { void SetupModel(const LiLimParser& parser, const RoutingIndexManager& manager, RoutingModel* model, - RoutingSearchParameters* search_parameters) { + routing::RoutingSearchParameters* search_parameters) { const int64_t kPenalty = 100000000; const int64_t kFixedCost = 100000; const int num_nodes = parser.NumberOfNodes(); @@ -354,23 +355,24 @@ bool LoadAndSolve(absl::string_view pdp_file, return false; } +} // namespace routing } // namespace operations_research int main(int argc, char** argv) { absl::SetFlag(&FLAGS_stderrthreshold, 0); InitGoogle(argv[0], &argc, &argv, true); - operations_research::RoutingModelParameters model_parameters = - operations_research::DefaultRoutingModelParameters(); + operations_research::routing::RoutingModelParameters model_parameters = + operations_research::routing::DefaultRoutingModelParameters(); model_parameters.set_reduce_vehicle_cost_model( absl::GetFlag(FLAGS_reduce_vehicle_cost_model)); CHECK(google::protobuf::TextFormat::MergeFromString( absl::GetFlag(FLAGS_routing_model_parameters), &model_parameters)); - operations_research::RoutingSearchParameters search_parameters = - operations_research::DefaultRoutingSearchParameters(); + operations_research::routing::RoutingSearchParameters search_parameters = + operations_research::routing::DefaultRoutingSearchParameters(); CHECK(google::protobuf::TextFormat::MergeFromString( absl::GetFlag(FLAGS_routing_search_parameters), &search_parameters)); - if (!operations_research::LoadAndSolve(absl::GetFlag(FLAGS_pdp_file), - model_parameters, search_parameters)) { + if (!operations_research::routing::LoadAndSolve( + absl::GetFlag(FLAGS_pdp_file), 
model_parameters, search_parameters)) { LOG(INFO) << "Error solving " << absl::GetFlag(FLAGS_pdp_file); } return EXIT_SUCCESS; From 12ce2c78207566d643e05c5fe76eb1c5351ccf36 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 8 Oct 2024 16:02:57 +0200 Subject: [PATCH 056/105] remove obsolete threadpool ctor; minor reindent --- ortools/bop/bop_ls.cc | 3 ++- ortools/constraint_solver/constraint_solveri.h | 14 +++++++------- ortools/constraint_solver/samples/cp_is_fun_cp.cc | 15 ++++++++++----- ortools/graph/samples/assignment_min_flow.py | 8 ++++---- ortools/graph/shortest_paths.cc | 3 +-- 5 files changed, 24 insertions(+), 19 deletions(-) diff --git a/ortools/bop/bop_ls.cc b/ortools/bop/bop_ls.cc index af4cd53e1d..a1aa677a5b 100644 --- a/ortools/bop/bop_ls.cc +++ b/ortools/bop/bop_ls.cc @@ -829,7 +829,8 @@ bool LocalSearchAssignmentIterator::NextAssignment() { // All nodes have been explored. if (search_nodes_.empty()) { VLOG(1) << std::string(27, ' ') + "LS " << max_num_decisions_ - << " finished." << " #explored:" << num_nodes_ + << " finished." 
+ << " #explored:" << num_nodes_ << " #stored:" << transposition_table_.size() << " #skipped:" << num_skipped_nodes_; return false; diff --git a/ortools/constraint_solver/constraint_solveri.h b/ortools/constraint_solver/constraint_solveri.h index 6243c1793f..23eaa17077 100644 --- a/ortools/constraint_solver/constraint_solveri.h +++ b/ortools/constraint_solver/constraint_solveri.h @@ -517,7 +517,7 @@ class CallMethod0 : public Demon { private: T* const constraint_; - void (T::*const method_)(); + void (T::* const method_)(); const std::string name_; }; @@ -557,7 +557,7 @@ class CallMethod1 : public Demon { private: T* const constraint_; - void (T::*const method_)(P); + void (T::* const method_)(P); const std::string name_; P param1_; }; @@ -594,7 +594,7 @@ class CallMethod2 : public Demon { private: T* const constraint_; - void (T::*const method_)(P, Q); + void (T::* const method_)(P, Q); const std::string name_; P param1_; Q param2_; @@ -636,7 +636,7 @@ class CallMethod3 : public Demon { private: T* const constraint_; - void (T::*const method_)(P, Q, R); + void (T::* const method_)(P, Q, R); const std::string name_; P param1_; Q param2_; @@ -679,7 +679,7 @@ class DelayedCallMethod0 : public Demon { private: T* const constraint_; - void (T::*const method_)(); + void (T::* const method_)(); const std::string name_; }; @@ -714,7 +714,7 @@ class DelayedCallMethod1 : public Demon { private: T* const constraint_; - void (T::*const method_)(P); + void (T::* const method_)(P); const std::string name_; P param1_; }; @@ -757,7 +757,7 @@ class DelayedCallMethod2 : public Demon { private: T* const constraint_; - void (T::*const method_)(P, Q); + void (T::* const method_)(P, Q); const std::string name_; P param1_; Q param2_; diff --git a/ortools/constraint_solver/samples/cp_is_fun_cp.cc b/ortools/constraint_solver/samples/cp_is_fun_cp.cc index ca90e12be8..34fef46e5e 100644 --- a/ortools/constraint_solver/samples/cp_is_fun_cp.cc +++ 
b/ortools/constraint_solver/samples/cp_is_fun_cp.cc @@ -119,11 +119,16 @@ void CPIsFunCp() { letters, Solver::CHOOSE_FIRST_UNBOUND, Solver::ASSIGN_MIN_VALUE); solver.NewSearch(db); while (solver.NextSolution()) { - LOG(INFO) << "C=" << c->Value() << " " << "P=" << p->Value() << " " - << "I=" << i->Value() << " " << "S=" << s->Value() << " " - << "F=" << f->Value() << " " << "U=" << u->Value() << " " - << "N=" << n->Value() << " " << "T=" << t->Value() << " " - << "R=" << r->Value() << " " << "E=" << e->Value(); + LOG(INFO) << "C=" << c->Value() << " " + << "P=" << p->Value() << " " + << "I=" << i->Value() << " " + << "S=" << s->Value() << " " + << "F=" << f->Value() << " " + << "U=" << u->Value() << " " + << "N=" << n->Value() << " " + << "T=" << t->Value() << " " + << "R=" << r->Value() << " " + << "E=" << e->Value(); // Is CP + IS + FUN = TRUE? CHECK_EQ(p->Value() + s->Value() + n->Value() + diff --git a/ortools/graph/samples/assignment_min_flow.py b/ortools/graph/samples/assignment_min_flow.py index 381a75da5f..e418367997 100755 --- a/ortools/graph/samples/assignment_min_flow.py +++ b/ortools/graph/samples/assignment_min_flow.py @@ -51,10 +51,10 @@ def main(): # [START constraints] # Add each arc. - for idx, _ in enumerate(start_nodes): - smcf.add_arc_with_capacity_and_unit_cost( - start_nodes[idx], end_nodes[idx], capacities[idx], costs[idx] - ) + for start_node, end_node, capacity, cost in zip( + start_nodes, end_nodes, capacities, costs + ): + smcf.add_arc_with_capacity_and_unit_cost(start_node, end_node, capacity, cost) # Add node supplies. 
for idx, supply in enumerate(supplies): smcf.set_node_supply(idx, supply) diff --git a/ortools/graph/shortest_paths.cc b/ortools/graph/shortest_paths.cc index c5978f06e7..8c8b3d80fc 100644 --- a/ortools/graph/shortest_paths.cc +++ b/ortools/graph/shortest_paths.cc @@ -509,8 +509,7 @@ void ComputeManyToManyShortestPathsWithMultipleThreadsInternal( container->Initialize(unique_sources, unique_destinations, graph.num_nodes()); { - std::unique_ptr pool( - new ThreadPool("OR_Dijkstra", num_threads)); + std::unique_ptr pool(new ThreadPool(num_threads)); pool->StartWorkers(); for (int i = 0; i < unique_sources.size(); ++i) { pool->Schedule(absl::bind_front( From 5384913c34f131fc8f52a3bc22be3da444ca3f8f Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 8 Oct 2024 16:03:18 +0200 Subject: [PATCH 057/105] revamp non C++ MPSolver export methods --- ortools/java/com/google/ortools/Loader.java | 6 +++--- .../linear_solver/java/LinearSolverTest.java | 16 +++++----------- ortools/linear_solver/java/linear_solver.i | 18 +++++++++--------- ortools/linear_solver/linear_solver.cc | 2 +- ortools/linear_solver/python/linear_solver.i | 19 ++++++++++--------- 5 files changed, 28 insertions(+), 33 deletions(-) diff --git a/ortools/java/com/google/ortools/Loader.java b/ortools/java/com/google/ortools/Loader.java index 4c01a9c4c8..0c2dbadd76 100644 --- a/ortools/java/com/google/ortools/Loader.java +++ b/ortools/java/com/google/ortools/Loader.java @@ -119,9 +119,9 @@ public class Loader { Path tempPath = unpackNativeResources(resourceURI); // Load the native library System.load(tempPath.resolve(RESOURCE_PATH) - .resolve(System.mapLibraryName("jniortools")) - .toAbsolutePath() - .toString()); + .resolve(System.mapLibraryName("jniortools")) + .toAbsolutePath() + .toString()); loaded = true; } catch (IOException e) { throw new RuntimeException(e); diff --git a/ortools/linear_solver/java/LinearSolverTest.java b/ortools/linear_solver/java/LinearSolverTest.java index 83c6f826e0..435fe6b832 
100644 --- a/ortools/linear_solver/java/LinearSolverTest.java +++ b/ortools/linear_solver/java/LinearSolverTest.java @@ -520,15 +520,9 @@ public final class LinearSolverTest { final MPConstraint c0 = solver.makeConstraint(-infinity, 100.0); c0.setCoefficient(x1, 5); - final MPModelExportOptions obfuscate = new MPModelExportOptions(); - obfuscate.setObfuscate(true); - String out = solver.exportModelAsLpFormat(); + String out = solver.exportModelAsLpFormat(/* obfuscate= */ true); assertThat(out).isNotEmpty(); - out = solver.exportModelAsLpFormat(obfuscate); - assertThat(out).isNotEmpty(); - out = solver.exportModelAsMpsFormat(); - assertThat(out).isNotEmpty(); - out = solver.exportModelAsMpsFormat(obfuscate); + out = solver.exportModelAsMpsFormat(/* fixed_format= */ true, /* obfuscate= */ true); assertThat(out).isNotEmpty(); } @@ -543,9 +537,9 @@ public final class LinearSolverTest { assertNotNull(solver); // Test that forbidden names are renamed. solver.makeBoolVar("<-%$#!&~-+ ⌂"); // Some illegal name. - String out = solver.exportModelAsLpFormat(); + String out = solver.exportModelAsLpFormat(/* obfuscate= */ false); assertThat(out).isNotEmpty(); - out = solver.exportModelAsMpsFormat(); + out = solver.exportModelAsMpsFormat(/* fixed_format= */ true, /* obfuscate= */ true); assertThat(out).isNotEmpty(); } @@ -611,7 +605,7 @@ public final class LinearSolverTest { System.out.println("Number of constraints = " + solver.numConstraints()); solver.enableOutput(); - System.out.println(solver.exportModelAsLpFormat()); + System.out.println(solver.exportModelAsLpFormat(/* obfuscate= */ false)); System.out.println(solver.solve()); } diff --git a/ortools/linear_solver/java/linear_solver.i b/ortools/linear_solver/java/linear_solver.i index 0192b20188..2df3c50df6 100644 --- a/ortools/linear_solver/java/linear_solver.i +++ b/ortools/linear_solver/java/linear_solver.i @@ -188,9 +188,9 @@ PROTO2_RETURN( /** * Export the loaded model in LP format. 
*/ - std::string exportModelAsLpFormat( - const operations_research::MPModelExportOptions& options = - operations_research::MPModelExportOptions()) { + std::string exportModelAsLpFormat(bool obfuscate) { + operations_research::MPModelExportOptions options; + options.obfuscate = obfuscate; operations_research::MPModelProto model; $self->ExportModelToProto(&model); return ExportModelAsLpFormat(model, options).value_or(""); @@ -199,21 +199,21 @@ PROTO2_RETURN( /** * Export the loaded model in MPS format. */ - std::string exportModelAsMpsFormat( - const operations_research::MPModelExportOptions& options = - operations_research::MPModelExportOptions()) { + std::string exportModelAsMpsFormat(bool fixed_format, bool obfuscate) { + operations_research::MPModelExportOptions options; + options.obfuscate = obfuscate; operations_research::MPModelProto model; $self->ExportModelToProto(&model); return ExportModelAsMpsFormat(model, options).value_or(""); } /** - * Write the model to file in MPS format. + * Write the loaded model to file in MPS format. */ bool writeModelToMpsFile(const std::string& filename, bool fixed_format, - bool obfuscated) { + bool obfuscate) { operations_research::MPModelExportOptions options; - options.obfuscate = obfuscated; + options.obfuscate = obfuscate; operations_research::MPModelProto model; $self->ExportModelToProto(&model); return WriteModelToMpsFile(filename, model, options).ok(); diff --git a/ortools/linear_solver/linear_solver.cc b/ortools/linear_solver/linear_solver.cc index b8dd07a570..77de096ce9 100644 --- a/ortools/linear_solver/linear_solver.cc +++ b/ortools/linear_solver/linear_solver.cc @@ -1154,7 +1154,7 @@ void MPSolver::SolveLazyMutableRequest(LazyMutableCopy request, // not arbitrary, as we want to maintain any custom thread options set by // the user. They shouldn't matter for polling, but for solving we might // e.g. use a larger stack. 
- ThreadPool thread_pool("SolverThread", /*num_threads=*/1); + ThreadPool thread_pool(/*num_threads=*/1); thread_pool.StartWorkers(); thread_pool.Schedule(polling_func); diff --git a/ortools/linear_solver/python/linear_solver.i b/ortools/linear_solver/python/linear_solver.i index e1d1c38426..7a5bf5ef83 100644 --- a/ortools/linear_solver/python/linear_solver.i +++ b/ortools/linear_solver/python/linear_solver.i @@ -127,26 +127,26 @@ from ortools.linear_solver.python.linear_solver_natural_api import VariableExpr return status.ok(); } - std::string ExportModelAsLpFormat(bool obfuscated) { + std::string ExportModelAsLpFormat(bool obfuscate) { operations_research::MPModelExportOptions options; - options.obfuscate = obfuscated; + options.obfuscate = obfuscate; operations_research::MPModelProto model; $self->ExportModelToProto(&model); return ExportModelAsLpFormat(model, options).value_or(""); } - std::string ExportModelAsMpsFormat(bool fixed_format, bool obfuscated) { + std::string ExportModelAsMpsFormat(bool fixed_format, bool obfuscate) { operations_research::MPModelExportOptions options; - options.obfuscate = obfuscated; + options.obfuscate = obfuscate; operations_research::MPModelProto model; $self->ExportModelToProto(&model); return ExportModelAsMpsFormat(model, options).value_or(""); } - bool WriteModelToMpsFile(const std::string& filename, bool fixed_format, - bool obfuscated) { + bool WriteModelToMpsFile(const std::string& filename, bool fixed_format, + bool obfuscate) { operations_research::MPModelExportOptions options; - options.obfuscate = obfuscated; + options.obfuscate = obfuscate; operations_research::MPModelProto model; $self->ExportModelToProto(&model); return WriteModelToMpsFile(filename, model, options).ok(); @@ -374,8 +374,9 @@ PY_CONVERT(MPVariable); %rename (LookupVariable) operations_research::MPSolver::LookupVariableOrNull; %unignore operations_research::MPSolver::SetSolverSpecificParametersAsString; %unignore 
operations_research::MPSolver::NextSolution; -// ExportModelAsLpFormat() is also visible: it's overridden by an %extend, above. -// ExportModelAsMpsFormat() is also visible: it's overridden by an %extend, above. +%unignore operations_research::MPSolver::ExportModelAsLpFormat; +%unignore operations_research::MPSolver::ExportModelAsMpsFormat; +%unignore operations_research::MPSolver::WriteModelToMpsFile; %unignore operations_research::MPSolver::Write; // Expose very advanced parts of the MPSolver API. For expert users only. From 0405174e8128bd3bd3ce8a32cdaf87c1f6a59acb Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 8 Oct 2024 16:03:32 +0200 Subject: [PATCH 058/105] minor reindent --- ortools/glop/preprocessor.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ortools/glop/preprocessor.cc b/ortools/glop/preprocessor.cc index 4f88318c8a..035f8373da 100644 --- a/ortools/glop/preprocessor.cc +++ b/ortools/glop/preprocessor.cc @@ -422,8 +422,8 @@ bool EmptyColumnPreprocessor::Run(LinearProgram* lp) { if (!IsFinite(value)) { VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED, empty column " << col << " has a minimization cost of " << objective_coefficient - << " and bounds" << " [" << lower_bound << "," << upper_bound - << "]"; + << " and bounds" + << " [" << lower_bound << "," << upper_bound << "]"; status_ = ProblemStatus::INFEASIBLE_OR_UNBOUNDED; return false; } From a3d4efde854e902721a8e9210633e5f3e0800e49 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 8 Oct 2024 16:04:53 +0200 Subject: [PATCH 059/105] [CP-SAT] vivification after import; remove unused affine in presolve; remove obsolete ThreadPool usage --- ortools/sat/clause.cc | 37 +++++++++++++++++++++++ ortools/sat/clause.h | 41 +++++++++++++------------- ortools/sat/cp_model_presolve.cc | 25 +++++++--------- ortools/sat/cp_model_search.cc | 1 - ortools/sat/cp_model_search.h | 2 +- ortools/sat/cp_model_solver_helpers.cc | 19 ++++++++++-- ortools/sat/presolve_context.cc | 35 
++++++++++++++++++++++ ortools/sat/presolve_context.h | 10 +++++++ ortools/sat/sat_parameters.proto | 4 +++ ortools/sat/sat_solver.cc | 19 +++++++----- ortools/sat/sat_solver.h | 7 ++++- ortools/sat/subsolver.cc | 4 +-- ortools/sat/synchronization.h | 2 +- 13 files changed, 157 insertions(+), 49 deletions(-) diff --git a/ortools/sat/clause.cc b/ortools/sat/clause.cc index 8241b15329..f7de79c194 100644 --- a/ortools/sat/clause.cc +++ b/ortools/sat/clause.cc @@ -483,6 +483,7 @@ void ClauseManager::DeleteRemovedClauses() { const int old_size = clauses_.size(); for (int i = 0; i < old_size; ++i) { if (i == to_minimize_index_) to_minimize_index_ = new_size; + if (i == to_first_minimize_index_) to_first_minimize_index_ = new_size; if (i == to_probe_index_) to_probe_index_ = new_size; if (clauses_[i]->IsRemoved()) { delete clauses_[i]; @@ -493,9 +494,45 @@ void ClauseManager::DeleteRemovedClauses() { clauses_.resize(new_size); if (to_minimize_index_ > new_size) to_minimize_index_ = new_size; + if (to_first_minimize_index_ > new_size) to_first_minimize_index_ = new_size; if (to_probe_index_ > new_size) to_probe_index_ = new_size; } +SatClause* ClauseManager::NextNewClauseToMinimize() { + for (; to_first_minimize_index_ < clauses_.size(); + ++to_first_minimize_index_) { + if (clauses_[to_first_minimize_index_]->IsRemoved()) continue; + if (!IsRemovable(clauses_[to_first_minimize_index_])) { + // If the round-robin is in-sync with the new clauses, we may as well + // count this minimization as part of the round-robin and advance both + // indexes. 
+ if (to_minimize_index_ == to_first_minimize_index_) { + ++to_minimize_index_; + } + return clauses_[to_first_minimize_index_++]; + } + } + return nullptr; +} + +SatClause* ClauseManager::NextClauseToMinimize() { + for (; to_minimize_index_ < clauses_.size(); ++to_minimize_index_) { + if (clauses_[to_minimize_index_]->IsRemoved()) continue; + if (!IsRemovable(clauses_[to_minimize_index_])) { + return clauses_[to_minimize_index_++]; + } + } + return nullptr; +} + +SatClause* ClauseManager::NextClauseToProbe() { + for (; to_probe_index_ < clauses_.size(); ++to_probe_index_) { + if (clauses_[to_probe_index_]->IsRemoved()) continue; + return clauses_[to_probe_index_++]; + } + return nullptr; +} + // ----- BinaryImplicationGraph ----- void BinaryImplicationGraph::Resize(int num_variables) { diff --git a/ortools/sat/clause.h b/ortools/sat/clause.h index 3f1c0c8f84..457d62e2c2 100644 --- a/ortools/sat/clause.h +++ b/ortools/sat/clause.h @@ -245,30 +245,29 @@ class ClauseManager : public SatPropagator { drat_proof_handler_ = drat_proof_handler; } - // Round-robbing selection of the next clause to minimize/probe. - // Note that for minimization we only look at clause kept forever. - // - // TODO(user): If more indices are needed, switch to a generic API. - SatClause* NextClauseToMinimize() { - for (; to_minimize_index_ < clauses_.size(); ++to_minimize_index_) { - if (clauses_[to_minimize_index_]->IsRemoved()) continue; - if (!IsRemovable(clauses_[to_minimize_index_])) { - return clauses_[to_minimize_index_++]; - } - } - return nullptr; - } - SatClause* NextClauseToProbe() { - for (; to_probe_index_ < clauses_.size(); ++to_probe_index_) { - if (clauses_[to_probe_index_]->IsRemoved()) continue; - return clauses_[to_probe_index_++]; - } - return nullptr; - } + // Methods implementing pseudo-iterators over the clause database that are + // stable across cleanups. They all return nullptr if there are no more + // clauses. 
+ + // Returns the next clause to minimize that has never been minimized before. + // Note that we only minimize clauses kept forever. + SatClause* NextNewClauseToMinimize(); + // Returns the next clause to minimize, this iterator will be reset to the + // start so the clauses will be returned in round-robin order. + // Note that we only minimize clauses kept forever. + SatClause* NextClauseToMinimize(); + // Returns the next clause to probe in round-robin order. + SatClause* NextClauseToProbe(); // Restart the scans. void ResetToProbeIndex() { to_probe_index_ = 0; } void ResetToMinimizeIndex() { to_minimize_index_ = 0; } + // Ensures that NextNewClauseToMinimize() returns only learned clauses. + // This is a noop after the first call. + void EnsureNewClauseIndexInitialized() { + if (to_first_minimize_index_ > 0) return; + to_first_minimize_index_ = clauses_.size(); + } // During an inprocessing phase, it is easier to detach all clause first, // then simplify and then reattach them. Note however that during these @@ -380,7 +379,9 @@ class ClauseManager : public SatPropagator { // Note that the unit clauses and binary clause are not kept here. std::vector clauses_; + // TODO(user): If more indices are needed, switch to a generic API. int to_minimize_index_ = 0; + int to_first_minimize_index_ = 0; int to_probe_index_ = 0; // Only contains removable clause. diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 8f1b09515d..5454548ceb 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -2806,8 +2806,7 @@ bool CpModelPresolver::PresolveLinearOfSizeTwo(ConstraintProto* ct) { } context_->UpdateNewConstraintsVariableUsage(); - ct->Clear(); - return true; + return RemoveConstraint(ct); } // Code below require equality. @@ -11623,18 +11622,7 @@ bool CpModelPresolver::PresolveAffineRelationIfAny(int var) { // If var is no longer used, remove. 
Note that we can always do that since we // propagated the domain above and so we can find a feasible value for a for // any value of the representative. - if (context_->VariableIsUnique(var)) { - // Add relation with current representative to the mapping model. - ConstraintProto* ct = context_->NewMappingConstraint(__FILE__, __LINE__); - auto* arg = ct->mutable_linear(); - arg->add_vars(var); - arg->add_coeffs(1); - arg->add_vars(r.representative); - arg->add_coeffs(-r.coeff); - arg->add_domain(r.offset); - arg->add_domain(r.offset); - context_->RemoveVariableFromAffineRelation(var); - } + context_->RemoveNonRepresentativeAffineVariableIfUnused(var); return true; } @@ -11649,6 +11637,7 @@ bool CpModelPresolver::ProcessChangedVariables(std::vector* in_queue, context_->modified_domains.PositionsSetAtLeastOnce(); for (int i = 0; i < vector_that_can_grow_during_iter.size(); ++i) { const int v = vector_that_can_grow_during_iter[i]; + context_->modified_domains.Clear(v); if (context_->VariableIsNotUsedAnymore(v)) continue; if (context_->ModelIsUnsat()) return false; if (!PresolveAffineRelationIfAny(v)) return false; @@ -11779,6 +11768,11 @@ void CpModelPresolver::PresolveToFixPoint() { const int v = vector_that_can_grow_during_iter[i]; if (context_->VariableIsNotUsedAnymore(v)) continue; + // Remove the variable from the set to allow it to be pushed again. + // This is necessary since a few affine logic needs to add the same + // variable back to a second pass of processing. + context_->var_with_reduced_small_degree.Clear(v); + // Make sure all affine relations are propagated. // This also remove the relation if the degree is now one. if (context_->ModelIsUnsat()) return; @@ -11819,6 +11813,9 @@ void CpModelPresolver::PresolveToFixPoint() { if (ProcessChangedVariables(&in_queue, &queue)) continue; + // TODO(user): Uncomment this line once the tests pass. + // DCHECK(!context_->HasUnusedAffineVariable()); + // Deal with integer variable only appearing in an encoding. 
for (int v = 0; v < context_->working_model->variables().size(); ++v) { ProcessVariableOnlyUsedInEncoding(v); diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index 993acaf519..d6ffc9f60a 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -24,7 +24,6 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" -#include "absl/flags/flag.h" #include "absl/log/check.h" #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" diff --git a/ortools/sat/cp_model_search.h b/ortools/sat/cp_model_search.h index 2e005b818c..4f57c3f741 100644 --- a/ortools/sat/cp_model_search.h +++ b/ortools/sat/cp_model_search.h @@ -20,7 +20,7 @@ #include #include "absl/container/flat_hash_map.h" -#include "ortools/base/types.h" +#include "absl/strings/string_view.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/integer.h" diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc index 3e51e066ec..405d4b58b5 100644 --- a/ortools/sat/cp_model_solver_helpers.cc +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -856,14 +856,17 @@ int RegisterClausesLevelZeroImport(int id, CpModelMapping* const mapping = model->GetOrCreate(); auto* sat_solver = model->GetOrCreate(); auto* implications = model->GetOrCreate(); - bool share_glue_clauses = + const bool share_glue_clauses = model->GetOrCreate()->share_glue_clauses(); + const bool minimize_shared_clauses = + model->GetOrCreate()->minimize_shared_clauses(); auto* clause_stream = share_glue_clauses ? 
shared_clauses_manager->GetClauseStream(id) : nullptr; const auto& import_level_zero_clauses = [shared_clauses_manager, id, mapping, sat_solver, implications, - clause_stream]() { + clause_stream, + minimize_shared_clauses]() { std::vector> new_binary_clauses; shared_clauses_manager->GetUnseenBinaryClauses(id, &new_binary_clauses); implications->EnableSharing(false); @@ -877,7 +880,9 @@ int RegisterClausesLevelZeroImport(int id, implications->EnableSharing(true); if (clause_stream == nullptr) return true; + int new_clauses = 0; std::array local_clause; + sat_solver->EnsureNewClauseIndexInitialized(); for (const absl::Span shared_clause : shared_clauses_manager->GetUnseenClauses(id)) { // Check this clause was not already learned by this worker. @@ -893,8 +898,18 @@ int RegisterClausesLevelZeroImport(int id, absl::MakeSpan(local_clause).subspan(0, shared_clause.size()))) { return false; } + ++new_clauses; } clause_stream->RemoveWorstClauses(); + if (minimize_shared_clauses && new_clauses > 0) { + // The new clauses may be subsumed, so try to minimize them to reduce + // overhead of sharing. + // We only share up to 1024 literals worth of new clauses per second, so + // at most 1024 decisions to vivify all new clauses, so this should be + // relatively cheap. 
+ return sat_solver->MinimizeByPropagation( + /*dtime=*/0.5, /*minimize_new_clauses_only=*/true); + } return true; }; model->GetOrCreate()->callbacks.push_back( diff --git a/ortools/sat/presolve_context.cc b/ortools/sat/presolve_context.cc index c4789f9bff..870ba4004c 100644 --- a/ortools/sat/presolve_context.cc +++ b/ortools/sat/presolve_context.cc @@ -742,6 +742,19 @@ void PresolveContext::UpdateNewConstraintsVariableUsage() { } } +bool PresolveContext::HasUnusedAffineVariable() const { + for (int var = 0; var < working_model->variables_size(); ++var) { + if (VariableIsNotUsedAnymore(var)) continue; + const auto& constraints = VarToConstraints(var); + if (constraints.size() == 1 && + constraints.contains(kAffineRelationConstraint) && + GetAffineRelation(var).representative != var) { + return true; + } + } + return false; +} + // TODO(user): Also test var_to_constraints_ !! bool PresolveContext::ConstraintVariableUsageIsConsistent() { if (is_unsat_) return true; // We do not care in this case. @@ -893,6 +906,28 @@ void PresolveContext::RemoveAllVariablesFromAffineRelationConstraint() { } } +void PresolveContext::RemoveNonRepresentativeAffineVariableIfUnused(int var) { + if (!VariableIsUnique(var)) { + return; + } + const AffineRelation::Relation r = GetAffineRelation(var); + if (var == r.representative) { + return; + } + DCHECK(VarToConstraints(var).contains(kAffineRelationConstraint)); + DCHECK(!VariableIsNotUsedAnymore(r.representative)); + // Add relation with current representative to the mapping model. + ConstraintProto* ct = NewMappingConstraint(__FILE__, __LINE__); + auto* arg = ct->mutable_linear(); + arg->add_vars(var); + arg->add_coeffs(1); + arg->add_vars(r.representative); + arg->add_coeffs(-r.coeff); + arg->add_domain(r.offset); + arg->add_domain(r.offset); + RemoveVariableFromAffineRelation(var); +} + // We only call that for a non representative variable that is only used in // the kAffineRelationConstraint. 
Such variable can be ignored and should never // be seen again in the presolve. diff --git a/ortools/sat/presolve_context.h b/ortools/sat/presolve_context.h index 1dfac184ef..fc05904c09 100644 --- a/ortools/sat/presolve_context.h +++ b/ortools/sat/presolve_context.h @@ -298,6 +298,11 @@ class PresolveContext { // This is meant to be used in DEBUG mode only. bool ConstraintVariableUsageIsConsistent(); + // Loop over all variable and return true if one of them is only used in + // affine relation and is not a representative. This is in O(num_vars) and + // only meant to be used in DCHECKs. + bool HasUnusedAffineVariable() const; + // A "canonical domain" always have a MinOf() equal to zero. // If needed we introduce a new variable with such canonical domain and // add the relation X = Y + offset. @@ -502,6 +507,11 @@ class PresolveContext { return objective_domain_is_constraining_; } + // If var is an unused variable in an affine relation and is not a + // representative, we can remove it from the model. Note that this requires + // the variable usage graph to be up to date. + void RemoveNonRepresentativeAffineVariableIfUnused(int var); + // Advanced usage. This should be called when a variable can be removed from // the problem, so we don't count it as part of an affine relation anymore. void RemoveVariableFromAffineRelation(int var); diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 7527fa6481..b7e73d92ac 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -685,6 +685,10 @@ message SatParameters { // Implicitly disabled if share_binary_clauses is false. optional bool share_glue_clauses = 285 [default = false]; + // Minimize and detect subsumption of shared clauses immediately after they + // are imported. 
+ optional bool minimize_shared_clauses = 300 [default = true]; + // ========================================================================== // Debugging parameters // ========================================================================== diff --git a/ortools/sat/sat_solver.cc b/ortools/sat/sat_solver.cc index c29bdd25e0..f5fab65702 100644 --- a/ortools/sat/sat_solver.cc +++ b/ortools/sat/sat_solver.cc @@ -1501,7 +1501,8 @@ SatSolver::Status SatSolver::SolveInternal(TimeLimit* time_limit, } } -bool SatSolver::MinimizeByPropagation(double dtime) { +bool SatSolver::MinimizeByPropagation(double dtime, + bool minimize_new_clauses_only) { CHECK(time_limit_ != nullptr); AdvanceDeterministicTime(time_limit_); const double threshold = time_limit_->GetElapsedDeterministicTime() + dtime; @@ -1513,16 +1514,20 @@ bool SatSolver::MinimizeByPropagation(double dtime) { int num_resets = 0; while (!time_limit_->LimitReached() && time_limit_->GetElapsedDeterministicTime() < threshold) { - SatClause* to_minimize = clauses_propagator_->NextClauseToMinimize(); + SatClause* to_minimize = clauses_propagator_->NextNewClauseToMinimize(); + if (!minimize_new_clauses_only && to_minimize == nullptr) { + to_minimize = clauses_propagator_->NextClauseToMinimize(); + } + if (to_minimize != nullptr) { TryToMinimizeClause(to_minimize); if (model_is_unsat_) return false; + } else if (minimize_new_clauses_only) { + break; } else { - if (to_minimize == nullptr) { - ++num_resets; - VLOG(1) << "Minimized all clauses, restarting from first one."; - clauses_propagator_->ResetToMinimizeIndex(); - } + ++num_resets; + VLOG(1) << "Minimized all clauses, restarting from first one."; + clauses_propagator_->ResetToMinimizeIndex(); if (num_resets > 1) break; } diff --git a/ortools/sat/sat_solver.h b/ortools/sat/sat_solver.h index d1bc181aa5..84bbf0483f 100644 --- a/ortools/sat/sat_solver.h +++ b/ortools/sat/sat_solver.h @@ -472,7 +472,8 @@ class SatSolver { // Mainly visible for testing. 
ABSL_MUST_USE_RESULT bool Propagate(); - bool MinimizeByPropagation(double dtime); + bool MinimizeByPropagation(double dtime, + bool minimize_new_clauses_only = false); // Advance the given time limit with all the deterministic time that was // elapsed since last call. @@ -503,6 +504,10 @@ class SatSolver { // exposed to allow processing a conflict detected outside normal propagation. void ProcessCurrentConflict(); + void EnsureNewClauseIndexInitialized() { + clauses_propagator_->EnsureNewClauseIndexInitialized(); + } + private: // All Solve() functions end up calling this one. Status SolveInternal(TimeLimit* time_limit, int64_t max_number_of_conflicts); diff --git a/ortools/sat/subsolver.cc b/ortools/sat/subsolver.cc index 6641c51add..3884ed47d6 100644 --- a/ortools/sat/subsolver.cc +++ b/ortools/sat/subsolver.cc @@ -136,7 +136,7 @@ void DeterministicLoop(std::vector>& subsolvers, std::vector indices; std::vector timing; to_run.reserve(batch_size); - ThreadPool pool("DeterministicLoop", num_threads); + ThreadPool pool(num_threads); pool.StartWorkers(); for (int batch_index = 0;; ++batch_index) { VLOG(2) << "Starting deterministic batch of size " << batch_size; @@ -207,7 +207,7 @@ void NonDeterministicLoop(std::vector>& subsolvers, return num_in_flight < num_threads; }; - ThreadPool pool("NonDeterministicLoop", num_threads); + ThreadPool pool(num_threads); pool.StartWorkers(); // The lambda below are using little space, but there is no reason diff --git a/ortools/sat/synchronization.h b/ortools/sat/synchronization.h index 8e462399c1..c5d5acc116 100644 --- a/ortools/sat/synchronization.h +++ b/ortools/sat/synchronization.h @@ -589,7 +589,7 @@ class SharedBoundsManager { class UniqueClauseStream { public: static constexpr int kMinClauseSize = 3; - static constexpr int kMaxClauseSize = 8; + static constexpr int kMaxClauseSize = 32; // Export 4KiB of clauses per batch. 
static constexpr int kMaxLiteralsPerBatch = 4096 / sizeof(int); // Bound the total literals we buffer, approximately enforced so shorter From 3b5cacc130134c2b7459243f4f8c19cd23254a56 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Tue, 8 Oct 2024 16:47:26 +0200 Subject: [PATCH 060/105] sat: Fix build --- ortools/sat/work_assignment_test.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ortools/sat/work_assignment_test.cc b/ortools/sat/work_assignment_test.cc index ba4a6d9533..83de91dc08 100644 --- a/ortools/sat/work_assignment_test.cc +++ b/ortools/sat/work_assignment_test.cc @@ -146,9 +146,9 @@ class SharedTreeSolveTest : public testing::TestWithParam { params.set_num_workers(4); params.set_shared_tree_num_workers(4); params.set_cp_model_presolve(false); - params.MergeFrom( + params.MergeFrom(SatParameters{ google::protobuf::contrib::parse_proto::ParseTextProtoOrDie( - GetParam())); + GetParam())}); return params; } }; From ac7622df2a22399f334adc4945bcb7b556ba2ea5 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 8 Oct 2024 19:31:32 +0200 Subject: [PATCH 061/105] fix --- ortools/sat/cp_model_search_test.cc | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/ortools/sat/cp_model_search_test.cc b/ortools/sat/cp_model_search_test.cc index c2961a523d..4207708282 100644 --- a/ortools/sat/cp_model_search_test.cc +++ b/ortools/sat/cp_model_search_test.cc @@ -89,6 +89,7 @@ TEST(RandomSearchTest, CheckSeed) { params.set_search_branching(SatParameters::FIXED_SEARCH); params.set_use_absl_random(false); // Otherwise, each solve changes. 
params.set_random_seed(0); + params.set_num_workers(1); const CpSolverResponse response = SolveWithParameters(model_proto, params); for (int i = 0; i < kSize; ++i) { @@ -122,7 +123,7 @@ TEST(BasicFixedSearchBehaviorTest, Default) { )pb"); Model model; model.Add(NewSatParameters( - "cp_model_presolve:false,search_branching:FIXED_SEARCH")); + "cp_model_presolve:false,search_branching:FIXED_SEARCH,num_workers:1")); const CpSolverResponse response = SolveCpModel(model_proto, &model); EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); EXPECT_THAT(response.solution(), testing::ElementsAre(4, 3, 0, 5, 6)); @@ -154,7 +155,7 @@ TEST(BasicFixedSearchBehaviorTest, ReverseOrder) { )pb"); Model model; model.Add(NewSatParameters( - "cp_model_presolve:false,search_branching:FIXED_SEARCH")); + "cp_model_presolve:false,search_branching:FIXED_SEARCH,num_workers:1")); const CpSolverResponse response = SolveCpModel(model_proto, &model); EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); EXPECT_THAT(response.solution(), testing::ElementsAre(6, 5, 0, 4, 3)); @@ -186,7 +187,7 @@ TEST(BasicFixedSearchBehaviorTest, MinDomainSize) { )pb"); Model model; model.Add(NewSatParameters( - "cp_model_presolve:false,search_branching:FIXED_SEARCH")); + "cp_model_presolve:false,search_branching:FIXED_SEARCH,num_workers:1")); const CpSolverResponse response = SolveCpModel(model_proto, &model); EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); EXPECT_THAT(response.solution(), testing::ElementsAre(10, 7, 6, 5, 9)); @@ -211,7 +212,7 @@ TEST(BasicFixedSearchBehaviorTest, WithTransformation1) { )pb"); Model model; model.Add(NewSatParameters( - "cp_model_presolve:false,search_branching:FIXED_SEARCH")); + "cp_model_presolve:false,search_branching:FIXED_SEARCH,num_workers:1")); const CpSolverResponse response = SolveCpModel(model_proto, &model); EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); EXPECT_THAT(response.solution(), testing::ElementsAre(3, 4)); @@ -236,7 +237,7 @@ 
TEST(BasicFixedSearchBehaviorTest, WithTransformation2) { )pb"); Model model; model.Add(NewSatParameters( - "cp_model_presolve:false,search_branching:FIXED_SEARCH")); + "cp_model_presolve:false,search_branching:FIXED_SEARCH,num_workers:1")); const CpSolverResponse response = SolveCpModel(model_proto, &model); EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); EXPECT_THAT(response.solution(), testing::ElementsAre(6, 7)); @@ -262,6 +263,7 @@ TEST(BasicFixedSearchBehaviorTest, MedianTest) { SatParameters params; params.set_keep_all_feasible_solutions_in_presolve(true); params.set_search_branching(SatParameters::FIXED_SEARCH); + params.set_num_workers(1); const CpSolverResponse response = SolveWithParameters(model_proto, params); EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); EXPECT_THAT(response.solution(), testing::ElementsAre(4, 6)); @@ -286,6 +288,7 @@ TEST(BasicFixedSearchBehaviorTest, MedianTest2) { SatParameters params; params.set_keep_all_feasible_solutions_in_presolve(true); params.set_search_branching(SatParameters::FIXED_SEARCH); + params.set_num_workers(1); const CpSolverResponse response = SolveWithParameters(model_proto, params); EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); From 792e3362b7e29c943fda9b826959edf6e5a818c3 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Tue, 8 Oct 2024 22:18:28 +0200 Subject: [PATCH 062/105] graph: Fix max_flow_test --- cmake/cpp.cmake | 4 +++- ortools/graph/CMakeLists.txt | 13 +++++++++++++ ortools/graph/max_flow_test.cc | 2 ++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/cmake/cpp.cmake b/cmake/cpp.cmake index d586dbcdea..aee9c23808 100644 --- a/cmake/cpp.cmake +++ b/cmake/cpp.cmake @@ -211,7 +211,9 @@ function(ortools_cxx_test) if(BUILD_TESTING) add_test( NAME cxx_${TEST_NAME} - COMMAND ${TEST_NAME}) + COMMAND ${TEST_NAME} + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + ) endif() message(STATUS "Configuring test ${TEST_NAME} ...DONE") endfunction() diff --git 
a/ortools/graph/CMakeLists.txt b/ortools/graph/CMakeLists.txt index fd86eacac3..75bd09582d 100644 --- a/ortools/graph/CMakeLists.txt +++ b/ortools/graph/CMakeLists.txt @@ -42,6 +42,7 @@ target_link_libraries(${NAME} PRIVATE if(BUILD_TESTING) file(GLOB _TEST_SRCS "*_test.cc") + list(FILTER _TEST_SRCS EXCLUDE REGEX "max_flow_test.cc") foreach(_FULL_FILE_NAME IN LISTS _TEST_SRCS) get_filename_component(_NAME ${_FULL_FILE_NAME} NAME_WE) get_filename_component(_FILE_NAME ${_FULL_FILE_NAME} NAME) @@ -56,4 +57,16 @@ if(BUILD_TESTING) GTest::gtest_main ) endforeach() + ortools_cxx_test( + NAME + graph_max_flow_test + SOURCES + "max_flow_test.cc" + LINK_LIBRARIES + benchmark::benchmark + GTest::gmock + GTest::gtest_main + COMPILE_DEFINITIONS + -DROOT_DIR="../../" + ) endif() diff --git a/ortools/graph/max_flow_test.cc b/ortools/graph/max_flow_test.cc index 5b9e0fbcce..d7af59311a 100644 --- a/ortools/graph/max_flow_test.cc +++ b/ortools/graph/max_flow_test.cc @@ -36,7 +36,9 @@ #include "ortools/linear_solver/linear_solver.h" #include "ortools/util/file_util.h" +#if not defined(ROOT_DIR) #define ROOT_DIR "com_google_ortools/" +#endif namespace operations_research { namespace { From 30bbae08bb19b218bc0c694ec1e71a61d7168de0 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 7 Oct 2024 09:53:14 +0200 Subject: [PATCH 063/105] cmake: Add MathOpt tests to build (#4402) --- cmake/cpp.cmake | 78 ++++ ortools/math_opt/CMakeLists.txt | 1 + .../constraints/indicator/CMakeLists.txt | 18 + .../constraints/quadratic/CMakeLists.txt | 18 + .../second_order_cone/CMakeLists.txt | 18 + .../math_opt/constraints/sos/CMakeLists.txt | 18 + .../math_opt/constraints/util/CMakeLists.txt | 18 + ortools/math_opt/cpp/CMakeLists.txt | 16 + ortools/math_opt/solver_tests/CMakeLists.txt | 369 ++++++++++++++++++ ortools/math_opt/solvers/CMakeLists.txt | 180 +++++++++ 10 files changed, 734 insertions(+) create mode 100644 ortools/math_opt/solver_tests/CMakeLists.txt diff --git a/cmake/cpp.cmake 
b/cmake/cpp.cmake index aee9c23808..e384f314ba 100644 --- a/cmake/cpp.cmake +++ b/cmake/cpp.cmake @@ -218,6 +218,84 @@ function(ortools_cxx_test) message(STATUS "Configuring test ${TEST_NAME} ...DONE") endfunction() +################### +## C++ Library ## +################### +# ortools_cxx_library() +# CMake function to generate and build C++ library. +# Parameters: +# NAME: CMake target name +# SOURCES: List of source files +# [TYPE]: SHARED or STATIC +# [COMPILE_DEFINITIONS]: List of private compile definitions +# [COMPILE_OPTIONS]: List of private compile options +# [LINK_LIBRARIES]: List of **public** libraries to use when linking +# note: ortools::ortools is always linked to the target +# [LINK_OPTIONS]: List of private link options +# e.g.: +# ortools_cxx_library( +# NAME +# foo_bar_library +# SOURCES +# bar_library.cc +# ${PROJECT_SOURCE_DIR}/ortools/foo/bar_library.cc +# TYPE +# SHARED +# LINK_LIBRARIES +# GTest::gmock +# GTest::gtest_main +# TESTING +# ) +function(ortools_cxx_library) + set(options "TESTING") + set(oneValueArgs "NAME;TYPE") + set(multiValueArgs + "SOURCES;COMPILE_DEFINITIONS;COMPILE_OPTIONS;LINK_LIBRARIES;LINK_OPTIONS") + cmake_parse_arguments(LIBRARY + "${options}" + "${oneValueArgs}" + "${multiValueArgs}" + ${ARGN} + ) + if(LIBRARY_TESTING AND NOT BUILD_TESTING) + return() + endif() + + if(NOT LIBRARY_NAME) + message(FATAL_ERROR "no NAME provided") + endif() + if(NOT LIBRARY_SOURCES) + message(FATAL_ERROR "no SOURCES provided") + endif() + message(STATUS "Configuring library ${LIBRARY_NAME} ...") + + add_library(${LIBRARY_NAME} ${LIBRARY_TYPE} "") + target_sources(${LIBRARY_NAME} PRIVATE ${LIBRARY_SOURCES}) + target_include_directories(${LIBRARY_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) + target_compile_definitions(${LIBRARY_NAME} PRIVATE ${LIBRARY_COMPILE_DEFINITIONS}) + target_compile_features(${LIBRARY_NAME} PRIVATE cxx_std_17) + target_compile_options(${LIBRARY_NAME} PRIVATE ${LIBRARY_COMPILE_OPTIONS}) + 
target_link_libraries(${LIBRARY_NAME} PUBLIC + ${PROJECT_NAMESPACE}::ortools + ${LIBRARY_LINK_LIBRARIES} + ) + target_link_options(${LIBRARY_NAME} PRIVATE ${LIBRARY_LINK_OPTIONS}) + + include(GNUInstallDirs) + if(APPLE) + set_target_properties(${LIBRARY_NAME} PROPERTIES + INSTALL_RPATH "@loader_path/../${CMAKE_INSTALL_LIBDIR};@loader_path") + elseif(UNIX) + cmake_path(RELATIVE_PATH CMAKE_INSTALL_FULL_LIBDIR + BASE_DIRECTORY ${CMAKE_INSTALL_FULL_BINDIR} + OUTPUT_VARIABLE libdir_relative_path) + set_target_properties(${LIBRARY_NAME} PROPERTIES + INSTALL_RPATH "$ORIGIN/${libdir_relative_path}:$ORIGIN") + endif() + add_library(${PROJECT_NAMESPACE}::${LIBRARY_NAME} ALIAS ${LIBRARY_NAME}) + message(STATUS "Configuring library ${LIBRARY_NAME} ...DONE") +endfunction() + ################## ## PROTO FILE ## ################## diff --git a/ortools/math_opt/CMakeLists.txt b/ortools/math_opt/CMakeLists.txt index 13615e7acc..8339888525 100644 --- a/ortools/math_opt/CMakeLists.txt +++ b/ortools/math_opt/CMakeLists.txt @@ -20,6 +20,7 @@ add_subdirectory(constraints) add_subdirectory(cpp) add_subdirectory(io) add_subdirectory(labs) +add_subdirectory(solver_tests) add_subdirectory(solvers) add_subdirectory(storage) add_subdirectory(validators) diff --git a/ortools/math_opt/constraints/indicator/CMakeLists.txt b/ortools/math_opt/constraints/indicator/CMakeLists.txt index af47b4cb6a..5e286eee83 100644 --- a/ortools/math_opt/constraints/indicator/CMakeLists.txt +++ b/ortools/math_opt/constraints/indicator/CMakeLists.txt @@ -26,3 +26,21 @@ target_link_libraries(${NAME} PRIVATE absl::strings ${PROJECT_NAMESPACE}::math_opt_proto) #install(TARGETS ${NAME} EXPORT ${PROJECT_NAME}Targets) + +if(BUILD_TESTING) + file(GLOB _TEST_SRCS "*_test.cc") + foreach(_FULL_FILE_NAME IN LISTS _TEST_SRCS) + get_filename_component(_NAME ${_FULL_FILE_NAME} NAME_WE) + get_filename_component(_FILE_NAME ${_FULL_FILE_NAME} NAME) + ortools_cxx_test( + NAME + math_opt_constraints_indicator_${_NAME} + SOURCES + 
${_FILE_NAME} + LINK_LIBRARIES + #benchmark::benchmark + GTest::gmock + GTest::gtest_main + ) + endforeach() +endif() diff --git a/ortools/math_opt/constraints/quadratic/CMakeLists.txt b/ortools/math_opt/constraints/quadratic/CMakeLists.txt index 0240bc5296..a58437c85c 100644 --- a/ortools/math_opt/constraints/quadratic/CMakeLists.txt +++ b/ortools/math_opt/constraints/quadratic/CMakeLists.txt @@ -25,3 +25,21 @@ target_include_directories(${NAME} PUBLIC target_link_libraries(${NAME} PRIVATE absl::strings ${PROJECT_NAMESPACE}::math_opt_proto) + +if(BUILD_TESTING) + file(GLOB _TEST_SRCS "*_test.cc") + foreach(_FULL_FILE_NAME IN LISTS _TEST_SRCS) + get_filename_component(_NAME ${_FULL_FILE_NAME} NAME_WE) + get_filename_component(_FILE_NAME ${_FULL_FILE_NAME} NAME) + ortools_cxx_test( + NAME + math_opt_constraints_quadratic_${_NAME} + SOURCES + ${_FILE_NAME} + LINK_LIBRARIES + #benchmark::benchmark + GTest::gmock + GTest::gtest_main + ) + endforeach() +endif() diff --git a/ortools/math_opt/constraints/second_order_cone/CMakeLists.txt b/ortools/math_opt/constraints/second_order_cone/CMakeLists.txt index c282fe1a7e..53c8cd9ede 100644 --- a/ortools/math_opt/constraints/second_order_cone/CMakeLists.txt +++ b/ortools/math_opt/constraints/second_order_cone/CMakeLists.txt @@ -25,3 +25,21 @@ target_include_directories(${NAME} PUBLIC target_link_libraries(${NAME} PRIVATE absl::strings ${PROJECT_NAMESPACE}::math_opt_proto) + +if(BUILD_TESTING) + file(GLOB _TEST_SRCS "*_test.cc") + foreach(_FULL_FILE_NAME IN LISTS _TEST_SRCS) + get_filename_component(_NAME ${_FULL_FILE_NAME} NAME_WE) + get_filename_component(_FILE_NAME ${_FULL_FILE_NAME} NAME) + ortools_cxx_test( + NAME + math_opt_constraints_second_order_cone_${_NAME} + SOURCES + ${_FILE_NAME} + LINK_LIBRARIES + #benchmark::benchmark + GTest::gmock + GTest::gtest_main + ) + endforeach() +endif() diff --git a/ortools/math_opt/constraints/sos/CMakeLists.txt b/ortools/math_opt/constraints/sos/CMakeLists.txt index 
c0b0ff207a..8d54916edf 100644 --- a/ortools/math_opt/constraints/sos/CMakeLists.txt +++ b/ortools/math_opt/constraints/sos/CMakeLists.txt @@ -25,3 +25,21 @@ target_include_directories(${NAME} PUBLIC target_link_libraries(${NAME} PRIVATE absl::strings ${PROJECT_NAMESPACE}::math_opt_proto) + +if(BUILD_TESTING) + file(GLOB _TEST_SRCS "*_test.cc") + foreach(_FULL_FILE_NAME IN LISTS _TEST_SRCS) + get_filename_component(_NAME ${_FULL_FILE_NAME} NAME_WE) + get_filename_component(_FILE_NAME ${_FULL_FILE_NAME} NAME) + ortools_cxx_test( + NAME + math_opt_constraints_sos_${_NAME} + SOURCES + ${_FILE_NAME} + LINK_LIBRARIES + #benchmark::benchmark + GTest::gmock + GTest::gtest_main + ) + endforeach() +endif() diff --git a/ortools/math_opt/constraints/util/CMakeLists.txt b/ortools/math_opt/constraints/util/CMakeLists.txt index 151755f284..1c7aa149dc 100644 --- a/ortools/math_opt/constraints/util/CMakeLists.txt +++ b/ortools/math_opt/constraints/util/CMakeLists.txt @@ -25,3 +25,21 @@ target_include_directories(${NAME} PUBLIC target_link_libraries(${NAME} PRIVATE absl::strings ${PROJECT_NAMESPACE}::math_opt_proto) + +if(BUILD_TESTING) + file(GLOB _TEST_SRCS "*_test.cc") + foreach(_FULL_FILE_NAME IN LISTS _TEST_SRCS) + get_filename_component(_NAME ${_FULL_FILE_NAME} NAME_WE) + get_filename_component(_FILE_NAME ${_FULL_FILE_NAME} NAME) + ortools_cxx_test( + NAME + math_opt_constraints_util_${_NAME} + SOURCES + ${_FILE_NAME} + LINK_LIBRARIES + #benchmark::benchmark + GTest::gmock + GTest::gtest_main + ) + endforeach() +endif() diff --git a/ortools/math_opt/cpp/CMakeLists.txt b/ortools/math_opt/cpp/CMakeLists.txt index 9b38d22c55..a4e3b0e0dd 100644 --- a/ortools/math_opt/cpp/CMakeLists.txt +++ b/ortools/math_opt/cpp/CMakeLists.txt @@ -25,3 +25,19 @@ target_include_directories(${NAME} PUBLIC target_link_libraries(${NAME} PRIVATE absl::strings ${PROJECT_NAMESPACE}::math_opt_proto) + +ortools_cxx_library( + NAME + math_opt_matchers + SOURCES + "matchers.cc" + "matchers.h" + TYPE + SHARED 
+ LINK_LIBRARIES + absl::log + absl::status + absl::strings + GTest::gmock + TESTING +) diff --git a/ortools/math_opt/solver_tests/CMakeLists.txt b/ortools/math_opt/solver_tests/CMakeLists.txt new file mode 100644 index 0000000000..f4a92b5147 --- /dev/null +++ b/ortools/math_opt/solver_tests/CMakeLists.txt @@ -0,0 +1,369 @@ +# Copyright 2010-2024 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set(_PREFIX math_opt) + +ortools_cxx_library( + NAME + ${_PREFIX}_base_solver_test + SOURCES + "base_solver_test.cc" + "base_solver_test.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_callback_tests + SOURCES + "callback_tests.cc" + "callback_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + absl::status + absl::strings + ortools::math_opt_matchers + ortools::math_opt_base_solver_test + ortools::math_opt_test_models + TESTING +) + + +ortools_cxx_library( + NAME + ${_PREFIX}_status_tests + SOURCES + "status_tests.cc" + "status_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + ortools::math_opt_matchers + ortools::math_opt_test_models + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_lp_tests + SOURCES + "lp_tests.cc" + "lp_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + ortools::math_opt_matchers + ortools::math_opt_base_solver_test + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_lp_incomplete_solve_tests + SOURCES + 
"lp_incomplete_solve_tests.cc" + "lp_incomplete_solve_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + ortools::math_opt_matchers + ortools::math_opt_test_models + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_invalid_input_tests + SOURCES + "invalid_input_tests.cc" + "invalid_input_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + ortools::math_opt_base_solver_test + TESTING +) + +ortools_cxx_test( + NAME + ${_PREFIX}_unregistered_solver_test + SOURCES + "unregistered_solver_test.cc" + LINK_LIBRARIES + GTest::gmock + GTest::gmock_main +) + +ortools_cxx_library( + NAME + ${_PREFIX}_mip_tests + SOURCES + "mip_tests.cc" + "mip_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + absl::status + ortools::math_opt_matchers + ortools::math_opt_base_solver_test + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_ip_model_solve_parameters_tests + SOURCES + "ip_model_solve_parameters_tests.cc" + "ip_model_solve_parameters_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + ortools::math_opt_matchers + ortools::math_opt_base_solver_test + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_ip_multiple_solutions_tests + SOURCES + "ip_multiple_solutions_tests.cc" + "ip_multiple_solutions_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::strings + ortools::math_opt_matchers + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_lp_model_solve_parameters_tests + SOURCES + "lp_model_solve_parameters_tests.cc" + "lp_model_solve_parameters_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + ortools::math_opt_matchers + ortools::math_opt_base_solver_test + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_lp_parameter_tests + SOURCES + "lp_parameter_tests.cc" + "lp_parameter_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + absl::status + absl::strings + ortools::math_opt_matchers + TESTING +) + +ortools_cxx_library( + NAME + 
${_PREFIX}_lp_initial_basis_tests + SOURCES + "lp_initial_basis_tests.cc" + "lp_initial_basis_tests.h" + TYPE + SHARED + LINK_LIBRARIES + absl::log + ortools::math_opt_base_solver_test + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_ip_parameter_tests + SOURCES + "ip_parameter_tests.cc" + "ip_parameter_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + ortools::math_opt_matchers + ortools::math_opt_test_models + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_multi_objective_tests + SOURCES + "multi_objective_tests.cc" + "multi_objective_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + ortools::math_opt_matchers + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_qp_tests + SOURCES + "qp_tests.cc" + "qp_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + ortools::math_opt_matchers + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_qc_tests + SOURCES + "qc_tests.cc" + "qc_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + ortools::math_opt_matchers + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_second_order_cone_tests + SOURCES + "second_order_cone_tests.cc" + "second_order_cone_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + ortools::math_opt_matchers + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_logical_constraint_tests + SOURCES + "logical_constraint_tests.cc" + "logical_constraint_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + ortools::math_opt_matchers + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_test_models + SOURCES + "test_models.cc" + "test_models.h" + TYPE + SHARED + LINK_LIBRARIES + absl::log + absl::strings + TESTING +) + +ortools_cxx_test( + NAME + ${_PREFIX}_test_models_test + SOURCES + "test_models_test.cc" + LINK_LIBRARIES + GTest::gmock + GTest::gmock_main + ortools::math_opt_test_models + ortools::math_opt_matchers + #ortools::math_opt_glop_solver + 
#ortools::math_opt_gscipt_solver +) + +ortools_cxx_library( + NAME + ${_PREFIX}_generic_tests + SOURCES + "generic_tests.cc" + "generic_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + ortools::math_opt_matchers + ortools::math_opt_test_models + TESTING +) + +ortools_cxx_library( + NAME + ${_PREFIX}_infeasible_subsystem_tests + SOURCES + "infeasible_subsystem_tests.cc" + "infeasible_subsystem_tests.h" + TYPE + SHARED + LINK_LIBRARIES + GTest::gmock + absl::log + absl::status + absl::strings + absl::time + ortools::math_opt_matchers + TESTING +) diff --git a/ortools/math_opt/solvers/CMakeLists.txt b/ortools/math_opt/solvers/CMakeLists.txt index a5eac44747..2e71e90511 100644 --- a/ortools/math_opt/solvers/CMakeLists.txt +++ b/ortools/math_opt/solvers/CMakeLists.txt @@ -52,3 +52,183 @@ target_link_libraries(${NAME} PRIVATE $<$:Eigen3::Eigen> $<$:libscip> ${PROJECT_NAMESPACE}::math_opt_proto) + +if(USE_SCIP) + ortools_cxx_test( + NAME + math_opt_solvers_gscip_solver_test + SOURCES + "gscip_solver_test.cc" + LINK_LIBRARIES + GTest::gmock + GTest::gmock_main + absl::status + ortools::math_opt_matchers + ortools::math_opt_callback_tests + ortools::math_opt_generic_tests + ortools::math_opt_infeasible_subsystem_tests + ortools::math_opt_invalid_input_tests + ortools::math_opt_ip_model_solve_parameters_tests + ortools::math_opt_ip_multiple_solutions_tests + ortools::math_opt_ip_parameter_tests + ortools::math_opt_logical_constraint_tests + ortools::math_opt_mip_tests + ortools::math_opt_multi_objective_tests + ortools::math_opt_qc_tests + ortools::math_opt_qp_tests + ortools::math_opt_second_order_cone_tests + ortools::math_opt_status_tests + ) +endif() + +if(USE_GLOP) + ortools_cxx_test( + NAME + math_opt_solvers_glop_solver_test + SOURCES + "glop_solver_test.cc" + LINK_LIBRARIES + GTest::gmock + GTest::gmock_main + absl::status + ortools::math_opt_matchers + ortools::math_opt_callback_tests + ortools::math_opt_generic_tests + 
ortools::math_opt_infeasible_subsystem_tests + ortools::math_opt_invalid_input_tests + ortools::math_opt_logical_constraint_tests + ortools::math_opt_lp_incomplete_solve_tests + ortools::math_opt_lp_initial_basis_tests + ortools::math_opt_lp_model_solve_parameters_tests + ortools::math_opt_lp_parameter_tests + ortools::math_opt_lp_tests + ortools::math_opt_multi_objective_tests + ortools::math_opt_qc_tests + ortools::math_opt_qp_tests + ortools::math_opt_second_order_cone_tests + ortools::math_opt_status_tests + ) +endif() + +ortools_cxx_test( + NAME + math_opt_solvers_cp_sat_solver_test + SOURCES + "cp_sat_solver_test.cc" + LINK_LIBRARIES + GTest::gmock + GTest::gmock_main + absl::status + ortools::math_opt_matchers + ortools::math_opt_callback_tests + ortools::math_opt_generic_tests + ortools::math_opt_infeasible_subsystem_tests + ortools::math_opt_invalid_input_tests + ortools::math_opt_ip_model_solve_parameters_tests + ortools::math_opt_ip_multiple_solutions_tests + ortools::math_opt_ip_parameter_tests + ortools::math_opt_logical_constraint_tests + ortools::math_opt_mip_tests + ortools::math_opt_multi_objective_tests + ortools::math_opt_qc_tests + ortools::math_opt_qp_tests + ortools::math_opt_second_order_cone_tests + ortools::math_opt_status_tests +) + +ortools_cxx_test( + NAME + math_opt_solvers_message_callback_data_test + SOURCES + "message_callback_data_test.cc" + LINK_LIBRARIES + GTest::gmock_main + absl::cleanup + absl::synchronization +) + +if(USE_PDLP) + ortools_cxx_test( + NAME + math_opt_solvers_pdlp_solver_test + SOURCES + "pdlp_solver_test.cc" + LINK_LIBRARIES + GTest::gmock + GTest::gmock_main + absl::status + ortools::math_opt_callback_tests + ortools::math_opt_generic_tests + ortools::math_opt_infeasible_subsystem_tests + ortools::math_opt_invalid_input_tests + ortools::math_opt_logical_constraint_tests + ortools::math_opt_lp_incomplete_solve_tests + ortools::math_opt_lp_initial_basis_tests + ortools::math_opt_lp_model_solve_parameters_tests + 
ortools::math_opt_lp_parameter_tests + ortools::math_opt_lp_tests + ortools::math_opt_multi_objective_tests + ortools::math_opt_qc_tests + ortools::math_opt_qp_tests + ortools::math_opt_second_order_cone_tests + ortools::math_opt_status_tests + ) +endif() + +if(USE_GLPK) + ortools_cxx_test( + NAME + math_opt_solvers_glpk_solver_test + SOURCES + "glpk_solver_test.cc" + LINK_LIBRARIES + GTest::gmock + GTest::gmock_main + absl::status + absl::time + ortools::math_opt_matchers + ortools::math_opt_callback_tests + ortools::math_opt_generic_tests + ortools::math_opt_infeasible_subsystem_tests + ortools::math_opt_invalid_input_tests + ortools::math_opt_ip_model_solve_parameters_tests + ortools::math_opt_ip_parameter_tests + ortools::math_opt_logical_constraint_tests + ortools::math_opt_lp_incomplete_solve_tests + ortools::math_opt_lp_model_solve_parameters_tests + ortools::math_opt_lp_parameter_tests + ortools::math_opt_lp_tests + ortools::math_opt_mip_tests + ortools::math_opt_multi_objective_tests + ortools::math_opt_qc_tests + ortools::math_opt_qp_tests + ortools::math_opt_second_order_cone_tests + ortools::math_opt_status_tests + ) +endif() + +if(USE_HIGHS) + ortools_cxx_test( + NAME + math_opt_solvers_highs_solver_test + SOURCES + "highs_solver_test.cc" + LINK_LIBRARIES + GTest::gmock + GTest::gmock_main + absl::status + ortools::math_opt_matchers + ortools::math_opt_callback_tests + ortools::math_opt_generic_tests + ortools::math_opt_infeasible_subsystem_tests + ortools::math_opt_ip_model_solve_parameters_tests + ortools::math_opt_ip_parameter_tests + #ortools::math_opt_logical_constraint_tests + #ortools::math_opt_lp_incomplete_solve_tests + ortools::math_opt_lp_model_solve_parameters_tests + ortools::math_opt_lp_parameter_tests + ortools::math_opt_lp_tests + ortools::math_opt_mip_tests + ortools::math_opt_status_tests + ) +endif() From e23077cb3134140e90856462fe42158b3364ab98 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Wed, 9 Oct 2024 01:28:31 +0200 Subject: 
[PATCH 064/105] cmake: build deps as shared --- cmake/dependencies/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index 6c49336ba8..c86c82e53f 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -49,7 +49,7 @@ endif() include(FetchContent) set(FETCHCONTENT_QUIET OFF) set(FETCHCONTENT_UPDATES_DISCONNECTED ON) -set(BUILD_SHARED_LIBS OFF) +set(BUILD_SHARED_LIBS ON) set(CMAKE_POSITION_INDEPENDENT_CODE ON) set(BUILD_TESTING OFF) set(CMAKE_Fortran_COMPILER OFF) @@ -100,7 +100,7 @@ if(BUILD_Protobuf) message(CHECK_START "Fetching Protobuf") list(APPEND CMAKE_MESSAGE_INDENT " ") set(protobuf_BUILD_TESTS OFF) - set(protobuf_BUILD_SHARED_LIBS OFF) + set(protobuf_BUILD_SHARED_LIBS ON) set(protobuf_BUILD_EXPORT OFF) set(protobuf_MSVC_STATIC_RUNTIME OFF) #set(protobuf_BUILD_LIBUPB ON) From e95eb8ba07d87b214f61d82730db229b79294a30 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 9 Oct 2024 10:47:23 +0200 Subject: [PATCH 065/105] [CP-SAT] bugfixes; improving the connection to glop --- ortools/lp_data/lp_data_utils.cc | 8 ++ ortools/lp_data/lp_data_utils.h | 1 + ortools/sat/cp_model_presolve.cc | 2 + ortools/sat/cuts.h | 3 + ortools/sat/linear_programming_constraint.cc | 144 +++++++++---------- ortools/sat/linear_programming_constraint.h | 14 +- 6 files changed, 92 insertions(+), 80 deletions(-) diff --git a/ortools/lp_data/lp_data_utils.cc b/ortools/lp_data/lp_data_utils.cc index 126121e65b..d234ff189c 100644 --- a/ortools/lp_data/lp_data_utils.cc +++ b/ortools/lp_data/lp_data_utils.cc @@ -184,6 +184,14 @@ Fractional LpScalingHelper::UnscaleDualValue(RowIndex row, return value / (RowUnscalingFactor(row) * objective_scaling_factor_); } +Fractional LpScalingHelper::UnscaleLeftSolveValue(RowIndex row, + Fractional value) const { + // In the scaled domain, we are takeing a sum coeff * scaling * row, + // so to get the same effect in 
the unscaled domain, we want to multiply by + // (coeff * scaling). + return value / RowUnscalingFactor(row); +} + Fractional LpScalingHelper::UnscaleConstraintActivity(RowIndex row, Fractional value) const { // The activity move with the row_scale and the bound_scaling_factor. diff --git a/ortools/lp_data/lp_data_utils.h b/ortools/lp_data/lp_data_utils.h index 37e94249ce..e1c94bce25 100644 --- a/ortools/lp_data/lp_data_utils.h +++ b/ortools/lp_data/lp_data_utils.h @@ -70,6 +70,7 @@ class LpScalingHelper { Fractional UnscaleVariableValue(ColIndex col, Fractional value) const; Fractional UnscaleReducedCost(ColIndex col, Fractional value) const; Fractional UnscaleDualValue(RowIndex row, Fractional value) const; + Fractional UnscaleLeftSolveValue(RowIndex row, Fractional value) const; Fractional UnscaleConstraintActivity(RowIndex row, Fractional value) const; // Unscale a row vector v such that v.B = unit_row. When basis_col is the diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 5454548ceb..9ae2eca5e4 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -1787,6 +1787,8 @@ bool CpModelPresolver::PresolveIntProd(ConstraintProto* ct) { linear_for_true); AddLinearExpressionToLinearConstraint(ct->int_prod().target(), -1, linear_for_true); + context_->CanonicalizeLinearConstraint(constraint_for_false); + context_->CanonicalizeLinearConstraint(constraint_for_true); context_->UpdateRuleStats("int_prod: boolean affine term"); context_->UpdateNewConstraintsVariableUsage(); return RemoveConstraint(ct); diff --git a/ortools/sat/cuts.h b/ortools/sat/cuts.h index 245a440c87..28359dc474 100644 --- a/ortools/sat/cuts.h +++ b/ortools/sat/cuts.h @@ -63,6 +63,9 @@ struct CutTerm { bool IsBoolean() const { return bound_diff == 1; } bool IsSimple() const { return expr_coeffs[1] == 0; } bool HasRelevantLpValue() const { return lp_value > 1e-2; } + bool IsFractional() const { + return std::abs(lp_value - 
std::round(lp_value)) > 1e-4; + } double LpDistToMaxValue() const { return static_cast(bound_diff.value()) - lp_value; } diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index d308b1ae9d..52fb78652c 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -288,7 +288,6 @@ LinearProgrammingConstraint::LinearProgrammingConstraint( expanded_reduced_costs_(*model->GetOrCreate()) { // Tweak the default parameters to make the solve incremental. simplex_params_.set_use_dual_simplex(true); - simplex_params_.set_cost_scaling(glop::GlopParameters::MEAN_COST_SCALING); simplex_params_.set_primal_feasibility_tolerance( parameters_.lp_primal_tolerance()); simplex_params_.set_dual_feasibility_tolerance( @@ -1060,7 +1059,7 @@ bool LinearProgrammingConstraint::PreprocessCut(IntegerVariable first_slack, } bool some_fixed_terms = false; - bool some_relevant_positions = false; + bool some_fractional_positions = false; for (CutTerm& term : cut->terms) { const absl::int128 magnitude128 = term.coeff.value(); const absl::int128 range = @@ -1138,8 +1137,8 @@ bool LinearProgrammingConstraint::PreprocessCut(IntegerVariable first_slack, if (term.bound_diff == 0) { some_fixed_terms = true; } else { - if (term.HasRelevantLpValue()) { - some_relevant_positions = true; + if (term.IsFractional()) { + some_fractional_positions = true; } } } @@ -1153,7 +1152,7 @@ bool LinearProgrammingConstraint::PreprocessCut(IntegerVariable first_slack, } cut->terms.resize(new_size); } - return some_relevant_positions; + return some_fractional_positions; } bool LinearProgrammingConstraint::AddCutFromConstraints( @@ -1192,14 +1191,16 @@ bool LinearProgrammingConstraint::AddCutFromConstraints( ImpliedBoundsProcessor* ib_processor = nullptr; { bool some_ints = false; - bool some_relevant_positions = false; + bool some_fractional_positions = false; for (const CutTerm& term : base_ct_.terms) { if 
(term.bound_diff > 1) some_ints = true; - if (term.HasRelevantLpValue()) some_relevant_positions = true; + if (term.IsFractional()) { + some_fractional_positions = true; + } } // If all value are integer, we will not be able to cut anything. - if (!some_relevant_positions) return false; + if (!some_fractional_positions) return false; if (some_ints) ib_processor = &implied_bounds_processor_; } @@ -1356,7 +1357,7 @@ bool LinearProgrammingConstraint::PostprocessAndAddCut( // TODO(user): Ideally we should detect this even earlier during the cut // generation. if (cut.ComputeViolation() < 1e-4) { - VLOG(2) << "Bad cut " << name << " " << info; + VLOG(3) << "Bad cut " << name << " " << info; ++num_bad_cuts_; return false; } @@ -1428,13 +1429,6 @@ bool LinearProgrammingConstraint::PostprocessAndAddCut( // it triggers. We should add heuristics to abort earlier if a cut is not // promising. Or only test a few positions and not all rows. void LinearProgrammingConstraint::AddCGCuts() { - // We used not to do "classical" gomory and instead used this heuristic. - // It is usually faster but on some problem like neos*creuse, this do not find - // good cut though. - // - // TODO(user): Make the cut generation lighter and try this at false. - const bool old_gomory = true; - // Note that the index is permuted and do not correspond to a row. const RowIndex num_rows(integer_lp_.size()); for (RowIndex index(0); index < num_rows; ++index) { @@ -1442,12 +1436,18 @@ void LinearProgrammingConstraint::AddCGCuts() { const ColIndex basis_col = simplex_.GetBasis(index); - // If this variable is a slack, we ignore it. This is because the - // corresponding row is not tight under the given lp values. - if (old_gomory && basis_col >= integer_variables_.size()) continue; + // We used to skip slack and also not to do "classical" gomory and instead + // call IgnoreTrivialConstraintMultipliers() heuristic. 
It is usually faster + // but on some problem like neos*creuse, this do not find good cut though. + // + // TODO(user): Tune this. + if (basis_col >= integer_variables_.size()) continue; - // TODO(user): If the variable is a slack, the unscaling is wrong! - const Fractional lp_value = GetVariableValueAtCpScale(basis_col); + // Get he variable value at cp-scale. Similar to GetVariableValueAtCpScale() + // but this works for slack variable too. + const Fractional lp_value = + simplex_.GetVariableValue(basis_col) / + scaler_.VariableScalingFactorWithSlack(basis_col); // Only consider fractional basis element. We ignore element that are close // to an integer to reduce the amount of positions we try. @@ -1457,6 +1457,9 @@ void LinearProgrammingConstraint::AddCGCuts() { // also be just under it. if (std::abs(lp_value - std::round(lp_value)) < 0.01) continue; + // We multiply by row_factors_ directly, which might be slighly more precise + // than dividing by 1/factor like UnscaleLeftSolveValue() does. + // // TODO(user): Avoid code duplication between the sparse/dense path. 
tmp_lp_multipliers_.clear(); const glop::ScatteredRow& lambda = simplex_.GetUnitRowLeftInverse(index); @@ -1464,14 +1467,14 @@ void LinearProgrammingConstraint::AddCGCuts() { for (RowIndex row(0); row < num_rows; ++row) { const double value = lambda.values[glop::RowToColIndex(row)]; if (std::abs(value) < kZeroTolerance) continue; - tmp_lp_multipliers_.push_back({row, value}); + tmp_lp_multipliers_.push_back({row, row_factors_[row.value()] * value}); } } else { for (const ColIndex col : lambda.non_zeros) { const RowIndex row = glop::ColToRowIndex(col); const double value = lambda.values[col]; if (std::abs(value) < kZeroTolerance) continue; - tmp_lp_multipliers_.push_back({row, value}); + tmp_lp_multipliers_.push_back({row, row_factors_[row.value()] * value}); } } @@ -1480,22 +1483,30 @@ void LinearProgrammingConstraint::AddCGCuts() { IntegerValue scaling; for (int i = 0; i < 2; ++i) { + tmp_cg_multipliers_ = tmp_lp_multipliers_; if (i == 1) { // Try other sign. // // TODO(user): Maybe add an heuristic to know beforehand which sign to // use? - for (std::pair& p : tmp_lp_multipliers_) { + for (std::pair& p : tmp_cg_multipliers_) { p.second = -p.second; } } - // TODO(user): We use a lower value here otherwise we might run into - // overflow while computing the cut. This should be fixable. - tmp_integer_multipliers_ = ScaleLpMultiplier( - /*take_objective_into_account=*/false, - /*ignore_trivial_constraints=*/old_gomory, tmp_lp_multipliers_, - &scaling); + // Remove constraints that shouldn't be helpful. + // + // In practice, because we can complement the slack, it might still be + // useful to have some constraint with a trivial upper bound. That said, + // this does looks weird, maybe we miss something in our one-constraint + // cut generation if it is useful to add such a term. Investigate on + // neos-555884. 
+ if (true) { + IgnoreTrivialConstraintMultipliers(&tmp_cg_multipliers_); + if (tmp_cg_multipliers_.size() <= 1) continue; + } + tmp_integer_multipliers_ = ScaleMultipliers( + tmp_cg_multipliers_, /*take_objective_into_account=*/false, &scaling); if (scaling != 0) { AddCutFromConstraints("CG", tmp_integer_multipliers_); } @@ -2090,50 +2101,36 @@ bool LinearProgrammingConstraint::ScalingCanOverflow( return bound >= overflow_cap; } -std::vector> -LinearProgrammingConstraint::ScaleLpMultiplier( - bool take_objective_into_account, bool ignore_trivial_constraints, - absl::Span> lp_multipliers, - IntegerValue* scaling, int64_t overflow_cap) const { - *scaling = 0; - - // First unscale the values with the LP scaling and remove bad cases. - tmp_cp_multipliers_.clear(); - for (const std::pair& p : lp_multipliers) { +void LinearProgrammingConstraint::IgnoreTrivialConstraintMultipliers( + std::vector>* lp_multipliers) { + int new_size = 0; + for (const std::pair& p : *lp_multipliers) { const RowIndex row = p.first; const Fractional lp_multi = p.second; - - // We ignore small values since these are likely errors and will not - // contribute much to the new lp constraint anyway. - if (std::abs(lp_multi) < kZeroTolerance) continue; - - // Remove constraints that shouldn't be helpful. - // - // In practice, because we can complement the slack, it might still be - // useful to have some constraint with a trivial upper bound. 
- if (ignore_trivial_constraints) { - if (lp_multi > 0.0 && integer_lp_[row].ub_is_trivial) { - continue; - } - if (lp_multi < 0.0 && integer_lp_[row].lb_is_trivial) { - continue; - } - } - - tmp_cp_multipliers_.push_back( - {row, scaler_.UnscaleDualValue(row, lp_multi)}); + if (lp_multi > 0.0 && integer_lp_[row].ub_is_trivial) continue; + if (lp_multi < 0.0 && integer_lp_[row].lb_is_trivial) continue; + (*lp_multipliers)[new_size++] = p; } + lp_multipliers->resize(new_size); +} + +std::vector> +LinearProgrammingConstraint::ScaleMultipliers( + absl::Span> lp_multipliers, + bool take_objective_into_account, IntegerValue* scaling) const { + *scaling = 0; std::vector> integer_multipliers; - if (tmp_cp_multipliers_.empty()) { + if (lp_multipliers.empty()) { // Empty linear combinaison. return integer_multipliers; } // TODO(user): we currently do not support scaling down, so we just abort // if with a scaling of 1, we reach the overflow_cap. + const int64_t overflow_cap = std::numeric_limits::max(); if (ScalingCanOverflow(/*power=*/0, take_objective_into_account, - tmp_cp_multipliers_, overflow_cap)) { + lp_multipliers, overflow_cap)) { ++num_scaling_issues_; return integer_multipliers; } @@ -2151,7 +2148,7 @@ LinearProgrammingConstraint::ScaleLpMultiplier( if (candidate >= 63) return false; return !ScalingCanOverflow(candidate, take_objective_into_account, - tmp_cp_multipliers_, overflow_cap); + lp_multipliers, overflow_cap); }); *scaling = int64_t{1} << power; @@ -2159,7 +2156,7 @@ LinearProgrammingConstraint::ScaleLpMultiplier( // Note that we use the exact same formula as in ScalingCanOverflow(). 
int64_t gcd = scaling->value(); const double scaling_as_double = static_cast(scaling->value()); - for (const auto [row, double_coeff] : tmp_cp_multipliers_) { + for (const auto [row, double_coeff] : lp_multipliers) { const IntegerValue coeff(std::round(double_coeff * scaling_as_double)); if (coeff != 0) { gcd = std::gcd(gcd, std::abs(coeff.value())); @@ -2431,7 +2428,7 @@ bool LinearProgrammingConstraint::PropagateExactLpReason() { for (RowIndex row(0); row < num_rows; ++row) { const double value = -simplex_.GetDualValue(row); if (std::abs(value) < kZeroTolerance) continue; - tmp_lp_multipliers_.push_back({row, value}); + tmp_lp_multipliers_.push_back({row, scaler_.UnscaleDualValue(row, value)}); } // In this case, the LP lower bound match the basic objective "constraint" @@ -2454,9 +2451,9 @@ bool LinearProgrammingConstraint::PropagateExactLpReason() { } IntegerValue scaling = 0; - tmp_integer_multipliers_ = ScaleLpMultiplier( - take_objective_into_account, - /*ignore_trivial_constraints=*/true, tmp_lp_multipliers_, &scaling); + IgnoreTrivialConstraintMultipliers(&tmp_lp_multipliers_); + tmp_integer_multipliers_ = ScaleMultipliers( + tmp_lp_multipliers_, take_objective_into_account, &scaling); if (scaling == 0) { VLOG(1) << simplex_.GetProblemStatus(); VLOG(1) << "Issue while computing the exact LP reason. Aborting."; @@ -2514,11 +2511,14 @@ bool LinearProgrammingConstraint::PropagateExactDualRay() { for (RowIndex row(0); row < ray.size(); ++row) { const double value = ray[row]; if (std::abs(value) < kZeroTolerance) continue; - tmp_lp_multipliers_.push_back({row, value}); + + // This is the same as UnscaleLeftSolveValue(). Note that we don't need to + // scale by the objective factor here like we do in UnscaleDualValue(). 
+ tmp_lp_multipliers_.push_back({row, row_factors_[row.value()] * value}); } - tmp_integer_multipliers_ = ScaleLpMultiplier( - /*take_objective_into_account=*/false, - /*ignore_trivial_constraints=*/true, tmp_lp_multipliers_, &scaling); + IgnoreTrivialConstraintMultipliers(&tmp_lp_multipliers_); + tmp_integer_multipliers_ = ScaleMultipliers( + tmp_lp_multipliers_, /*take_objective_into_account=*/false, &scaling); if (scaling == 0) { VLOG(1) << "Isse while computing the exact dual ray reason. Aborting."; return true; diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index bdf34fc686..a3f34096e9 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -328,11 +328,11 @@ class LinearProgrammingConstraint : public PropagatorInterface, // // Note that this will loose some precision, but our subsequent computation // will still be exact as it will work for any set of multiplier. - std::vector> ScaleLpMultiplier( - bool take_objective_into_account, bool ignore_trivial_constraints, + void IgnoreTrivialConstraintMultipliers( + std::vector>* lp_multipliers); + std::vector> ScaleMultipliers( absl::Span> lp_multipliers, - IntegerValue* scaling, - int64_t overflow_cap = std::numeric_limits::max()) const; + bool take_objective_into_account, IntegerValue* scaling) const; // Can we have an overflow if we scale each coefficients with // std::round(std::ldexp(coeff, power)) ? @@ -489,13 +489,11 @@ class LinearProgrammingConstraint : public PropagatorInterface, std::vector tmp_slack_rows_; std::vector> tmp_terms_; - // Used by AddCGCuts(). + // Used by ScaleMultipliers(). std::vector> tmp_lp_multipliers_; + std::vector> tmp_cg_multipliers_; std::vector> tmp_integer_multipliers_; - // Used by ScaleLpMultiplier(). 
- mutable std::vector> tmp_cp_multipliers_; - // Structures used for mirroring IntegerVariables inside the underlying LP // solver: an integer variable var is mirrored by mirror_lp_variable_[var]. // Note that these indices are dense in [0, mirror_lp_variable_.size()] so From 2245401fca1b5a343445f7ce0c78a997a5e70ba7 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 9 Oct 2024 12:37:20 +0200 Subject: [PATCH 066/105] fix python linear_solver swig and test --- ortools/linear_solver/python/linear_solver.i | 4 ++-- ortools/linear_solver/python/lp_test.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ortools/linear_solver/python/linear_solver.i b/ortools/linear_solver/python/linear_solver.i index 7a5bf5ef83..a475c55a77 100644 --- a/ortools/linear_solver/python/linear_solver.i +++ b/ortools/linear_solver/python/linear_solver.i @@ -374,8 +374,8 @@ PY_CONVERT(MPVariable); %rename (LookupVariable) operations_research::MPSolver::LookupVariableOrNull; %unignore operations_research::MPSolver::SetSolverSpecificParametersAsString; %unignore operations_research::MPSolver::NextSolution; -%unignore operations_research::MPSolver::ExportModelAsLpFormat; -%unignore operations_research::MPSolver::ExportModelAsMpsFormat; +%unignore operations_research::MPSolver::ExportModelAsLpFormat(bool); +%unignore operations_research::MPSolver::ExportModelAsMpsFormat(bool, bool); %unignore operations_research::MPSolver::WriteModelToMpsFile; %unignore operations_research::MPSolver::Write; diff --git a/ortools/linear_solver/python/lp_test.py b/ortools/linear_solver/python/lp_test.py index a52dc3467b..5b7c188880 100755 --- a/ortools/linear_solver/python/lp_test.py +++ b/ortools/linear_solver/python/lp_test.py @@ -349,7 +349,7 @@ class PyWrapLpTest(unittest.TestCase): sum_of_vars = sum([x1, x2, x3]) c2 = solver.Add(sum_of_vars <= 100.0, "OtherConstraintName") - mps_str = solver.ExportModelAsMpsFormat(fixed_format=False, obfuscated=False) + mps_str = 
solver.ExportModelAsMpsFormat(fixed_format=False, obfuscate=False) self.assertIn("ExportMps", mps_str) From f0647029ddf7eb68cb1f84a317ac6d93dfcedefd Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 9 Oct 2024 13:22:36 +0200 Subject: [PATCH 067/105] bazel runners do not support python 3.9 --- .github/workflows/amd64_linux_bazel.yml | 1 - .github/workflows/amd64_macos_bazel.yml | 1 - .github/workflows/amd64_windows_bazel.yml | 1 - 3 files changed, 3 deletions(-) diff --git a/.github/workflows/amd64_linux_bazel.yml b/.github/workflows/amd64_linux_bazel.yml index 7e4818dbbd..69625b4d1c 100644 --- a/.github/workflows/amd64_linux_bazel.yml +++ b/.github/workflows/amd64_linux_bazel.yml @@ -9,7 +9,6 @@ jobs: strategy: matrix: python: [ - {version: '3.9'}, {version: '3.10'}, {version: '3.11'}, {version: '3.12'}, diff --git a/.github/workflows/amd64_macos_bazel.yml b/.github/workflows/amd64_macos_bazel.yml index a478b5b604..2f5990146d 100644 --- a/.github/workflows/amd64_macos_bazel.yml +++ b/.github/workflows/amd64_macos_bazel.yml @@ -9,7 +9,6 @@ jobs: strategy: matrix: python: [ - {version: '3.9'}, {version: '3.10'}, {version: '3.11'}, {version: '3.12'}, diff --git a/.github/workflows/amd64_windows_bazel.yml b/.github/workflows/amd64_windows_bazel.yml index f33ca41ab5..30c9245878 100644 --- a/.github/workflows/amd64_windows_bazel.yml +++ b/.github/workflows/amd64_windows_bazel.yml @@ -10,7 +10,6 @@ jobs: matrix: runner: [windows-2022] python: [ - {version: '3.9'}, {version: '3.10'}, {version: '3.11'}, {version: '3.12'}, From 279f079f8cc62f8c873bfbc6ec1a4882de7cb768 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 9 Oct 2024 13:46:26 +0200 Subject: [PATCH 068/105] bump jinja2 dep --- bazel/notebook_requirements.in | 1 + bazel/notebook_requirements.txt | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/bazel/notebook_requirements.in b/bazel/notebook_requirements.in index b4285bbff7..1c78f86eb4 100644 --- 
a/bazel/notebook_requirements.in +++ b/bazel/notebook_requirements.in @@ -26,3 +26,4 @@ jupyter-server==2.14.2 tornado==6.4.1 Pygments==2.15.0 jsonschema==4.19.0 +jinja2==3.1.4 diff --git a/bazel/notebook_requirements.txt b/bazel/notebook_requirements.txt index afcaede6ae..f038789bb4 100644 --- a/bazel/notebook_requirements.txt +++ b/bazel/notebook_requirements.txt @@ -1,10 +1,10 @@ # -# This file is autogenerated by pip-compile with Python 3.12 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # bazel run //bazel:notebook_requirements.update # -absl-py==2.0.0 +absl-py==2.1.0 # via -r bazel/notebook_requirements.in anyio==4.0.0 # via @@ -85,8 +85,9 @@ isoduration==20.11.0 # via jsonschema jedi==0.19.0 # via ipython -jinja2==3.1.3 +jinja2==3.1.4 # via + # -r bazel/notebook_requirements.in # jupyter-server # jupyterlab # jupyterlab-server @@ -176,7 +177,7 @@ notebook-shim==0.2.3 # via # jupyterlab # notebook -numpy==2.1.0 +numpy==2.1.1 # via # -r bazel/notebook_requirements.in # pandas @@ -215,7 +216,7 @@ prometheus-client==0.17.1 # via jupyter-server prompt-toolkit==3.0.39 # via ipython -protobuf==5.27.3 +protobuf==5.27.5 # via # -r bazel/notebook_requirements.in # mypy-protobuf From 5766fabd321a5ea484cc33d37991a11cd563c5f7 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 9 Oct 2024 14:02:15 +0200 Subject: [PATCH 069/105] fix java --- examples/java/LinearProgramming.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/java/LinearProgramming.java b/examples/java/LinearProgramming.java index 1c764f4fd0..79d9636976 100644 --- a/examples/java/LinearProgramming.java +++ b/examples/java/LinearProgramming.java @@ -65,7 +65,7 @@ public class LinearProgramming { System.out.println("Number of constraints = " + solver.numConstraints()); if (printModel) { - String model = solver.exportModelAsLpFormat(); + String model = solver.exportModelAsLpFormat(/* obfuscate = */false); System.out.println(model); } 
From cfb175ec6252f095f246bf028a257f9734e2436b Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 9 Oct 2024 15:52:14 +0200 Subject: [PATCH 070/105] remove python 3.9 from arm64 mac bazel --- .github/workflows/arm64_macos_bazel.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/arm64_macos_bazel.yml b/.github/workflows/arm64_macos_bazel.yml index 59d67a5d1e..f76442597d 100644 --- a/.github/workflows/arm64_macos_bazel.yml +++ b/.github/workflows/arm64_macos_bazel.yml @@ -9,7 +9,6 @@ jobs: strategy: matrix: python: [ - {version: '3.9'}, {version: '3.10'}, {version: '3.11'}, {version: '3.12'}, From 8f4b97c7370b4e3650880fd8b35d7697f640cfe6 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 9 Oct 2024 15:57:14 +0200 Subject: [PATCH 071/105] add default parameter to exportModelAsLpFormat --- ortools/linear_solver/java/linear_solver.i | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ortools/linear_solver/java/linear_solver.i b/ortools/linear_solver/java/linear_solver.i index 2df3c50df6..83d9db7e56 100644 --- a/ortools/linear_solver/java/linear_solver.i +++ b/ortools/linear_solver/java/linear_solver.i @@ -188,7 +188,7 @@ PROTO2_RETURN( /** * Export the loaded model in LP format. 
*/ - std::string exportModelAsLpFormat(bool obfuscate) { + std::string exportModelAsLpFormat(bool obfuscate = false) { operations_research::MPModelExportOptions options; options.obfuscate = obfuscate; operations_research::MPModelProto model; From b76e6e9865478817152e2384db56ff62f65373c8 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 9 Oct 2024 15:57:36 +0200 Subject: [PATCH 072/105] [CP-SAT] cleanup presolve code around affine relations --- ortools/sat/presolve_context.cc | 79 ++++++++++++++------------------- ortools/sat/presolve_context.h | 10 ++--- 2 files changed, 39 insertions(+), 50 deletions(-) diff --git a/ortools/sat/presolve_context.cc b/ortools/sat/presolve_context.cc index 870ba4004c..5a819566a3 100644 --- a/ortools/sat/presolve_context.cc +++ b/ortools/sat/presolve_context.cc @@ -863,36 +863,29 @@ bool PresolveContext::AddRelation(int x, int y, int64_t c, int64_t o, return repo->TryAdd(x, y, c, o, allow_rep_x, allow_rep_y); } -bool PresolveContext::PropagateAffineRelation(int ref) { - const int var = PositiveRef(ref); +bool PresolveContext::PropagateAffineRelation(int var) { + DCHECK(RefIsPositive(var)); const AffineRelation::Relation r = GetAffineRelation(var); if (r.representative == var) return true; return PropagateAffineRelation(var, r.representative, r.coeff, r.offset); } -bool PresolveContext::PropagateAffineRelation(int ref, int rep, int64_t coeff, +bool PresolveContext::PropagateAffineRelation(int var, int rep, int64_t coeff, int64_t offset) { - DCHECK(!DomainIsEmpty(ref)); + DCHECK(RefIsPositive(var)); + DCHECK(RefIsPositive(rep)); + DCHECK(!DomainIsEmpty(var)); DCHECK(!DomainIsEmpty(rep)); - if (!RefIsPositive(rep)) { - rep = NegatedRef(rep); - coeff = -coeff; - } - if (!RefIsPositive(ref)) { - ref = NegatedRef(ref); - offset = -offset; - coeff = -coeff; - } // Propagate domains both ways. 
// var = coeff * rep + offset - if (!IntersectDomainWith(rep, DomainOf(ref) + if (!IntersectDomainWith(rep, DomainOf(var) .AdditionWith(Domain(-offset)) .InverseMultiplicationBy(coeff))) { return false; } if (!IntersectDomainWith( - ref, + var, DomainOf(rep).MultiplicationBy(coeff).AdditionWith(Domain(offset)))) { return false; } @@ -1059,21 +1052,19 @@ void PresolveContext::PermuteHintValues(const SparsePermutation& perm) { perm.ApplyToDenseCollection(hint_has_value_); } -bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, +bool PresolveContext::StoreAffineRelation(int var_x, int var_y, int64_t coeff, int64_t offset, bool debug_no_recursion) { - CHECK_NE(coeff, 0); + DCHECK(RefIsPositive(var_x)); + DCHECK(RefIsPositive(var_y)); + DCHECK_NE(coeff, 0); if (is_unsat_) return false; if (hint_is_loaded_) { - const int var_x = PositiveRef(ref_x); - const int var_y = PositiveRef(ref_y); if (!hint_has_value_[var_y] && hint_has_value_[var_x]) { hint_has_value_[var_y] = true; - const int64_t x_mult = RefIsPositive(ref_x) ? 1 : -1; - const int64_t y_mult = RefIsPositive(ref_y) ? 1 : -1; - hint_[var_y] = (hint_[var_x] * x_mult - offset) / coeff * y_mult; - if (hint_[var_y] * coeff * y_mult + offset != hint_[var_x] * x_mult) { + hint_[var_y] = (hint_[var_x] - offset) / coeff; + if (hint_[var_y] * coeff + offset != hint_[var_x]) { // TODO(user): Do we implement a rounding to closest instead of // routing towards 0. UpdateRuleStats( @@ -1083,10 +1074,8 @@ bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, } #ifdef CHECK_HINT - const int64_t vx = - RefIsPositive(ref_x) ? hint_[ref_x] : -hint_[NegatedRef(ref_x)]; - const int64_t vy = - RefIsPositive(ref_y) ? 
hint_[ref_y] : -hint_[NegatedRef(ref_y)]; + const int64_t vx = hint_[var_x]; + const int64_t vy = hint_[var_y]; if (vx != vy * coeff + offset) { LOG(FATAL) << "Affine relation incompatible with hint: " << vx << " != " << vy << " * " << coeff << " + " << offset; @@ -1094,30 +1083,30 @@ bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, #endif // TODO(user): I am not 100% sure why, but sometimes the representative is - // fixed but that is not propagated to ref_x or ref_y and this causes issues. - if (!PropagateAffineRelation(ref_x)) return false; - if (!PropagateAffineRelation(ref_y)) return false; - if (!PropagateAffineRelation(ref_x, ref_y, coeff, offset)) return false; + // fixed but that is not propagated to var_x or var_y and this causes issues. + if (!PropagateAffineRelation(var_x)) return false; + if (!PropagateAffineRelation(var_y)) return false; + if (!PropagateAffineRelation(var_x, var_y, coeff, offset)) return false; - if (IsFixed(ref_x)) { - const int64_t lhs = DomainOf(ref_x).FixedValue() - offset; + if (IsFixed(var_x)) { + const int64_t lhs = DomainOf(var_x).FixedValue() - offset; if (lhs % std::abs(coeff) != 0) { return NotifyThatModelIsUnsat(); } UpdateRuleStats("affine: fixed"); - return IntersectDomainWith(ref_y, Domain(lhs / coeff)); + return IntersectDomainWith(var_y, Domain(lhs / coeff)); } - if (IsFixed(ref_y)) { - const int64_t value_x = DomainOf(ref_y).FixedValue() * coeff + offset; + if (IsFixed(var_y)) { + const int64_t value_x = DomainOf(var_y).FixedValue() * coeff + offset; UpdateRuleStats("affine: fixed"); - return IntersectDomainWith(ref_x, Domain(value_x)); + return IntersectDomainWith(var_x, Domain(value_x)); } // If both are already in the same class, we need to make sure the relations // are compatible. 
- const AffineRelation::Relation rx = GetAffineRelation(ref_x); - const AffineRelation::Relation ry = GetAffineRelation(ref_y); + const AffineRelation::Relation rx = GetAffineRelation(var_x); + const AffineRelation::Relation ry = GetAffineRelation(var_y); if (rx.representative == ry.representative) { // x = rx.coeff * rep + rx.offset; // y = ry.coeff * rep + ry.offset; @@ -1138,18 +1127,18 @@ bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, if (!IntersectDomainWith(rx.representative, Domain(unique_value))) { return false; } - if (!IntersectDomainWith(ref_x, + if (!IntersectDomainWith(var_x, Domain(unique_value * rx.coeff + rx.offset))) { return false; } - if (!IntersectDomainWith(ref_y, + if (!IntersectDomainWith(var_y, Domain(unique_value * ry.coeff + ry.offset))) { return false; } return true; } - // ref_x = coeff * ref_y + offset; + // var_x = coeff * var_y + offset; // rx.coeff * rep_x + rx.offset = // coeff * (ry.coeff * rep_y + ry.offset) + offset // @@ -1179,7 +1168,7 @@ bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, } // Re-add the relation now that a will resolve to a multiple of b. - return StoreAffineRelation(ref_x, ref_y, coeff, offset, + return StoreAffineRelation(var_x, var_y, coeff, offset, /*debug_no_recursion=*/true); } @@ -1239,8 +1228,8 @@ bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, // as possible and not all call site do it. // // TODO(user): I am not sure this is needed given the propagation above. - if (!PropagateAffineRelation(ref_x)) return false; - if (!PropagateAffineRelation(ref_y)) return false; + if (!PropagateAffineRelation(var_x)) return false; + if (!PropagateAffineRelation(var_y)) return false; // These maps should only contains representative, so only need to remap // either x or y. 
diff --git a/ortools/sat/presolve_context.h b/ortools/sat/presolve_context.h index fc05904c09..7b11b6b66e 100644 --- a/ortools/sat/presolve_context.h +++ b/ortools/sat/presolve_context.h @@ -324,7 +324,7 @@ class PresolveContext { bool CanonicalizeAffineVariable(int ref, int64_t coeff, int64_t mod, int64_t rhs); - // Adds the relation (ref_x = coeff * ref_y + offset) to the repository. + // Adds the relation (var_x = coeff * var_y + offset) to the repository. // Returns false if we detect infeasability because of this. // // Once the relation is added, it doesn't need to be enforced by a constraint @@ -332,7 +332,7 @@ class PresolveContext { // them to the proto at the end of the presolve. // // Note that this should always add a relation, even though it might need to - // create a new representative for both ref_x and ref_y in some cases. Like if + // create a new representative for both var_x and var_y in some cases. Like if // x = 3z and y = 5t are already added, if we add x = 2y, we have 3z = 10t and // can only resolve this by creating a new variable r such that z = 10r and t // = 3r. @@ -340,7 +340,7 @@ class PresolveContext { // All involved variables will be marked to appear in the special // kAffineRelationConstraint. This will allow to identify when a variable is // no longer needed (only appear there and is not a representative). - bool StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, int64_t offset, + bool StoreAffineRelation(int var_x, int var_y, int64_t coeff, int64_t offset, bool debug_no_recursion = false); // Adds the fact that ref_a == ref_b using StoreAffineRelation() above. @@ -362,8 +362,8 @@ class PresolveContext { // Makes sure the domain of ref and of its representative (ref = coeff * rep + // offset) are in sync. Returns false on unsat. 
- bool PropagateAffineRelation(int ref); - bool PropagateAffineRelation(int ref, int rep, int64_t coeff, int64_t offset); + bool PropagateAffineRelation(int var); + bool PropagateAffineRelation(int var, int rep, int64_t coeff, int64_t offset); // Creates the internal structure for any new variables in working_model. void InitializeNewDomains(); From 38bf143dae01dc64a7ceb3bbb1380e1bf472035d Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 9 Oct 2024 16:40:51 +0200 Subject: [PATCH 073/105] fix test for windows --- ortools/sat/work_assignment_test.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ortools/sat/work_assignment_test.cc b/ortools/sat/work_assignment_test.cc index 83de91dc08..8e3d785a21 100644 --- a/ortools/sat/work_assignment_test.cc +++ b/ortools/sat/work_assignment_test.cc @@ -146,9 +146,9 @@ class SharedTreeSolveTest : public testing::TestWithParam { params.set_num_workers(4); params.set_shared_tree_num_workers(4); params.set_cp_model_presolve(false); - params.MergeFrom(SatParameters{ - google::protobuf::contrib::parse_proto::ParseTextProtoOrDie( - GetParam())}); + const SatParameters to_merge = + google::protobuf::contrib::parse_proto::ParseTextProtoOrDie(GetParam()); + params.MergeFrom(to_merge); return params; } }; From fb359bac5eaf27fc5979367664fa30f0307d3065 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 9 Oct 2024 16:32:31 +0200 Subject: [PATCH 074/105] cmake: Bump COIN-OR deps --- cmake/dependencies/CMakeLists.txt | 17 +++++++++++------ patches/cbc-2.10.patch | 6 +++--- patches/cgl-0.60.patch | 6 +++--- patches/{clp-1.17.4.patch => clp-1.17.patch} | 6 +++--- patches/coinutils-2.11.patch | 6 +++--- patches/osi-0.108.patch | 6 +++--- 6 files changed, 26 insertions(+), 21 deletions(-) rename patches/{clp-1.17.4.patch => clp-1.17.patch} (65%) diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index c86c82e53f..afed8d8b9d 100644 --- a/cmake/dependencies/CMakeLists.txt +++ 
b/cmake/dependencies/CMakeLists.txt @@ -300,7 +300,8 @@ if(BUILD_CoinUtils) FetchContent_Declare( CoinUtils GIT_REPOSITORY "https://github.com/Mizux/CoinUtils.git" - GIT_TAG "cmake/2.11.6" + GIT_TAG "stable/2.11" + #GIT_TAG "cmake/2.11.6" GIT_SHALLOW TRUE PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/coinutils-2.11.patch") @@ -318,7 +319,8 @@ if(BUILD_Osi) FetchContent_Declare( Osi GIT_REPOSITORY "https://github.com/Mizux/Osi.git" - GIT_TAG "cmake/0.108.7" + GIT_TAG "stable/0.108" + #GIT_TAG "cmake/0.108.7" GIT_SHALLOW TRUE PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/osi-0.108.patch") @@ -336,10 +338,11 @@ if(BUILD_Clp) FetchContent_Declare( Clp GIT_REPOSITORY "https://github.com/Mizux/Clp.git" - GIT_TAG "cmake/1.17.7" + GIT_TAG "stable/1.17" + #GIT_TAG "cmake/1.17.7" GIT_SHALLOW TRUE PATCH_COMMAND git apply --ignore-whitespace - "${CMAKE_CURRENT_LIST_DIR}/../../patches/clp-1.17.4.patch") + "${CMAKE_CURRENT_LIST_DIR}/../../patches/clp-1.17.patch") FetchContent_MakeAvailable(Clp) list(POP_BACK CMAKE_MESSAGE_INDENT) message(CHECK_PASS "fetched") @@ -354,7 +357,8 @@ if(BUILD_Cgl) FetchContent_Declare( Cgl GIT_REPOSITORY "https://github.com/Mizux/Cgl.git" - GIT_TAG "cmake/0.60.5" + GIT_TAG "stable/0.60" + #GIT_TAG "cmake/0.60.5" GIT_SHALLOW TRUE PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/cgl-0.60.patch") @@ -372,7 +376,8 @@ if(BUILD_Cbc) FetchContent_Declare( Cbc GIT_REPOSITORY "https://github.com/Mizux/Cbc.git" - GIT_TAG "cmake/2.10.7" + GIT_TAG "stable/2.10" + #GIT_TAG "cmake/2.10.7" GIT_SHALLOW TRUE PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/cbc-2.10.patch") diff --git a/patches/cbc-2.10.patch b/patches/cbc-2.10.patch index c189c9cf39..bc7a634b8f 100644 --- a/patches/cbc-2.10.patch +++ b/patches/cbc-2.10.patch @@ -1,12 +1,12 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index 22b7a17f..0a5a18dd 100644 
+index 6aa8dca..2fba8d5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -51,6 +51,7 @@ if(APPLE) +@@ -42,6 +42,7 @@ if(APPLE) CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-inconsistent-missing-override -Wno-unused-command-line-argument -Wno-unused-result -Wno-exceptions" ) + add_compile_options(-O1) - set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9" CACHE STRING "Minimum OS X deployment version") + set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "Minimum OS X deployment version") endif(APPLE) diff --git a/patches/cgl-0.60.patch b/patches/cgl-0.60.patch index c3d8ddfa64..8d12c4e1a4 100644 --- a/patches/cgl-0.60.patch +++ b/patches/cgl-0.60.patch @@ -1,12 +1,12 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index 4a70789..d19c2a9 100644 +index 7beea06..c0dfe10 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -51,6 +51,7 @@ if(APPLE) +@@ -42,6 +42,7 @@ if(APPLE) CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-inconsistent-missing-override -Wno-unused-command-line-argument -Wno-unused-result -Wno-exceptions" ) + add_compile_options(-O1) - set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9" CACHE STRING "Minimum OS X deployment version") + set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "Minimum OS X deployment version") endif(APPLE) diff --git a/patches/clp-1.17.4.patch b/patches/clp-1.17.patch similarity index 65% rename from patches/clp-1.17.4.patch rename to patches/clp-1.17.patch index 26d4c9dc85..2eb04aa2e8 100644 --- a/patches/clp-1.17.4.patch +++ b/patches/clp-1.17.patch @@ -1,12 +1,12 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index e1d43115..6b2a17ad 100644 +index a029303..ecafdb3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -51,6 +51,7 @@ if(APPLE) +@@ -42,6 +42,7 @@ if(APPLE) CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-inconsistent-missing-override -Wno-unused-command-line-argument -Wno-unused-result -Wno-exceptions" ) + add_compile_options(-O1) - set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9" CACHE STRING "Minimum OS X deployment version") + 
set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "Minimum OS X deployment version") endif(APPLE) diff --git a/patches/coinutils-2.11.patch b/patches/coinutils-2.11.patch index 01958318c0..157d127221 100644 --- a/patches/coinutils-2.11.patch +++ b/patches/coinutils-2.11.patch @@ -1,12 +1,12 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index fcfa658..267ddd9 100644 +index cbd1e7f..458653f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -51,6 +51,7 @@ if(APPLE) +@@ -42,6 +42,7 @@ if(APPLE) CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-inconsistent-missing-override -Wno-unused-command-line-argument -Wno-unused-result -Wno-exceptions" ) + add_compile_options(-O1) - set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9" CACHE STRING "Minimum OS X deployment version") + set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "Minimum OS X deployment version") endif(APPLE) diff --git a/patches/osi-0.108.patch b/patches/osi-0.108.patch index a282726abf..5ccd64f01e 100644 --- a/patches/osi-0.108.patch +++ b/patches/osi-0.108.patch @@ -1,12 +1,12 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index 7b3cee2..2ac9c9d 100644 +index 273d523..40853ac 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -51,6 +51,7 @@ if(APPLE) +@@ -42,6 +42,7 @@ if(APPLE) CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-inconsistent-missing-override -Wno-unused-command-line-argument -Wno-unused-result -Wno-exceptions" ) + add_compile_options(-O1) - set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9" CACHE STRING "Minimum OS X deployment version") + set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "Minimum OS X deployment version") endif(APPLE) From eccff545c7fd76fc2b0888289aced5b54ec23b87 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Thu, 10 Oct 2024 10:53:06 +0200 Subject: [PATCH 075/105] cmake: Add rpath to re2 --- cmake/dependencies/CMakeLists.txt | 2 +- patches/re2-2024-04-01.patch | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 patches/re2-2024-04-01.patch diff --git 
a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index afed8d8b9d..61ac8c3c23 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -129,7 +129,7 @@ if(BUILD_re2) GIT_REPOSITORY "https://github.com/google/re2.git" GIT_TAG "2024-04-01" GIT_SHALLOW TRUE - #PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/re2-2024-04-01.patch" + PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/re2-2024-04-01.patch" ) FetchContent_MakeAvailable(re2) list(POP_BACK CMAKE_MESSAGE_INDENT) diff --git a/patches/re2-2024-04-01.patch b/patches/re2-2024-04-01.patch new file mode 100644 index 0000000000..73f73a69c1 --- /dev/null +++ b/patches/re2-2024-04-01.patch @@ -0,0 +1,18 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index bdac5af..cedaf6e 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -131,6 +131,13 @@ set(RE2_HEADERS + + add_library(re2 ${RE2_SOURCES}) + target_compile_features(re2 PUBLIC cxx_std_14) ++if(APPLE) ++ set_target_properties(re2 PROPERTIES ++ INSTALL_RPATH "@loader_path") ++elseif(UNIX) ++ set_target_properties(re2 PROPERTIES ++ INSTALL_RPATH "$ORIGIN") ++endif() + target_include_directories(re2 PUBLIC $) + # CMake gives "set_target_properties called with incorrect number of arguments." + # errors if we don't quote ${RE2_HEADERS}, so quote it despite prevailing style. 
From 9c542ed12cc6ec562039bef00024b65264fd7d46 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Thu, 10 Oct 2024 12:51:37 +0200 Subject: [PATCH 076/105] cmake: Fix googletest rpath --- cmake/dependencies/CMakeLists.txt | 2 ++ patches/googletest-v1.15.2.patch | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 patches/googletest-v1.15.2.patch diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index 61ac8c3c23..e434b6d141 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -403,6 +403,8 @@ if(BUILD_googletest) GIT_REPOSITORY https://github.com/google/googletest.git GIT_TAG v1.15.2 GIT_SHALLOW TRUE + PATCH_COMMAND git apply --ignore-whitespace + "${CMAKE_CURRENT_LIST_DIR}/../../patches/googletest-v1.15.2.patch" #PATCH_COMMAND git apply --ignore-whitespace "" ) set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) diff --git a/patches/googletest-v1.15.2.patch b/patches/googletest-v1.15.2.patch new file mode 100644 index 0000000000..1d5da3c1f9 --- /dev/null +++ b/patches/googletest-v1.15.2.patch @@ -0,0 +1,18 @@ +diff --git a/googletest/cmake/internal_utils.cmake b/googletest/cmake/internal_utils.cmake +index 580ac1c..c6cc44c 100644 +--- a/googletest/cmake/internal_utils.cmake ++++ b/googletest/cmake/internal_utils.cmake +@@ -190,6 +190,13 @@ function(cxx_library_with_type name type cxx_flags) + COMPILE_DEFINITIONS "GTEST_CREATE_SHARED_LIBRARY=1") + target_compile_definitions(${name} INTERFACE + $) ++ if(APPLE) ++ set_target_properties(${name} PROPERTIES ++ INSTALL_RPATH "@loader_path") ++ elseif(UNIX) ++ set_target_properties(${name} PROPERTIES ++ INSTALL_RPATH "$ORIGIN") ++ endif() + endif() + if (DEFINED GTEST_HAS_PTHREAD) + target_link_libraries(${name} PUBLIC Threads::Threads) From e42e58b962d0f94bbd31762e107d39542e57d2de Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 11 Oct 2024 13:06:18 +0200 Subject: [PATCH 077/105] cmake: fix coinor patch --- 
patches/cbc-2.10.patch | 6 +++--- patches/cgl-0.60.patch | 6 +++--- patches/clp-1.17.patch | 6 +++--- patches/coinutils-2.11.patch | 6 +++--- patches/osi-0.108.patch | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/patches/cbc-2.10.patch b/patches/cbc-2.10.patch index bc7a634b8f..79830e9022 100644 --- a/patches/cbc-2.10.patch +++ b/patches/cbc-2.10.patch @@ -1,12 +1,12 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index 6aa8dca..2fba8d5 100644 +index be94fca..1529011 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -42,6 +42,7 @@ if(APPLE) +@@ -88,6 +88,7 @@ if(APPLE) CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-inconsistent-missing-override -Wno-unused-command-line-argument -Wno-unused-result -Wno-exceptions" ) + add_compile_options(-O1) set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "Minimum OS X deployment version") - endif(APPLE) + endif() diff --git a/patches/cgl-0.60.patch b/patches/cgl-0.60.patch index 8d12c4e1a4..411ee81d4d 100644 --- a/patches/cgl-0.60.patch +++ b/patches/cgl-0.60.patch @@ -1,12 +1,12 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index 7beea06..c0dfe10 100644 +index 8c51561..f223f08 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -42,6 +42,7 @@ if(APPLE) +@@ -88,6 +88,7 @@ if(APPLE) CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-inconsistent-missing-override -Wno-unused-command-line-argument -Wno-unused-result -Wno-exceptions" ) + add_compile_options(-O1) set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "Minimum OS X deployment version") - endif(APPLE) + endif() diff --git a/patches/clp-1.17.patch b/patches/clp-1.17.patch index 2eb04aa2e8..c97dce747c 100644 --- a/patches/clp-1.17.patch +++ b/patches/clp-1.17.patch @@ -1,12 +1,12 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index a029303..ecafdb3 100644 +index bb95c63..7fde473 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -42,6 +42,7 @@ if(APPLE) +@@ -88,6 +88,7 @@ if(APPLE) CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} 
-Wno-inconsistent-missing-override -Wno-unused-command-line-argument -Wno-unused-result -Wno-exceptions" ) + add_compile_options(-O1) set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "Minimum OS X deployment version") - endif(APPLE) + endif() diff --git a/patches/coinutils-2.11.patch b/patches/coinutils-2.11.patch index 157d127221..9eb47c863e 100644 --- a/patches/coinutils-2.11.patch +++ b/patches/coinutils-2.11.patch @@ -1,12 +1,12 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index cbd1e7f..458653f 100644 +index 3fc9cff..b2423fe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -42,6 +42,7 @@ if(APPLE) +@@ -88,6 +88,7 @@ if(APPLE) CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-inconsistent-missing-override -Wno-unused-command-line-argument -Wno-unused-result -Wno-exceptions" ) + add_compile_options(-O1) set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "Minimum OS X deployment version") - endif(APPLE) + endif() diff --git a/patches/osi-0.108.patch b/patches/osi-0.108.patch index 5ccd64f01e..367f1d30fc 100644 --- a/patches/osi-0.108.patch +++ b/patches/osi-0.108.patch @@ -1,12 +1,12 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index 273d523..40853ac 100644 +index bc22fbd..1c2a604 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -42,6 +42,7 @@ if(APPLE) +@@ -88,6 +88,7 @@ if(APPLE) CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-inconsistent-missing-override -Wno-unused-command-line-argument -Wno-unused-result -Wno-exceptions" ) + add_compile_options(-O1) set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "Minimum OS X deployment version") - endif(APPLE) + endif() From 001c515dde260d5fcc873e40eca70a2a04752b27 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 11 Oct 2024 13:08:26 +0200 Subject: [PATCH 078/105] cmake: Fixing python wheel on linux --- cmake/python.cmake | 116 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) diff --git a/cmake/python.cmake b/cmake/python.cmake index 123468cf62..0d355885cf 100644 --- 
a/cmake/python.cmake +++ b/cmake/python.cmake @@ -435,6 +435,122 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E remove -f ortools_timestamp COMMAND ${CMAKE_COMMAND} -E make_directory ${PYTHON_PROJECT}/.libs # Don't need to copy static lib on Windows. + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + # ortools direct deps + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + ${PYTHON_PROJECT}/.libs + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${PYTHON_PROJECT}/.libs + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${PYTHON_PROJECT}/.libs + + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${PYTHON_PROJECT}/.libs + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${PYTHON_PROJECT}/.libs + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + #$ + #$ + ${PYTHON_PROJECT}/.libs + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${PYTHON_PROJECT}/.libs + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + #$ + $ + ${PYTHON_PROJECT}/.libs + + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${PYTHON_PROJECT}/.libs + COMMAND ${CMAKE_COMMAND} -E $,SHARED_LIBRARY>,copy,true> $<$,SHARED_LIBRARY>:$> From bbf7fbd36aa7d1e8e826d685d5ef8a1dfc146de2 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 11 Oct 2024 14:08:00 +0200 Subject: [PATCH 079/105] cmake: rework deps management --- cmake/check_deps.cmake | 28 ++++++++++++++++++++++++++-- cmake/cpp.cmake | 8 ++++---- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/cmake/check_deps.cmake b/cmake/check_deps.cmake index 36736909ee..0b565d5b52 100644 --- a/cmake/check_deps.cmake +++ b/cmake/check_deps.cmake @@ -79,12 +79,36 @@ if(USE_COINOR) set(COINOR_DEPS Coin::CbcSolver Coin::OsiCbc Coin::ClpSolver Coin::OsiClp) endif() +if(USE_CPLEX) + if(NOT TARGET CPLEX::CPLEX) + 
message(FATAL_ERROR "Target CPLEX::CPLEX not available.") + endif() + set(CPLEX_DEPS CPLEX::CPLEX) +endif() + +if(USE_GLPK) + if(NOT TARGET GLPK::GLPK) + message(FATAL_ERROR "Target GLPK::GLPK not available.") + endif() + set(GLPK_DEPS GLPK::GLPK) +endif() + +if(USE_HIGHS) + if(NOT TARGET highs::highs) + message(FATAL_ERROR "Target highs::highs not available.") + endif() + set(HIGHS_DEPS highs::highs) +endif() + if(USE_PDLP AND BUILD_PDLP) set(PDLP_DEPS Eigen3::Eigen) endif() -if(USE_SCIP AND NOT TARGET libscip) - message(FATAL_ERROR "Target libscip not available.") +if(USE_SCIP) + if(NOT TARGET libscip) + message(FATAL_ERROR "Target libscip not available.") + endif() + set(SCIP_DEPS libscip) endif() # Check optional Dependencies diff --git a/cmake/cpp.cmake b/cmake/cpp.cmake index e384f314ba..89ae76355a 100644 --- a/cmake/cpp.cmake +++ b/cmake/cpp.cmake @@ -558,11 +558,11 @@ target_link_libraries(${PROJECT_NAME} PUBLIC protobuf::libprotobuf ${RE2_DEPS} ${COINOR_DEPS} - $<$:CPLEX::CPLEX> - $<$:GLPK::GLPK> - $<$:highs::highs> + ${CPLEX_DEPS} + ${GLPK_DEPS} + ${HIGHS_DEPS} ${PDLP_DEPS} - $<$:libscip> + ${SCIP_DEPS} Threads::Threads) if(WIN32) target_link_libraries(${PROJECT_NAME} PUBLIC psapi.lib ws2_32.lib) From d3178b3e57395548ffc42b5dff84a95dd748f29c Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 11 Oct 2024 14:35:04 +0200 Subject: [PATCH 080/105] python.cmake: cleanup math_opt --- cmake/python.cmake | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cmake/python.cmake b/cmake/python.cmake index 0d355885cf..4bc52bf1c7 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -587,9 +587,11 @@ add_custom_command( $ ${PYTHON_PROJECT}/linear_solver COMMAND ${CMAKE_COMMAND} -E copy $ ${PYTHON_PROJECT}/linear_solver/python - COMMAND ${CMAKE_COMMAND} -E copy + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> $ ${PYTHON_PROJECT}/math_opt/core/python - COMMAND ${CMAKE_COMMAND} -E copy + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> $ 
${PYTHON_PROJECT}/../pybind11_abseil COMMAND ${CMAKE_COMMAND} -E $,copy,true> From b505af98e4788c74c5fcd5b49414ca37051e0a3d Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 11 Oct 2024 14:41:28 +0200 Subject: [PATCH 081/105] cmake: fix pybind11_abseil patch --- patches/pybind11_abseil.patch | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/patches/pybind11_abseil.patch b/patches/pybind11_abseil.patch index 96d8d73337..69f1608f12 100644 --- a/patches/pybind11_abseil.patch +++ b/patches/pybind11_abseil.patch @@ -55,7 +55,7 @@ index ceb65a8..e142837 100644 include_directories(${TOP_LEVEL_DIR} ${pybind11_INCLUDE_DIRS}) diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt new file mode 100644 -index 0000000..b67d564 +index 0000000..cb13e7e --- /dev/null +++ b/cmake/dependencies/CMakeLists.txt @@ -0,0 +1,19 @@ @@ -158,7 +158,7 @@ index 791c245..33e614a 100644 ) diff --git a/pybind11_abseil/CMakeLists.txt b/pybind11_abseil/CMakeLists.txt -index d1b7483..ce7fd72 100644 +index d1b7483..74e3443 100644 --- a/pybind11_abseil/CMakeLists.txt +++ b/pybind11_abseil/CMakeLists.txt @@ -42,14 +42,19 @@ target_link_libraries(ok_status_singleton_pyinit_google3 @@ -184,7 +184,7 @@ index d1b7483..ce7fd72 100644 target_link_libraries(ok_status_singleton PUBLIC ok_status_singleton_pyinit_google3) -@@ -150,14 +155,23 @@ target_link_libraries(status_pyinit_google3 PUBLIC register_status_bindings) +@@ -150,14 +155,30 @@ target_link_libraries(status_pyinit_google3 PUBLIC register_status_bindings) # status ==================================================================== @@ -195,25 +195,32 @@ index d1b7483..ce7fd72 100644 +set_target_properties(status_py_extension_stub PROPERTIES LIBRARY_OUTPUT_NAME "status") +# note: macOS is APPLE and also UNIX ! 
+if(APPLE) -+ set_target_properties(status_py_extension_stub PROPERTIES SUFFIX ".so") ++ set_target_properties(status_py_extension_stub PROPERTIES ++ SUFFIX ".so" ++ INSTALL_RPATH "@loader_path;@loader_path/../ortools/.libs" ++ ) + set_property(TARGET status_py_extension_stub APPEND PROPERTY + LINK_FLAGS "-flat_namespace -undefined suppress") ++elseif(UNIX) ++ set_target_properties(status_py_extension_stub PROPERTIES ++ INSTALL_RPATH "$ORIGIN:$ORIGIN/../ortools/.libs" ++ ) +endif() -+ -+add_library(pybind11_abseil::status ALIAS status_py_extension_stub) -target_include_directories(status INTERFACE $) -+target_include_directories(status_py_extension_stub INTERFACE $) ++add_library(pybind11_abseil::status ALIAS status_py_extension_stub) -set_target_properties(status PROPERTIES PREFIX "") -+set_target_properties(status_py_extension_stub PROPERTIES PREFIX "") ++target_include_directories(status_py_extension_stub INTERFACE $) -target_link_libraries(status PUBLIC status_pyinit_google3 absl::status) ++set_target_properties(status_py_extension_stub PROPERTIES PREFIX "") ++ +target_link_libraries(status_py_extension_stub PUBLIC status_pyinit_google3 absl::status) # import_status_module ========================================================= -@@ -167,7 +181,7 @@ add_library(pybind11_abseil::import_status_module ALIAS import_status_module) +@@ -167,7 +188,7 @@ add_library(pybind11_abseil::import_status_module ALIAS import_status_module) target_include_directories(import_status_module INTERFACE $) @@ -222,7 +229,7 @@ index d1b7483..ce7fd72 100644 # status_casters =============================================================== -@@ -175,25 +189,27 @@ add_library(status_casters INTERFACE) +@@ -175,25 +196,27 @@ add_library(status_casters INTERFACE) add_library(pybind11_abseil::status_casters ALIAS status_casters) target_include_directories(status_casters From 4adcb97dd593720ff51e43a8d4f44c5ef87030f5 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 11 Oct 2024 
15:37:21 +0200 Subject: [PATCH 082/105] cmake: generate compile_commands.json for lsp tools --- CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7e063f8f18..df74bc7b50 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,6 +15,9 @@ cmake_minimum_required(VERSION 3.20) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") +# Enable output of compile commands during generation. +option(CMAKE_EXPORT_COMPILE_COMMANDS "Export compile command" ON) + include(utils) set_version(VERSION) From 06d65a7575c534bacb6dfeb136cf3463cd9f6932 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Sun, 13 Oct 2024 18:30:24 +0200 Subject: [PATCH 083/105] [FZ] revamp support for element and element2d constraints --- cmake/flatzinc.cmake | 2 - ortools/flatzinc/BUILD.bazel | 16 - ortools/flatzinc/checker.cc | 119 +++-- ortools/flatzinc/cp_model_fz_solver.cc | 142 +++-- ortools/flatzinc/fz.cc | 11 - .../flatzinc/mznlib/redefinitions-2.0.2.mzn | 26 + .../flatzinc/mznlib/redefinitions-2.5.2.mzn | 49 ++ ortools/flatzinc/parser_main.cc | 14 +- ortools/flatzinc/presolve.cc | 495 ------------------ ortools/flatzinc/presolve.h | 115 ---- 10 files changed, 258 insertions(+), 731 deletions(-) create mode 100644 ortools/flatzinc/mznlib/redefinitions-2.0.2.mzn create mode 100644 ortools/flatzinc/mznlib/redefinitions-2.5.2.mzn delete mode 100644 ortools/flatzinc/presolve.cc delete mode 100644 ortools/flatzinc/presolve.h diff --git a/cmake/flatzinc.cmake b/cmake/flatzinc.cmake index 55304f8935..6a74b799e8 100644 --- a/cmake/flatzinc.cmake +++ b/cmake/flatzinc.cmake @@ -70,8 +70,6 @@ add_library(flatzinc ortools/flatzinc/parser.yy.cc #ortools/flatzinc/parser_util.cc # Already #include in parser.tab.cc ortools/flatzinc/parser_util.h - ortools/flatzinc/presolve.cc - ortools/flatzinc/presolve.h ) ## Includes target_include_directories(flatzinc PUBLIC diff --git a/ortools/flatzinc/BUILD.bazel b/ortools/flatzinc/BUILD.bazel index 
d3e8b22116..112bbd75bd 100644 --- a/ortools/flatzinc/BUILD.bazel +++ b/ortools/flatzinc/BUILD.bazel @@ -109,21 +109,6 @@ cc_library( ], ) -cc_library( - name = "presolve", - srcs = ["presolve.cc"], - hdrs = ["presolve.h"], - deps = [ - ":model", - "//ortools/base", - "//ortools/base:hash", - "//ortools/graph:cliques", - "//ortools/util:logging", - "//ortools/util:saturated_arithmetic", - "@com_google_absl//absl/strings", - ], -) - cc_library( name = "checker", srcs = ["checker.cc"], @@ -172,7 +157,6 @@ cc_binary( ":cp_model_fz_solver", ":model", ":parser_lib", - ":presolve", "//ortools/base", "//ortools/base:path", "//ortools/base:threadpool", diff --git a/ortools/flatzinc/checker.cc b/ortools/flatzinc/checker.cc index e21375ffe2..6b179c311b 100644 --- a/ortools/flatzinc/checker.cc +++ b/ortools/flatzinc/checker.cc @@ -183,6 +183,27 @@ bool CheckArrayVarIntElement( return element == target; } +bool CheckOrtoolsArrayIntElement( + const Constraint& ct, const std::function& evaluator) { + const int64_t min_index = ct.arguments[1].values[0]; + const int64_t index = Eval(ct.arguments[0], evaluator) - min_index; + const int64_t element = EvalAt(ct.arguments[2], index, evaluator); + const int64_t target = Eval(ct.arguments[3], evaluator); + return element == target; +} + +bool CheckOrtoolsArrayIntElement2d( + const Constraint& ct, const std::function& evaluator) { + const int64_t min_index0 = ct.arguments[2].values[0]; + const int64_t min_index1 = ct.arguments[3].values[0]; + const int64_t span1 = ct.arguments[3].values[1] - min_index1 + 1; + const int64_t index0 = Eval(ct.arguments[0], evaluator) - min_index0; + const int64_t index1 = Eval(ct.arguments[1], evaluator) - min_index1; + const int64_t element = + EvalAt(ct.arguments[4], index0 * span1 + index1, evaluator); + const int64_t target = Eval(ct.arguments[5], evaluator); + return element == target; +} bool CheckAtMostInt(const Constraint& ct, const std::function& evaluator) { const int64_t expected = 
Eval(ct.arguments[0], evaluator); @@ -1184,7 +1205,6 @@ using CallMap = // They are created at compilation time when using the or-tools mzn library. CallMap CreateCallMap() { CallMap m; - m["fzn_all_different_int"] = CheckAllDifferentInt; m["alldifferent_except_0"] = CheckAlldifferentExcept0; m["among"] = CheckAmong; m["array_bool_and"] = CheckArrayBoolAnd; @@ -1193,130 +1213,137 @@ CallMap CreateCallMap() { m["array_bool_xor"] = CheckArrayBoolXor; m["array_int_element"] = CheckArrayIntElement; m["array_int_element_nonshifted"] = CheckArrayIntElementNonShifted; + m["array_int_maximum"] = CheckMaximumInt; + m["array_int_minimum"] = CheckMinimumInt; m["array_var_bool_element"] = CheckArrayVarIntElement; m["array_var_int_element"] = CheckArrayVarIntElement; m["at_most_int"] = CheckAtMostInt; m["bool_and"] = CheckBoolAnd; m["bool_clause"] = CheckBoolClause; - m["bool_eq"] = CheckIntEq; - m["bool2int"] = CheckIntEq; m["bool_eq_imp"] = CheckIntEqImp; m["bool_eq_reif"] = CheckIntEqReif; - m["bool_ge"] = CheckIntGe; + m["bool_eq"] = CheckIntEq; m["bool_ge_imp"] = CheckIntGeImp; m["bool_ge_reif"] = CheckIntGeReif; - m["bool_gt"] = CheckIntGt; + m["bool_ge"] = CheckIntGe; m["bool_gt_imp"] = CheckIntGtImp; m["bool_gt_reif"] = CheckIntGtReif; - m["bool_le"] = CheckIntLe; + m["bool_gt"] = CheckIntGt; m["bool_le_imp"] = CheckIntLeImp; m["bool_le_reif"] = CheckIntLeReif; + m["bool_le"] = CheckIntLe; m["bool_left_imp"] = CheckIntLe; m["bool_lin_eq"] = CheckIntLinEq; m["bool_lin_le"] = CheckIntLinLe; - m["bool_lt"] = CheckIntLt; m["bool_lt_imp"] = CheckIntLtImp; m["bool_lt_reif"] = CheckIntLtReif; - m["bool_ne"] = CheckIntNe; + m["bool_lt"] = CheckIntLt; m["bool_ne_imp"] = CheckIntNeImp; m["bool_ne_reif"] = CheckIntNeReif; + m["bool_ne"] = CheckIntNe; m["bool_not"] = CheckBoolNot; m["bool_or"] = CheckBoolOr; m["bool_right_imp"] = CheckIntGe; m["bool_xor"] = CheckBoolXor; - m["ortools_circuit"] = CheckCircuit; + m["bool2int"] = CheckIntEq; m["count_eq"] = CheckCountEq; - m["count"] 
= CheckCountEq; m["count_geq"] = CheckCountGeq; m["count_gt"] = CheckCountGt; m["count_leq"] = CheckCountLeq; m["count_lt"] = CheckCountLt; m["count_neq"] = CheckCountNeq; m["count_reif"] = CheckCountReif; - m["fzn_cumulative"] = CheckCumulative; - m["var_cumulative"] = CheckCumulative; - m["variable_cumulative"] = CheckCumulative; - m["fixed_cumulative"] = CheckCumulative; - m["ortools_cumulative_opt"] = CheckCumulativeOpt; - m["fzn_diffn"] = CheckDiffn; + m["count"] = CheckCountEq; m["diffn_k_with_sizes"] = CheckDiffnK; - m["fzn_diffn_nonstrict"] = CheckDiffnNonStrict; m["diffn_nonstrict_k_with_sizes"] = CheckDiffnNonStrictK; - m["fzn_disjunctive"] = CheckDisjunctive; - m["fzn_disjunctive_strict"] = CheckDisjunctiveStrict; - m["ortools_disjunctive_strict_opt"] = CheckDisjunctiveStrictOpt; m["false_constraint"] = CheckFalseConstraint; - m["global_cardinality"] = CheckGlobalCardinality; + m["fixed_cumulative"] = CheckCumulative; + m["fzn_all_different_int"] = CheckAllDifferentInt; + m["fzn_cumulative"] = CheckCumulative; + m["fzn_diffn_nonstrict"] = CheckDiffnNonStrict; + m["fzn_diffn"] = CheckDiffn; + m["fzn_disjunctive_strict"] = CheckDisjunctiveStrict; + m["fzn_disjunctive"] = CheckDisjunctive; m["global_cardinality_closed"] = CheckGlobalCardinalityClosed; - m["global_cardinality_low_up"] = CheckGlobalCardinalityLowUp; m["global_cardinality_low_up_closed"] = CheckGlobalCardinalityLowUpClosed; + m["global_cardinality_low_up"] = CheckGlobalCardinalityLowUp; m["global_cardinality_old"] = CheckGlobalCardinalityOld; + m["global_cardinality"] = CheckGlobalCardinality; m["int_abs"] = CheckIntAbs; m["int_div"] = CheckIntDiv; - m["int_eq"] = CheckIntEq; m["int_eq_imp"] = CheckIntEqImp; m["int_eq_reif"] = CheckIntEqReif; - m["int_ge"] = CheckIntGe; + m["int_eq"] = CheckIntEq; m["int_ge_imp"] = CheckIntGeImp; m["int_ge_reif"] = CheckIntGeReif; - m["int_gt"] = CheckIntGt; + m["int_ge"] = CheckIntGe; m["int_gt_imp"] = CheckIntGtImp; m["int_gt_reif"] = CheckIntGtReif; - 
m["int_le"] = CheckIntLe; + m["int_gt"] = CheckIntGt; + m["int_in"] = CheckSetIn; m["int_le_imp"] = CheckIntLeImp; m["int_le_reif"] = CheckIntLeReif; - m["int_lin_eq"] = CheckIntLinEq; + m["int_le"] = CheckIntLe; m["int_lin_eq_imp"] = CheckIntLinEqImp; m["int_lin_eq_reif"] = CheckIntLinEqReif; - m["int_lin_ge"] = CheckIntLinGe; + m["int_lin_eq"] = CheckIntLinEq; m["int_lin_ge_imp"] = CheckIntLinGeImp; m["int_lin_ge_reif"] = CheckIntLinGeReif; - m["int_lin_le"] = CheckIntLinLe; + m["int_lin_ge"] = CheckIntLinGe; m["int_lin_le_imp"] = CheckIntLinLeImp; m["int_lin_le_reif"] = CheckIntLinLeReif; - m["int_lin_ne"] = CheckIntLinNe; + m["int_lin_le"] = CheckIntLinLe; m["int_lin_ne_imp"] = CheckIntLinNeImp; m["int_lin_ne_reif"] = CheckIntLinNeReif; - m["int_lt"] = CheckIntLt; + m["int_lin_ne"] = CheckIntLinNe; m["int_lt_imp"] = CheckIntLtImp; m["int_lt_reif"] = CheckIntLtReif; + m["int_lt"] = CheckIntLt; m["int_max"] = CheckIntMax; m["int_min"] = CheckIntMin; m["int_minus"] = CheckIntMinus; m["int_mod"] = CheckIntMod; - m["int_ne"] = CheckIntNe; m["int_ne_imp"] = CheckIntNeImp; m["int_ne_reif"] = CheckIntNeReif; + m["int_ne"] = CheckIntNe; m["int_negate"] = CheckIntNegate; + m["int_not_in"] = CheckSetNotIn; m["int_plus"] = CheckIntPlus; m["int_times"] = CheckIntTimes; - m["ortools_inverse"] = CheckInverse; m["lex_less_bool"] = CheckLexLessInt; m["lex_less_int"] = CheckLexLessInt; m["lex_lesseq_bool"] = CheckLexLesseqInt; m["lex_lesseq_int"] = CheckLexLesseqInt; m["maximum_arg_int"] = CheckMaximumArgInt; m["maximum_int"] = CheckMaximumInt; - m["array_int_maximum"] = CheckMaximumInt; m["minimum_arg_int"] = CheckMinimumArgInt; m["minimum_int"] = CheckMinimumInt; - m["array_int_minimum"] = CheckMinimumInt; - m["ortools_network_flow"] = CheckNetworkFlow; - m["ortools_network_flow_cost"] = CheckNetworkFlowCost; m["nvalue"] = CheckNvalue; + m["ortools_array_bool_element"] = CheckOrtoolsArrayIntElement; + m["ortools_array_int_element"] = CheckOrtoolsArrayIntElement; + 
m["ortools_array_var_bool_element"] = CheckOrtoolsArrayIntElement; + m["ortools_array_var_bool_element2d"] = CheckOrtoolsArrayIntElement2d; + m["ortools_array_var_int_element"] = CheckOrtoolsArrayIntElement; + m["ortools_array_var_int_element2d"] = CheckOrtoolsArrayIntElement2d; + m["ortools_circuit"] = CheckCircuit; + m["ortools_cumulative_opt"] = CheckCumulativeOpt; + m["ortools_disjunctive_strict_opt"] = CheckDisjunctiveStrictOpt; + m["ortools_inverse"] = CheckInverse; + m["ortools_network_flow_cost"] = CheckNetworkFlowCost; + m["ortools_network_flow"] = CheckNetworkFlow; m["ortools_regular"] = CheckRegular; - m["regular_nfa"] = CheckRegularNfa; - m["set_in"] = CheckSetIn; - m["int_in"] = CheckSetIn; - m["set_not_in"] = CheckSetNotIn; - m["int_not_in"] = CheckSetNotIn; - m["set_in_reif"] = CheckSetInReif; - m["sliding_sum"] = CheckSlidingSum; - m["sort"] = CheckSort; m["ortools_subcircuit"] = CheckSubCircuit; - m["symmetric_all_different"] = CheckSymmetricAllDifferent; m["ortools_table_bool"] = CheckTableInt; m["ortools_table_int"] = CheckTableInt; + m["regular_nfa"] = CheckRegularNfa; + m["set_in_reif"] = CheckSetInReif; + m["set_in"] = CheckSetIn; + m["set_not_in"] = CheckSetNotIn; + m["sliding_sum"] = CheckSlidingSum; + m["sort"] = CheckSort; + m["symmetric_all_different"] = CheckSymmetricAllDifferent; + m["var_cumulative"] = CheckCumulative; + m["variable_cumulative"] = CheckCumulative; return m; } diff --git a/ortools/flatzinc/cp_model_fz_solver.cc b/ortools/flatzinc/cp_model_fz_solver.cc index d343d1b4b7..25438654a6 100644 --- a/ortools/flatzinc/cp_model_fz_solver.cc +++ b/ortools/flatzinc/cp_model_fz_solver.cc @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "absl/container/flat_hash_map.h" @@ -76,6 +77,9 @@ struct CpModelProtoWithMapping { std::vector LookupVars(const fz::Argument& argument); std::vector LookupVarsOrValues(const fz::Argument& argument); + // Encoding literals. 
+ int GetOrCreateVarEqValueLiteral(int var, int64_t value); + // Create and return the indices of the IntervalConstraint corresponding // to the flatzinc "interval" specified by a start var and a size var. // This method will cache intervals with the key . @@ -134,6 +138,7 @@ struct CpModelProtoWithMapping { absl::flat_hash_map, int> interval_key_to_index; absl::flat_hash_map var_to_lit_implies_greater_than_zero; + absl::flat_hash_map, int> value_encoding_literals; }; int CpModelProtoWithMapping::LookupConstant(int64_t value) { @@ -227,6 +232,34 @@ std::vector CpModelProtoWithMapping::LookupVarsOrValues( return result; } +int CpModelProtoWithMapping::GetOrCreateVarEqValueLiteral(int var, + int64_t value) { + const std::pair key = {var, value}; + const auto it = value_encoding_literals.find(key); + if (it != value_encoding_literals.end()) { + return it->second; + } + const int result = proto.variables_size(); + IntegerVariableProto* var_proto = proto.add_variables(); + var_proto->add_domain(0); + var_proto->add_domain(1); + value_encoding_literals[key] = result; + + ConstraintProto* pos_enforcement = AddEnforcedConstraint(result); + pos_enforcement->mutable_linear()->add_vars(var); + pos_enforcement->mutable_linear()->add_coeffs(1); + pos_enforcement->mutable_linear()->add_domain(value); + pos_enforcement->mutable_linear()->add_domain(value); + + ConstraintProto* neg_enforcement = AddEnforcedConstraint(NegatedRef(result)); + neg_enforcement->mutable_linear()->add_vars(var); + neg_enforcement->mutable_linear()->add_coeffs(1); + const Domain complement = Domain(value).Complement().IntersectionWith( + ReadDomainFromProto(proto.variables(var))); + FillDomainInProto(complement, neg_enforcement->mutable_linear()); + return result; +} + ConstraintProto* CpModelProtoWithMapping::AddEnforcedConstraint(int literal) { ConstraintProto* result = proto.add_constraints(); if (literal != kNoVar) { @@ -634,46 +667,87 @@ void CpModelProtoWithMapping::FillConstraint(const 
fz::Constraint& fz_ct, fz_ct.type == "array_var_int_element" || fz_ct.type == "array_var_bool_element" || fz_ct.type == "array_int_element_nonshifted") { + // Compatibility with the old format. + CHECK(fz_ct.arguments[0].type == fz::Argument::VAR_REF || + fz_ct.arguments[0].type == fz::Argument::INT_VALUE); + auto* arg = ct->mutable_element(); + arg->set_index(LookupVar(fz_ct.arguments[0])); + arg->set_target(LookupVar(fz_ct.arguments[2])); + + if (!absl::EndsWith(fz_ct.type, "_nonshifted")) { + // Add a dummy variable at position zero because flatzinc index start + // at 1. + // TODO(user): Make sure that zero is not in the index domain... + arg->add_vars(LookupConstant(0)); + } + for (const int var : LookupVars(fz_ct.arguments[1])) arg->add_vars(var); + } else if (fz_ct.type == "ortools_array_int_element" || + fz_ct.type == "ortools_array_bool_element" || + fz_ct.type == "ortools_array_var_int_element" || + fz_ct.type == "ortools_array_var_bool_element") { if (fz_ct.arguments[0].type == fz::Argument::VAR_REF || fz_ct.arguments[0].type == fz::Argument::INT_VALUE) { auto* arg = ct->mutable_element(); arg->set_index(LookupVar(fz_ct.arguments[0])); - arg->set_target(LookupVar(fz_ct.arguments[2])); - - if (!absl::EndsWith(fz_ct.type, "_nonshifted")) { - // Add a dummy variable at position zero because flatzinc index start - // at 1. - // TODO(user): Make sure that zero is not in the index domain... - arg->add_vars(LookupConstant(0)); - } - for (const int var : LookupVars(fz_ct.arguments[1])) arg->add_vars(var); - } else { - // Special case added by the presolve or in flatzinc. We encode this - // as a table constraint. - CHECK(!absl::EndsWith(fz_ct.type, "_nonshifted")); - auto* arg = ct->mutable_table(); - - // the constraint is: - // values[coeff1 * vars[0] + coeff2 * vars[1] + offset] == target. 
- for (const int var : LookupVars(fz_ct.arguments[0])) arg->add_vars(var); - arg->add_vars(LookupVar(fz_ct.arguments[2])); // the target - - const std::vector& values = fz_ct.arguments[1].values; - const int64_t coeff1 = fz_ct.arguments[3].values[0]; - const int64_t coeff2 = fz_ct.arguments[3].values[1]; - const int64_t offset = fz_ct.arguments[4].values[0] - 1; - - for (const int64_t a : AllValuesInDomain(proto.variables(arg->vars(0)))) { - for (const int64_t b : - AllValuesInDomain(proto.variables(arg->vars(1)))) { - const int index = coeff1 * a + coeff2 * b + offset; - CHECK_GE(index, 0); - CHECK_LT(index, values.size()); - arg->add_values(a); - arg->add_values(b); - arg->add_values(values[index]); + arg->set_target(LookupVar(fz_ct.arguments[3])); + CHECK_EQ(fz_ct.arguments[1].type, fz::Argument::INT_INTERVAL); + const int64_t min_index = fz_ct.arguments[1].values.front(); + if (min_index > 0) { + const int zero_cst = LookupConstant(0); + for (int i = 0; i < min_index; ++i) { + arg->add_vars(zero_cst); } } + for (const int var : LookupVars(fz_ct.arguments[2])) arg->add_vars(var); + } + } else if (fz_ct.type == "ortools_array_var_int_element2d" || + fz_ct.type == "ortools_array_var_bool_element2d") { + const int index1 = LookupVar(fz_ct.arguments[0]); + const int index2 = LookupVar(fz_ct.arguments[1]); + const int target = LookupVar(fz_ct.arguments[5]); + + CHECK_EQ(fz_ct.arguments[2].type, fz::Argument::INT_INTERVAL); + CHECK_EQ(fz_ct.arguments[3].type, fz::Argument::INT_INTERVAL); + const int64_t min_1 = fz_ct.arguments[2].values[0]; + const int64_t max_1 = fz_ct.arguments[2].values[1]; + const int64_t min_2 = fz_ct.arguments[3].values[0]; + const int64_t max_2 = fz_ct.arguments[3].values[1]; + + if (fz_ct.arguments[4].type == fz::Argument::INT_LIST) { + // If the array is constant, we encode this as a table constraint. 
+ auto* arg = ct->mutable_table(); + arg->add_vars(index1); + arg->add_vars(index2); + arg->add_vars(target); + + int i = 0; + for (int64_t val_1 = min_1; val_1 <= max_1; ++val_1) { + for (int64_t val_2 = min_2; val_2 <= max_2; ++val_2) { + arg->add_values(val_1); + arg->add_values(val_2); + arg->add_values(fz_ct.arguments[4].ValueAt(i++)); + } + } + CHECK_EQ(i, fz_ct.arguments[4].Size()); + } else { + std::vector elems = LookupVars(fz_ct.arguments[4]); + int i = 0; + for (int64_t val_1 = min_1; val_1 <= max_1; ++val_1) { + const int lit1 = GetOrCreateVarEqValueLiteral(index1, val_1); + for (int64_t val_2 = min_2; val_2 <= max_2; ++val_2) { + const int lit2 = GetOrCreateVarEqValueLiteral(index2, val_2); + if (i != 0) ct = proto.add_constraints(); // new constraint. + ct->add_enforcement_literal(lit1); + ct->add_enforcement_literal(lit2); + ct->mutable_linear()->add_vars(target); + ct->mutable_linear()->add_coeffs(1); + ct->mutable_linear()->add_vars(elems[i++]); + ct->mutable_linear()->add_coeffs(-1); + ct->mutable_linear()->add_domain(0); + ct->mutable_linear()->add_domain(0); + } + } + CHECK_EQ(i, fz_ct.arguments[4].Size()); } } else if (fz_ct.type == "ortools_table_int") { auto* arg = ct->mutable_table(); diff --git a/ortools/flatzinc/fz.cc b/ortools/flatzinc/fz.cc index 042102dae7..ee1ce9ef11 100644 --- a/ortools/flatzinc/fz.cc +++ b/ortools/flatzinc/fz.cc @@ -39,7 +39,6 @@ #include "ortools/flatzinc/cp_model_fz_solver.h" #include "ortools/flatzinc/model.h" #include "ortools/flatzinc/parser.h" -#include "ortools/flatzinc/presolve.h" #include "ortools/util/logging.h" ABSL_FLAG(double, time_limit, 0, "time limit in seconds."); @@ -50,7 +49,6 @@ ABSL_FLAG(bool, free_search, false, "If false, the solver must follow the defined search." 
"If true, other search are allowed."); ABSL_FLAG(int, threads, 0, "Number of threads the solver will use."); -ABSL_FLAG(bool, presolve, true, "Presolve the model to simplify it."); ABSL_FLAG(bool, statistics, false, "Print solver statistics after search."); ABSL_FLAG(bool, read_from_stdin, false, "Read the FlatZinc from stdin, not from a file."); @@ -153,15 +151,6 @@ Model ParseFlatzincModel(const std::string& input, bool input_is_filename, " parsed in ", timer.GetInMs(), " ms"); SOLVER_LOG(logger, ""); - // Presolve the model. - Presolver presolve(logger); - SOLVER_LOG(logger, "Presolve model"); - timer.Reset(); - timer.Start(); - presolve.Run(&model); - SOLVER_LOG(logger, " - done in ", timer.GetInMs(), " ms"); - SOLVER_LOG(logger); - // Print statistics. ModelStatistics stats(model, logger); stats.BuildStatistics(); diff --git a/ortools/flatzinc/mznlib/redefinitions-2.0.2.mzn b/ortools/flatzinc/mznlib/redefinitions-2.0.2.mzn new file mode 100644 index 0000000000..74e28cd766 --- /dev/null +++ b/ortools/flatzinc/mznlib/redefinitions-2.0.2.mzn @@ -0,0 +1,26 @@ +% Ignore. +predicate symmetry_breaking_constraint(var bool: b) = b; + +predicate redundant_constraint(var bool: b) = b; + +% array_var_bool_element_nonshifted. +predicate ortools_array_var_bool_element(var int: idx, + set of int: domain_of_x, + array [int] of var bool: x, + var bool: c); + +predicate array_var_bool_element_nonshifted(var int: idx, + array [int] of var bool: x, + var bool: c) = + ortools_array_var_bool_element(idx, index_set(x), x, c); + +% array_var_int_element_nonshifted. 
+predicate ortools_array_var_int_element(var int: idx, + set of int: domain_of_x, + array [int] of var int: x, + var int: c); + +predicate array_var_int_element_nonshifted(var int: idx, + array [int] of var int: x, + var int: c) = + ortools_array_var_int_element(idx, index_set(x), x, c); diff --git a/ortools/flatzinc/mznlib/redefinitions-2.5.2.mzn b/ortools/flatzinc/mznlib/redefinitions-2.5.2.mzn new file mode 100644 index 0000000000..e60bd8661f --- /dev/null +++ b/ortools/flatzinc/mznlib/redefinitions-2.5.2.mzn @@ -0,0 +1,49 @@ +% array_var_bool_element2d_nonshifted. +predicate ortools_array_var_bool_element2d(var int: idx1, + var int: idx2, + set of int: domain_of_x_1, + set of int: domain_of_x_2, + array[int] of var bool: x, + var bool: c); + +predicate array_var_bool_element2d_nonshifted(var int: idx1, + var int: idx2, + array[int,int] of var bool: x, + var bool: c) = + ortools_array_var_bool_element2d(idx1, + idx2, + index_set_1of2(x), + index_set_2of2(x), + array1d(x), + c); + +% array_var_int_element2d_nonshifted. 
+predicate ortools_array_var_int_element2d(var int: idx1, + var int: idx2, + set of int: domain_of_x_1, + set of int: domain_of_x_2, + array[int] of var int: x, + var int: c); + +predicate array_var_int_element2d_nonshifted(var int: idx1, + var int: idx2, + array[int,int] of var int: x, + var int: c) = + ortools_array_var_int_element2d(idx1, + idx2, + index_set_1of2(x), + index_set_2of2(x), + array1d(x), + c); + +predicate array_var_float_element2d_nonshifted(var int: idx1, var int: idx2, array[int,int] of var float: x, var float: c) = + let { + int: dim = card(index_set_2of2(x)); + int: min_flat = min(index_set_1of2(x))*dim+min(index_set_2of2(x))-1; + } in array_var_float_element_nonshifted((idx1*dim+idx2-min_flat)::domain, array1d(x), c); + +predicate array_var_set_element2d_nonshifted(var int: idx1, var int: idx2, array[int,int] of var set of int: x, var set of int: c) = + let { + int: dim = card(index_set_2of2(x)); + int: min_flat = min(index_set_1of2(x))*dim+min(index_set_2of2(x))-1; + } in array_var_set_element_nonshifted((idx1*dim+idx2-min_flat)::domain, array1d(x), c); diff --git a/ortools/flatzinc/parser_main.cc b/ortools/flatzinc/parser_main.cc index eaebbedd00..f571606c4f 100644 --- a/ortools/flatzinc/parser_main.cc +++ b/ortools/flatzinc/parser_main.cc @@ -25,7 +25,6 @@ #include "ortools/base/timer.h" #include "ortools/flatzinc/model.h" #include "ortools/flatzinc/parser.h" -#include "ortools/flatzinc/presolve.h" #include "ortools/util/logging.h" ABSL_FLAG(std::string, input, "", "Input file in the flatzinc format."); @@ -35,7 +34,7 @@ ABSL_FLAG(bool, statistics, false, "Print model statistics"); namespace operations_research { namespace fz { -void ParseFile(const std::string& filename, bool presolve) { +void ParseFile(const std::string& filename) { WallTimer timer; timer.Start(); @@ -58,14 +57,6 @@ void ParseFile(const std::string& filename, bool presolve) { Model model(problem_name); CHECK(ParseFlatzincFile(filename, &model)); - if (presolve) { - 
SOLVER_LOG(&logger, "Presolve model"); - timer.Reset(); - timer.Start(); - Presolver presolve(&logger); - presolve.Run(&model); - SOLVER_LOG(&logger, " - done in ", timer.GetInMs(), " ms"); - } if (absl::GetFlag(FLAGS_statistics)) { ModelStatistics stats(model, &logger); stats.BuildStatistics(); @@ -85,7 +76,6 @@ int main(int argc, char** argv) { absl::SetProgramUsageMessage(kUsage); absl::ParseCommandLine(argc, argv); google::InitGoogleLogging(argv[0]); - operations_research::fz::ParseFile(absl::GetFlag(FLAGS_input), - absl::GetFlag(FLAGS_presolve)); + operations_research::fz::ParseFile(absl::GetFlag(FLAGS_input)); return 0; } diff --git a/ortools/flatzinc/presolve.cc b/ortools/flatzinc/presolve.cc deleted file mode 100644 index 2887f8db46..0000000000 --- a/ortools/flatzinc/presolve.cc +++ /dev/null @@ -1,495 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "ortools/flatzinc/presolve.h" - -#include -#include -#include -#include -#include - -#include "absl/container/flat_hash_set.h" -#include "absl/flags/flag.h" -#include "absl/log/check.h" -#include "absl/strings/string_view.h" -#include "absl/types/span.h" -#include "ortools/flatzinc/model.h" -#include "ortools/util/logging.h" - -ABSL_FLAG(bool, fz_floats_are_ints, false, - "Interpret floats as integers in all variables and constraints."); - -namespace operations_research { -namespace fz { -namespace { -enum PresolveState { ALWAYS_FALSE, ALWAYS_TRUE, UNDECIDED }; - -template -bool IsArrayBoolean(const std::vector& values) { - for (int i = 0; i < values.size(); ++i) { - if (values[i] != 0 && values[i] != 1) { - return false; - } - } - return true; -} - -template -bool AtMostOne0OrAtMostOne1(const std::vector& values) { - CHECK(IsArrayBoolean(values)); - int num_zero = 0; - int num_one = 0; - for (T val : values) { - if (val) { - num_one++; - } else { - num_zero++; - } - if (num_one > 1 && num_zero > 1) { - return false; - } - } - return true; -} - -template -void AppendIfNotInSet(T* value, absl::flat_hash_set* s, - std::vector* vec) { - if (s->insert(value).second) { - vec->push_back(value); - } - DCHECK_EQ(s->size(), vec->size()); -} - -} // namespace - -// Note on documentation -// -// In order to document presolve rules, we will use the following naming -// convention: -// - x, x1, xi, y, y1, yi denote integer variables -// - b, b1, bi denote boolean variables -// - c, c1, ci denote integer constants -// - t, t1, ti denote boolean constants -// - => x after a constraint denotes the target variable of this constraint. -// Arguments are listed in order. - -// Propagates cast constraint. -// Rule 1: -// Input: bool2int(b, c) or bool2int(t, x) -// Output: int_eq(...) -// -// Rule 2: -// Input: bool2int(b, x) -// Action: Replace all instances of x by b. 
-// Output: inactive constraint -void Presolver::PresolveBool2Int(Constraint* ct) { - DCHECK_EQ(ct->type, "bool2int"); - if (ct->arguments[0].HasOneValue() || ct->arguments[1].HasOneValue()) { - // Rule 1. - UpdateRuleStats("bool2int: rename to int_eq"); - ct->type = "int_eq"; - } else { - // Rule 2. - UpdateRuleStats("bool2int: merge boolean and integer variables."); - AddVariableSubstitution(ct->arguments[1].Var(), ct->arguments[0].Var()); - ct->MarkAsInactive(); - } -} - -// Propagates cast constraint. -// Rule 1: -// Input: int2float(x, y) -// Action: Replace all instances of y by x. -// Output: inactive constraint -void Presolver::PresolveInt2Float(Constraint* ct) { - DCHECK_EQ(ct->type, "int2float"); - // Rule 1. - UpdateRuleStats("int2float: merge integer and floating point variables."); - AddVariableSubstitution(ct->arguments[1].Var(), ct->arguments[0].Var()); - ct->MarkAsInactive(); -} - -void Presolver::PresolveStoreFlatteningMapping(Constraint* ct) { - CHECK_EQ(3, ct->arguments[1].variables.size()); - Variable* const var0 = ct->arguments[1].variables[0]; - Variable* const var1 = ct->arguments[1].variables[1]; - Variable* const var2 = ct->arguments[1].variables[2]; - const int64_t coeff0 = ct->arguments[0].values[0]; - const int64_t coeff1 = ct->arguments[0].values[1]; - const int64_t coeff2 = ct->arguments[0].values[2]; - const int64_t rhs = ct->arguments[2].Value(); - if (coeff0 == -1 && coeff2 == 1 && !array2d_index_map_.contains(var0)) { - array2d_index_map_[var0] = - Array2DIndexMapping(var1, coeff1, var2, -rhs, ct); - UpdateRuleStats("int_lin_eq: store 2d flattening mapping"); - } else if (coeff0 == -1 && coeff1 == 1 && - !array2d_index_map_.contains(var0)) { - array2d_index_map_[var0] = - Array2DIndexMapping(var2, coeff2, var1, -rhs, ct); - UpdateRuleStats("int_lin_eq: store 2d flattening mapping"); - } else if (coeff2 == -1 && coeff1 == 1 && - !array2d_index_map_.contains(var2)) { - array2d_index_map_[var2] = - Array2DIndexMapping(var0, coeff0, 
var1, -rhs, ct); - UpdateRuleStats("int_lin_eq: store 2d flattening mapping"); - } else if (coeff2 == -1 && coeff0 == 1 && - !array2d_index_map_.contains(var2)) { - array2d_index_map_[var2] = - Array2DIndexMapping(var1, coeff1, var0, -rhs, ct); - UpdateRuleStats("int_lin_eq: store 2d flattening mapping"); - } -} - -namespace { -bool IsIncreasingAndContiguous(absl::Span values) { - for (int i = 0; i < values.size() - 1; ++i) { - if (values[i + 1] != values[i] + 1) { - return false; - } - } - return true; -} - -bool AreOnesFollowedByMinusOne(absl::Span coeffs) { - CHECK(!coeffs.empty()); - for (int i = 0; i < coeffs.size() - 1; ++i) { - if (coeffs[i] != 1) { - return false; - } - } - return coeffs.back() == -1; -} - -template -bool IsStrictPrefix(const std::vector& v1, const std::vector& v2) { - if (v1.size() >= v2.size()) { - return false; - } - for (int i = 0; i < v1.size(); ++i) { - if (v1[i] != v2[i]) { - return false; - } - } - return true; -} -} // namespace - -// Rewrite array element: array_int_element: -// -// Rule 1: -// Input : array_int_element(x, [c1, .., cn], y) with x = a * x1 + x2 + b -// Output: array_int_element([x1, x2], [c_a1, .., c_am], b, [a, b]) -// to be interpreted by the extraction process. -// -// Rule 2: -// Input : array_int_element(x, [c1, .., cn], y) with x0 ci = c0 + i -// Output: int_lin_eq([-1, 1], [y, x], 1 - c) (e.g. y = x + c - 1) -void Presolver::PresolveSimplifyElement(Constraint* ct) { - if (ct->arguments[0].variables.size() != 1) return; - Variable* const index_var = ct->arguments[0].Var(); - - // Rule 1. - if (array2d_index_map_.contains(index_var)) { - UpdateRuleStats("array_int_element: rewrite as a 2d element"); - const Array2DIndexMapping& mapping = array2d_index_map_[index_var]; - // Rewrite constraint. 
- ct->arguments[0] = - Argument::VarRefArray({mapping.variable1, mapping.variable2}); - std::vector coefs; - coefs.push_back(mapping.coefficient); - coefs.push_back(1); - ct->arguments.push_back(Argument::IntegerList(coefs)); - ct->arguments.push_back(Argument::IntegerValue(mapping.offset)); - index_var->active = false; - mapping.constraint->MarkAsInactive(); - return; - } - - // Rule 2. - if (IsIncreasingAndContiguous(ct->arguments[1].values) && - ct->arguments[2].type == Argument::VAR_REF) { - const int64_t start = ct->arguments[1].values.front(); - Variable* const index = ct->arguments[0].Var(); - Variable* const target = ct->arguments[2].Var(); - UpdateRuleStats("array_int_element: rewrite as a linear constraint"); - - if (start == 1) { - ct->type = "int_eq"; - ct->RemoveArg(1); - } else { - // Rewrite constraint into a int_lin_eq - ct->type = "int_lin_eq"; - ct->arguments[0] = Argument::IntegerList({-1, 1}); - ct->arguments[1] = Argument::VarRefArray({target, index}); - ct->arguments[2] = Argument::IntegerValue(1 - start); - } - } -} - -void Presolver::Run(Model* model) { - // Should rewrite float constraints. - if (absl::GetFlag(FLAGS_fz_floats_are_ints)) { - // Treat float variables as int variables, convert constraints to int. - for (Constraint* const ct : model->constraints()) { - const std::string& id = ct->type; - if (id == "int2float") { - ct->type = "int_eq"; - } else if (id == "float_lin_le") { - ct->type = "int_lin_le"; - } else if (id == "float_lin_eq") { - ct->type = "int_lin_eq"; - } - } - } - - // Regroup increasing sequence of int_lin_eq([1,..,1,-1], [x1, ..., xn, yn]) - // into sequence of int_plus(x1, x2, y2), int_plus(y2, x3, y3)... 
- std::vector current_variables; - Variable* target_variable = nullptr; - Constraint* first_constraint = nullptr; - for (Constraint* const ct : model->constraints()) { - if (target_variable == nullptr) { - if (ct->type == "int_lin_eq" && ct->arguments[0].values.size() == 3 && - AreOnesFollowedByMinusOne(ct->arguments[0].values) && - ct->arguments[1].values.empty() && ct->arguments[2].Value() == 0) { - current_variables = ct->arguments[1].variables; - target_variable = current_variables.back(); - current_variables.pop_back(); - first_constraint = ct; - } - } else { - if (ct->type == "int_lin_eq" && - AreOnesFollowedByMinusOne(ct->arguments[0].values) && - ct->arguments[0].values.size() == current_variables.size() + 2 && - IsStrictPrefix(current_variables, ct->arguments[1].variables)) { - current_variables = ct->arguments[1].variables; - // Rewrite ct into int_plus. - ct->type = "int_plus"; - ct->arguments.clear(); - ct->arguments.push_back(Argument::VarRef(target_variable)); - ct->arguments.push_back( - Argument::VarRef(current_variables[current_variables.size() - 2])); - ct->arguments.push_back(Argument::VarRef(current_variables.back())); - target_variable = current_variables.back(); - current_variables.pop_back(); - - // We clean the first constraint too. - if (first_constraint != nullptr) { - first_constraint = nullptr; - } - } else { - current_variables.clear(); - target_variable = nullptr; - } - } - } - - // First pass. - for (Constraint* const ct : model->constraints()) { - if (ct->active && ct->type == "bool2int") { - PresolveBool2Int(ct); - } else if (ct->active && ct->type == "int2float") { - PresolveInt2Float(ct); - } else if (ct->active && ct->type == "int_lin_eq" && - ct->arguments[1].variables.size() == 3 && - ct->strong_propagation) { - PresolveStoreFlatteningMapping(ct); - } - } - if (!var_representative_map_.empty()) { - // Some new substitutions were introduced. Let's process them. 
- SubstituteEverywhere(model); - var_representative_map_.clear(); - var_representative_vector_.clear(); - } - - // Second pass. - for (Constraint* const ct : model->constraints()) { - if (ct->type == "array_int_element" || ct->type == "array_bool_element") { - PresolveSimplifyElement(ct); - } - } - - // Third pass: process objective with floating point coefficients. - Variable* float_objective_var = nullptr; - for (Variable* var : model->variables()) { - if (!var->active) continue; - if (var->domain.is_float) { - CHECK(float_objective_var == nullptr); - float_objective_var = var; - } - } - - Constraint* float_objective_ct = nullptr; - if (float_objective_var != nullptr) { - for (Constraint* ct : model->constraints()) { - if (!ct->active) continue; - if (ct->type == "float_lin_eq") { - CHECK(float_objective_ct == nullptr); - float_objective_ct = ct; - break; - } - } - } - - if (float_objective_ct != nullptr || float_objective_var != nullptr) { - CHECK(float_objective_ct != nullptr); - CHECK(float_objective_var != nullptr); - const int arity = float_objective_ct->arguments[0].Size(); - CHECK_EQ(float_objective_ct->arguments[1].variables[arity - 1], - float_objective_var); - CHECK_EQ(float_objective_ct->arguments[0].floats[arity - 1], -1.0); - for (int i = 0; i + 1 < arity; ++i) { - model->AddFloatingPointObjectiveTerm( - float_objective_ct->arguments[1].variables[i], - float_objective_ct->arguments[0].floats[i]); - } - model->SetFloatingPointObjectiveOffset( - -float_objective_ct->arguments[2].floats[0]); - model->ClearObjective(); - float_objective_var->active = false; - float_objective_ct->active = false; - } - - // Report presolve rules statistics. 
- if (!successful_rules_.empty()) { - for (const auto& rule : successful_rules_) { - if (rule.second == 1) { - SOLVER_LOG(logger_, " - rule '", rule.first, "' was applied 1 time"); - } else { - SOLVER_LOG(logger_, " - rule '", rule.first, "' was applied ", - rule.second, " times"); - } - } - } -} - -// ----- Substitution support ----- - -void Presolver::AddVariableSubstitution(Variable* from, Variable* to) { - CHECK(from != nullptr); - CHECK(to != nullptr); - // Apply the substitutions, if any. - from = FindRepresentativeOfVar(from); - to = FindRepresentativeOfVar(to); - if (to->temporary) { - // Let's switch to keep a non temporary as representative. - Variable* tmp = to; - to = from; - from = tmp; - } - if (from != to) { - CHECK(to->Merge(from->name, from->domain, from->temporary)); - from->active = false; - var_representative_map_[from] = to; - var_representative_vector_.push_back(from); - } -} - -Variable* Presolver::FindRepresentativeOfVar(Variable* var) { - if (var == nullptr) return nullptr; - Variable* start_var = var; - // First loop: find the top parent. - for (;;) { - const auto& it = var_representative_map_.find(var); - Variable* parent = it == var_representative_map_.end() ? var : it->second; - if (parent == var) break; - var = parent; - } - // Second loop: attach all the path to the top parent. - while (start_var != var) { - Variable* const parent = var_representative_map_[start_var]; - var_representative_map_[start_var] = var; - start_var = parent; - } - const auto& iter = var_representative_map_.find(var); - return iter == var_representative_map_.end() ? var : iter->second; -} - -void Presolver::SubstituteEverywhere(Model* model) { - // Rewrite the constraints. 
- for (Constraint* const ct : model->constraints()) { - if (ct != nullptr && ct->active) { - for (int i = 0; i < ct->arguments.size(); ++i) { - Argument& argument = ct->arguments[i]; - switch (argument.type) { - case Argument::VAR_REF: - case Argument::VAR_REF_ARRAY: { - for (int i = 0; i < argument.variables.size(); ++i) { - Variable* const old_var = argument.variables[i]; - Variable* const new_var = FindRepresentativeOfVar(old_var); - if (new_var != old_var) { - argument.variables[i] = new_var; - } - } - break; - } - default: { - } - } - } - } - } - // Rewrite the search. - for (Annotation* const ann : model->mutable_search_annotations()) { - SubstituteAnnotation(ann); - } - // Rewrite the output. - for (SolutionOutputSpecs* const output : model->mutable_output()) { - output->variable = FindRepresentativeOfVar(output->variable); - for (int i = 0; i < output->flat_variables.size(); ++i) { - output->flat_variables[i] = - FindRepresentativeOfVar(output->flat_variables[i]); - } - } - // Do not forget to merge domain that could have evolved asynchronously - // during presolve. - for (const auto& iter : var_representative_map_) { - iter.second->domain.IntersectWithDomain(iter.first->domain); - } - - // Change the objective variable. - Variable* const current_objective = model->objective(); - if (current_objective == nullptr) return; - Variable* const new_objective = FindRepresentativeOfVar(current_objective); - if (new_objective != current_objective) { - model->SetObjective(new_objective); - } -} - -void Presolver::SubstituteAnnotation(Annotation* ann) { - // TODO(user): Remove recursion. 
- switch (ann->type) { - case Annotation::ANNOTATION_LIST: - case Annotation::FUNCTION_CALL: { - for (int i = 0; i < ann->annotations.size(); ++i) { - SubstituteAnnotation(&ann->annotations[i]); - } - break; - } - case Annotation::VAR_REF: - case Annotation::VAR_REF_ARRAY: { - for (int i = 0; i < ann->variables.size(); ++i) { - ann->variables[i] = FindRepresentativeOfVar(ann->variables[i]); - } - break; - } - default: { - } - } -} - -} // namespace fz -} // namespace operations_research diff --git a/ortools/flatzinc/presolve.h b/ortools/flatzinc/presolve.h deleted file mode 100644 index 38675968c7..0000000000 --- a/ortools/flatzinc/presolve.h +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef OR_TOOLS_FLATZINC_PRESOLVE_H_ -#define OR_TOOLS_FLATZINC_PRESOLVE_H_ - -#include -#include -#include -#include -#include - -#include "absl/container/flat_hash_map.h" -#include "absl/container/flat_hash_set.h" -#include "absl/strings/match.h" -#include "ortools/base/hash.h" -#include "ortools/base/logging.h" -#include "ortools/base/types.h" -#include "ortools/flatzinc/model.h" -#include "ortools/util/logging.h" - -namespace operations_research { -namespace fz { -// The Presolver "pre-solves" a Model by applying some iterative -// transformations to it, which may simplify and/or shrink the model. -// -// TODO(user): Error reporting of unfeasible models. 
-class Presolver { - public: - explicit Presolver(SolverLogger* logger) : logger_(logger) {} - // Recursively apply all the pre-solve rules to the model, until exhaustion. - // The reduced model will: - // - Have some unused variables. - // - Have some unused constraints (marked as inactive). - // - Have some modified constraints (for example, they will no longer - // refer to unused variables). - void Run(Model* model); - - private: - // This struct stores the mapping of two index variables (of a 2D array; not - // included here) onto a single index variable (of the flattened 1D array). - // The original 2D array could be trimmed in the process; so we also need an - // offset. - // Eg. new_index_var = index_var1 * int_coeff + index_var2 + int_offset - struct Array2DIndexMapping { - Variable* variable1; - int64_t coefficient; - Variable* variable2; - int64_t offset; - Constraint* constraint; - - Array2DIndexMapping() - : variable1(nullptr), - coefficient(0), - variable2(nullptr), - offset(0), - constraint(nullptr) {} - Array2DIndexMapping(Variable* v1, int64_t c, Variable* v2, int64_t o, - Constraint* ct) - : variable1(v1), - coefficient(c), - variable2(v2), - offset(o), - constraint(ct) {} - }; - - // Substitution support. - void SubstituteEverywhere(Model* model); - void SubstituteAnnotation(Annotation* ann); - - // Presolve rules. - void PresolveBool2Int(Constraint* ct); - void PresolveInt2Float(Constraint* ct); - void PresolveStoreFlatteningMapping(Constraint* ct); - void PresolveSimplifyElement(Constraint* ct); - - // Helpers. - void UpdateRuleStats(const std::string& rule_name) { - successful_rules_[rule_name]++; - } - - // The presolver will discover some equivalence classes of variables [two - // variable are equivalent when replacing one by the other leads to the same - // logical model]. We will store them here, using a Union-find data structure. - // See http://en.wikipedia.org/wiki/Disjoint-set_data_structure. - // Note that the equivalence is directed. 
We prefer to replace all instances - // of 'from' with 'to', rather than the opposite. - void AddVariableSubstitution(Variable* from, Variable* to); - Variable* FindRepresentativeOfVar(Variable* var); - absl::flat_hash_map var_representative_map_; - std::vector var_representative_vector_; - - // Stores array2d_index_map_[z] = a * x + y + b. - absl::flat_hash_map array2d_index_map_; - - // Count applications of presolve rules. Use a sorted map for reporting - // purposes. - std::map successful_rules_; - - SolverLogger* logger_; -}; -} // namespace fz -} // namespace operations_research - -#endif // OR_TOOLS_FLATZINC_PRESOLVE_H_ From 4fd0acf85b8fd2307a1f737ef79045bb3f2394c0 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 11 Oct 2024 17:08:37 +0200 Subject: [PATCH 084/105] bazel: default to python 3.11 --- WORKSPACE | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/WORKSPACE b/WORKSPACE index 20028d1505..04e851c51d 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -110,13 +110,13 @@ load("@rules_python//python:repositories.bzl", "py_repositories") py_repositories() load("@rules_python//python:repositories.bzl", "python_register_toolchains") -DEFAULT_PYTHON = "3.12" +DEFAULT_PYTHON = "3.11" python_register_toolchains( - name = "python3_12", + name = "python3_11", python_version = DEFAULT_PYTHON, ignore_root_user_error=True, ) -load("@python3_12//:defs.bzl", "interpreter") +load("@python3_11//:defs.bzl", "interpreter") # Create a central external repo, @pip_deps, that contains Bazel targets for all the # third-party packages specified in the bazel/requirements.txt file. 
From c3b4aaf4d1c44d57eba182ca6f27ddb8250b49cc Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 11 Oct 2024 17:09:02 +0200 Subject: [PATCH 085/105] export from google3 --- ortools/glop/revised_simplex.h | 4 ++++ ortools/linear_solver/java/linear_solver.i | 2 +- ortools/math_opt/samples/cpp/BUILD.bazel | 2 -- ortools/math_opt/solvers/BUILD.bazel | 1 + 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/ortools/glop/revised_simplex.h b/ortools/glop/revised_simplex.h index 90e55698e4..8b2ec798da 100644 --- a/ortools/glop/revised_simplex.h +++ b/ortools/glop/revised_simplex.h @@ -187,6 +187,10 @@ class RevisedSimplex { double DeterministicTime() const; bool objective_limit_reached() const { return objective_limit_reached_; } + DenseColumn::ConstView GetDualSquaredNorms() { + return dual_edge_norms_.GetEdgeSquaredNorms(); + } + const DenseBitRow& GetNotBasicBitRow() const { return variables_info_.GetNotBasicBitRow(); } diff --git a/ortools/linear_solver/java/linear_solver.i b/ortools/linear_solver/java/linear_solver.i index 83d9db7e56..cfd1c1add8 100644 --- a/ortools/linear_solver/java/linear_solver.i +++ b/ortools/linear_solver/java/linear_solver.i @@ -402,7 +402,7 @@ PROTO2_RETURN( %rename (suppressOutput) operations_research::MPSolver::SuppressOutput; // no test %rename (lookupConstraintOrNull) operations_research::MPSolver::LookupConstraintOrNull; // no test %rename (lookupVariableOrNull) operations_research::MPSolver::LookupVariableOrNull; // no test -%rename (write) operations_research::MPSolver::Write; +%rename (write) operations_research::MPSolver::Write; // no test // Expose very advanced parts of the MPSolver API. For expert users only. 
%rename (computeConstraintActivities) operations_research::MPSolver::ComputeConstraintActivities; diff --git a/ortools/math_opt/samples/cpp/BUILD.bazel b/ortools/math_opt/samples/cpp/BUILD.bazel index 8f0d32c2c0..c7b4dabff8 100644 --- a/ortools/math_opt/samples/cpp/BUILD.bazel +++ b/ortools/math_opt/samples/cpp/BUILD.bazel @@ -11,8 +11,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -package(default_visibility = ["//ortools/math_opt:__subpackages__"]) - cc_binary( name = "basic_example", srcs = ["basic_example.cc"], diff --git a/ortools/math_opt/solvers/BUILD.bazel b/ortools/math_opt/solvers/BUILD.bazel index e7e8054a97..023e966a49 100644 --- a/ortools/math_opt/solvers/BUILD.bazel +++ b/ortools/math_opt/solvers/BUILD.bazel @@ -618,6 +618,7 @@ cc_test( "//ortools/math_opt/solver_tests:lp_parameter_tests", "//ortools/math_opt/solver_tests:lp_tests", "//ortools/math_opt/solver_tests:mip_tests", + "//ortools/math_opt/solver_tests:multi_objective_tests", "//ortools/math_opt/solver_tests:status_tests", "//ortools/math_opt/testing:param_name", "@com_google_absl//absl/status", From 49062997cbf54eae7b524398371f5b615c5b0a91 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 11 Oct 2024 17:09:16 +0200 Subject: [PATCH 086/105] routing: export from google3 --- ortools/constraint_solver/constraint_solver.h | 10 +- .../constraint_solver/constraint_solveri.h | 101 ++++-- ortools/constraint_solver/local_search.cc | 288 ++++++++++++------ ortools/routing/decision_builders.cc | 3 + ortools/routing/filters.cc | 96 +++--- ortools/routing/filters.h | 233 ++++++++------ ortools/routing/neighborhoods.cc | 192 +++++++----- ortools/routing/neighborhoods.h | 34 ++- ortools/routing/routing.cc | 173 +++++++---- ortools/routing/routing.h | 117 +++++-- ortools/routing/types.h | 4 + 11 files changed, 834 insertions(+), 417 deletions(-) diff --git a/ortools/constraint_solver/constraint_solver.h 
b/ortools/constraint_solver/constraint_solver.h index 203cb42ba5..300db61a58 100644 --- a/ortools/constraint_solver/constraint_solver.h +++ b/ortools/constraint_solver/constraint_solver.h @@ -2782,11 +2782,17 @@ class Solver { /// Local Search Operators. LocalSearchOperator* MakeOperator( const std::vector& vars, LocalSearchOperators op, - std::function&(int, int)> get_neighbors = nullptr); + std::function&(int, int)> get_incoming_neighbors = + nullptr, + std::function&(int, int)> get_outgoing_neighbors = + nullptr); LocalSearchOperator* MakeOperator( const std::vector& vars, const std::vector& secondary_vars, LocalSearchOperators op, - std::function&(int, int)> get_neighbors = nullptr); + std::function&(int, int)> get_incoming_neighbors = + nullptr, + std::function&(int, int)> get_outgoing_neighbors = + nullptr); // TODO(user): Make the callback an IndexEvaluator2 when there are no // secondary variables. LocalSearchOperator* MakeOperator(const std::vector& vars, diff --git a/ortools/constraint_solver/constraint_solveri.h b/ortools/constraint_solver/constraint_solveri.h index 23eaa17077..63b4beb231 100644 --- a/ortools/constraint_solver/constraint_solveri.h +++ b/ortools/constraint_solver/constraint_solveri.h @@ -509,7 +509,7 @@ class CallMethod0 : public Demon { ~CallMethod0() override {} - void Run(Solver* const s) override { (constraint_->*method_)(); } + void Run(Solver* const) override { (constraint_->*method_)(); } std::string DebugString() const override { return "CallMethod_" + name_ + "(" + constraint_->DebugString() + ")"; @@ -548,7 +548,7 @@ class CallMethod1 : public Demon { ~CallMethod1() override {} - void Run(Solver* const s) override { (constraint_->*method_)(param1_); } + void Run(Solver* const) override { (constraint_->*method_)(param1_); } std::string DebugString() const override { return absl::StrCat("CallMethod_", name_, "(", constraint_->DebugString(), @@ -582,7 +582,7 @@ class CallMethod2 : public Demon { ~CallMethod2() override {} - void 
Run(Solver* const s) override { + void Run(Solver* const) override { (constraint_->*method_)(param1_, param2_); } @@ -622,7 +622,7 @@ class CallMethod3 : public Demon { ~CallMethod3() override {} - void Run(Solver* const s) override { + void Run(Solver* const) override { (constraint_->*method_)(param1_, param2_, param3_); } @@ -666,7 +666,7 @@ class DelayedCallMethod0 : public Demon { ~DelayedCallMethod0() override {} - void Run(Solver* const s) override { (constraint_->*method_)(); } + void Run(Solver* const) override { (constraint_->*method_)(); } Solver::DemonPriority priority() const override { return Solver::DELAYED_PRIORITY; @@ -700,7 +700,7 @@ class DelayedCallMethod1 : public Demon { ~DelayedCallMethod1() override {} - void Run(Solver* const s) override { (constraint_->*method_)(param1_); } + void Run(Solver* const) override { (constraint_->*method_)(param1_); } Solver::DemonPriority priority() const override { return Solver::DELAYED_PRIORITY; @@ -740,7 +740,7 @@ class DelayedCallMethod2 : public Demon { ~DelayedCallMethod2() override {} - void Run(Solver* const s) override { + void Run(Solver* const) override { (constraint_->*method_)(param1_, param2_); } @@ -1158,7 +1158,7 @@ class IntVarLocalSearchOperator : public LocalSearchOperator { } /// Returns the variable of given index. IntVar* Var(int64_t index) const { return vars_[index]; } - virtual bool SkipUnchanged(int index) const { return false; } + virtual bool SkipUnchanged(int) const { return false; } int64_t OldValue(int64_t index) const { return state_.CommittedValue(index); } int64_t PrevValue(int64_t index) const { return state_.CheckPointValue(index); @@ -1380,23 +1380,31 @@ class PathOperator : public IntVarLocalSearchOperator { /// 'start_empty_path_class' can be nullptr in which case no symmetries will /// be removed. std::function start_empty_path_class; - /// Callback returning neighbors of a node on a path starting at start_node. 
- std::function&(/*node=*/int, /*start_node=*/int)> - get_neighbors; + /// Callbacks returning incoming/outgoing neighbors of a node on a path + /// starting at start_node. + std::function&( + /*node=*/int, /*start_node=*/int)> + get_incoming_neighbors; + std::function&( + /*node=*/int, /*start_node=*/int)> + get_outgoing_neighbors; }; /// Builds an instance of PathOperator from next and path variables. PathOperator(const std::vector& next_vars, const std::vector& path_vars, IterationParameters iteration_parameters); - PathOperator(const std::vector& next_vars, - const std::vector& path_vars, int number_of_base_nodes, - bool skip_locally_optimal_paths, bool accept_path_end_base, - std::function start_empty_path_class, - std::function&(int, int)> get_neighbors) + PathOperator( + const std::vector& next_vars, + const std::vector& path_vars, int number_of_base_nodes, + bool skip_locally_optimal_paths, bool accept_path_end_base, + std::function start_empty_path_class, + std::function&(int, int)> get_incoming_neighbors, + std::function&(int, int)> get_outgoing_neighbors) : PathOperator(next_vars, path_vars, {number_of_base_nodes, skip_locally_optimal_paths, accept_path_end_base, std::move(start_empty_path_class), - std::move(get_neighbors)}) {} + std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors)}) {} ~PathOperator() override {} virtual bool MakeNeighbor() = 0; void Reset() override; @@ -1488,7 +1496,7 @@ class PathOperator : public IntVarLocalSearchOperator { // TODO(user): ideally this should be OnSamePath(int64_t node1, int64_t // node2); /// it's currently way more complicated to implement. - virtual bool OnSamePathAsPreviousBase(int64_t base_index) { return false; } + virtual bool OnSamePathAsPreviousBase(int64_t) { return false; } /// Returns the index of the node to which the base node of index base_index /// must be set to when it reaches the end of a path. /// By default, it is set to the start of the current path. 
@@ -1504,7 +1512,7 @@ class PathOperator : public IntVarLocalSearchOperator { } /// Indicates if alternatives should be considered when iterating over base /// nodes. - virtual bool ConsiderAlternatives(int64_t base_index) const { return false; } + virtual bool ConsiderAlternatives(int64_t) const { return false; } int64_t OldNext(int64_t node) const { DCHECK(!IsPathEnd(node)); @@ -1580,7 +1588,7 @@ class PathOperator : public IntVarLocalSearchOperator { /// Handling node alternatives. /// Adds a set of node alternatives to the neighborhood. No node can be in - /// two altrnatives. + /// two alternatives. int AddAlternativeSet(const std::vector& alternative_set) { const int alternative = alternative_sets_.size(); for (int64_t node : alternative_set) { @@ -1635,14 +1643,48 @@ class PathOperator : public IntVarLocalSearchOperator { int64_t exclude) const; bool HasNeighbors() const { - return iteration_parameters_.get_neighbors != nullptr; + return iteration_parameters_.get_incoming_neighbors != nullptr || + iteration_parameters_.get_outgoing_neighbors != nullptr; } - int GetNeighborForBaseNode(int64_t base_index) const { + struct Neighbor { + // Index of the neighbor node. + int neighbor; + // True if 'neighbor' is an outgoing neighbor (i.e. arc main_node->neighbor) + // and false if it's an incoming one (arc neighbor->main_node). + bool outgoing; + }; + Neighbor GetNeighborForBaseNode(int64_t base_index) const { DCHECK(HasNeighbors()); - return iteration_parameters_.get_neighbors( - BaseNode(base_index), - StartNode(base_index))[calls_per_base_node_[base_index]]; + const int64_t node = BaseNode(base_index); + const int64_t start = StartNode(base_index); + + const int num_calls = calls_per_base_node_[base_index]; + const auto& get_incoming_neighbors = + iteration_parameters_.get_incoming_neighbors; + const std::vector& incoming_neighbors = + IsPathStart(node) || get_incoming_neighbors == nullptr + ? 
std::vector() + : get_incoming_neighbors(node, start); + + if (num_calls < incoming_neighbors.size()) { + // Incoming neighbor. + DCHECK(!IsPathStart(node)); + return {.neighbor = incoming_neighbors[num_calls], .outgoing = false}; + } + + // Outgoing neighbor. + if (IsPathEnd(node)) return {.neighbor = -1, .outgoing = true}; + const auto& get_outgoing_neighbors = + iteration_parameters_.get_outgoing_neighbors; + if (get_outgoing_neighbors == nullptr) { + DCHECK(IsPathStart(node)); + return {.neighbor = -1, .outgoing = true}; + } + const int index = num_calls - incoming_neighbors.size(); + DCHECK_LT(index, get_outgoing_neighbors(node, start).size()); + return {.neighbor = get_outgoing_neighbors(node, start)[index], + .outgoing = true}; } const int number_of_nexts_; @@ -1758,7 +1800,8 @@ LocalSearchOperator* MakeLocalSearchOperatorWithNeighbors( Solver* solver, const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, - std::function&(int, int)> get_neighbors); + std::function&(int, int)> get_incoming_neighbors, + std::function&(int, int)> get_outgoing_neighbors); /// Classes to which this template function can be applied to as of 04/2014. /// Usage: LocalSearchOperator* op = MakeLocalSearchOperator(...); @@ -2144,9 +2187,9 @@ class LocalSearchFilter : public BaseObject { public: /// Lets the filter know what delta and deltadelta will be passed in the next /// Accept(). - virtual void Relax(const Assignment* delta, const Assignment* deltadelta) {} + virtual void Relax(const Assignment*, const Assignment*) {} /// Dual of Relax(), lets the filter know that the delta was accepted. 
- virtual void Commit(const Assignment* delta, const Assignment* deltadelta) {} + virtual void Commit(const Assignment*, const Assignment*) {} /// Accepts a "delta" given the assignment with which the filter has been /// synchronized; the delta holds the variables which have been modified and @@ -2264,7 +2307,7 @@ class IntVarLocalSearchFilter : public LocalSearchFilter { bool IsVarSynced(int index) const { return var_synced_[index]; } protected: - virtual void OnSynchronize(const Assignment* delta) {} + virtual void OnSynchronize(const Assignment*) {} void SynchronizeOnAssignment(const Assignment* assignment); private: diff --git a/ortools/constraint_solver/local_search.cc b/ortools/constraint_solver/local_search.cc index 3bcd9f993b..f95fee2d9f 100644 --- a/ortools/constraint_solver/local_search.cc +++ b/ortools/constraint_solver/local_search.cc @@ -535,11 +535,27 @@ bool PathOperator::IncrementPosition() { base_sibling_alternatives_[i] = 0; } } - if (iteration_parameters_.get_neighbors != nullptr && - ++calls_per_base_node_[i] < - iteration_parameters_.get_neighbors(BaseNode(i), StartNode(i)) - .size()) { - break; + if (HasNeighbors()) { + const int64_t base_node = BaseNode(i); + const int64_t start_node = StartNode(i); + const int num_incoming_neighbors = + IsPathStart(base_node) || + iteration_parameters_.get_incoming_neighbors == nullptr + ? 0 + : iteration_parameters_ + .get_incoming_neighbors(base_node, start_node) + .size(); + const int num_outgoing_neighbors = + IsPathEnd(base_node) || + iteration_parameters_.get_outgoing_neighbors == nullptr + ? 
0 + : iteration_parameters_ + .get_outgoing_neighbors(base_node, start_node) + .size(); + if (++calls_per_base_node_[i] < + num_incoming_neighbors + num_outgoing_neighbors) { + break; + } } calls_per_base_node_[i] = 0; base_alternatives_[i] = 0; @@ -894,17 +910,26 @@ bool PathOperator::CheckChainValidity(int64_t before_chain, int64_t chain_end, // 1 -> 3 -> 2 -> 4 -> 5 // 1 -> 4 -> 3 -> 2 -> 5 // 1 -> 2 -> 4 -> 3 -> 5 + +using NeighborAccessor = + std::function&(/*node=*/int, /*start_node=*/int)>; class TwoOpt : public PathOperator { public: - TwoOpt( - const std::vector& vars, - const std::vector& secondary_vars, - std::function start_empty_path_class, - std::function&(int, int)> get_neighbors = nullptr) - : PathOperator( - vars, secondary_vars, get_neighbors == nullptr ? 2 : 1, - /*skip_locally_optimal_paths=*/true, /*accept_path_end_base=*/true, - std::move(start_empty_path_class), std::move(get_neighbors)), + TwoOpt(const std::vector& vars, + const std::vector& secondary_vars, + std::function start_empty_path_class, + NeighborAccessor get_incoming_neighbors = nullptr, + NeighborAccessor get_outgoing_neighbors = nullptr) + : PathOperator(vars, secondary_vars, + (get_incoming_neighbors == nullptr && + get_outgoing_neighbors == nullptr) + ? 
 2 + : 1, /*skip_locally_optimal_paths=*/true, + /*accept_path_end_base=*/true, + std::move(start_empty_path_class), + std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors)), last_base_(-1), last_(-1) {} ~TwoOpt() override {} @@ -939,17 +964,27 @@ bool TwoOpt::MakeNeighbor() { const int64_t node0 = BaseNode(0); - int64_t node1 = -1; + int64_t before_chain = node0; + int64_t after_chain = -1; if (HasNeighbors()) { - const int64_t neighbor = GetNeighborForBaseNode(0); - if (IsInactive(neighbor)) return false; + const auto [neighbor, outgoing] = GetNeighborForBaseNode(0); + if (neighbor < 0 || IsInactive(neighbor)) return false; if (CurrentNodePathStart(node0) != CurrentNodePathStart(neighbor)) { return false; } - node1 = Next(neighbor); + if (outgoing) { + if (IsPathEnd(neighbor)) return false; + // Reverse the chain starting *after* node0 and ending with 'neighbor'. + after_chain = Next(neighbor); + } else { + if (IsPathStart(neighbor)) return false; + // Reverse the chain starting with 'neighbor' and ending before node0. + before_chain = Prev(neighbor); + after_chain = node0; + } } else { DCHECK_EQ(StartNode(0), StartNode(1)); - node1 = BaseNode(1); + after_chain = BaseNode(1); } // Incrementality is disabled with neighbors. if (last_base_ != node0 || last_ == -1 || HasNeighbors()) {
&& last_ != chain_last) { @@ -970,9 +1005,10 @@ bool TwoOpt::MakeNeighbor() { last_ = -1; return false; } + DCHECK_EQ(before_chain, node0); const int64_t to_move = Next(last_); - DCHECK_EQ(Next(to_move), node1); - return MoveChain(last_, to_move, node0); + DCHECK_EQ(Next(to_move), after_chain); + return MoveChain(last_, to_move, before_chain); } // ----- Relocate ----- @@ -995,12 +1031,19 @@ class Relocate : public PathOperator { Relocate(const std::vector& vars, const std::vector& secondary_vars, const std::string& name, std::function start_empty_path_class, - std::function&(int, int)> get_neighbors, - int64_t chain_length = 1LL, bool single_path = false) + NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors, int64_t chain_length = 1LL, + bool single_path = false) : PathOperator( - vars, secondary_vars, get_neighbors == nullptr ? 2 : 1, - /*skip_locally_optimal_paths=*/true, /*accept_path_end_base=*/false, - std::move(start_empty_path_class), std::move(get_neighbors)), + vars, secondary_vars, + (get_incoming_neighbors == nullptr && + get_outgoing_neighbors == nullptr) + ? 2 + : 1, + /*skip_locally_optimal_paths=*/true, + /*accept_path_end_base=*/false, std::move(start_empty_path_class), + chain_length == 1 ? 
std::move(get_incoming_neighbors) : nullptr, + std::move(get_outgoing_neighbors)), chain_length_(chain_length), single_path_(single_path), name_(name) { @@ -1009,20 +1052,23 @@ class Relocate : public PathOperator { Relocate(const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, - std::function&(int, int)> get_neighbors, - int64_t chain_length = 1LL, bool single_path = false) + NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors, int64_t chain_length = 1LL, + bool single_path = false) : Relocate(vars, secondary_vars, absl::StrCat("Relocate<", chain_length, ">"), - std::move(start_empty_path_class), std::move(get_neighbors), - chain_length, single_path) {} + std::move(start_empty_path_class), + std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors), chain_length, single_path) { + } Relocate(const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, int64_t chain_length = 1LL, bool single_path = false) : Relocate(vars, secondary_vars, absl::StrCat("Relocate<", chain_length, ">"), - std::move(start_empty_path_class), nullptr, chain_length, - single_path) {} + std::move(start_empty_path_class), nullptr, nullptr, + chain_length, single_path) {} ~Relocate() override {} bool MakeNeighbor() override; @@ -1052,14 +1098,24 @@ bool Relocate::MakeNeighbor() { return !IsPathEnd(chain_end) && MoveChain(before_chain, chain_end, destination); }; + const int64_t node0 = BaseNode(0); if (HasNeighbors()) { - const int64_t node = GetNeighborForBaseNode(0); - if (IsInactive(node)) return false; - return do_move(/*before_chain=*/Prev(node), - /*destination=*/BaseNode(0)); + const auto [neighbor, outgoing] = GetNeighborForBaseNode(0); + if (neighbor < 0 || IsInactive(neighbor)) return false; + if (outgoing) { + return do_move(/*before_chain=*/Prev(neighbor), + /*destination=*/node0); + } + DCHECK_EQ(chain_length_, 1); + // TODO(user): Handle chain_length_ > 1 
for incoming neighbors by going + // backwards on the chain. NOTE: In this setting it makes sense to have path + // ends as base nodes as we move the chain "before" the base node. + DCHECK(!IsPathStart(node0)) << "Path starts have no incoming neighbors."; + return do_move(/*before_chain=*/Prev(neighbor), + /*destination=*/Prev(node0)); } DCHECK(!single_path_ || StartNode(0) == StartNode(1)); - return do_move(/*before_chain=*/BaseNode(0), /*destination=*/BaseNode(1)); + return do_move(/*before_chain=*/node0, /*destination=*/BaseNode(1)); } // ----- Exchange ----- @@ -1074,15 +1130,21 @@ bool Relocate::MakeNeighbor() { class Exchange : public PathOperator { public: - Exchange( - const std::vector& vars, - const std::vector& secondary_vars, - std::function start_empty_path_class, - std::function&(int, int)> get_neighbors = nullptr) - : PathOperator(vars, secondary_vars, get_neighbors == nullptr ? 2 : 1, + Exchange(const std::vector& vars, + const std::vector& secondary_vars, + std::function start_empty_path_class, + NeighborAccessor get_incoming_neighbors = nullptr, + NeighborAccessor get_outgoing_neighbors = nullptr) + : PathOperator(vars, secondary_vars, + (get_incoming_neighbors == nullptr && + get_outgoing_neighbors == nullptr) + ? 
2 + : 1, /*skip_locally_optimal_paths=*/true, /*accept_path_end_base=*/false, - std::move(start_empty_path_class), get_neighbors) {} + std::move(start_empty_path_class), + std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors)) {} ~Exchange() override {} bool MakeNeighbor() override; @@ -1091,19 +1153,28 @@ class Exchange : public PathOperator { bool Exchange::MakeNeighbor() { const auto do_move = [this](int64_t node1, int64_t node2) { - if (IsPathEnd(node1) || IsPathEnd(node2)) return false; + if (IsPathEnd(node1) || IsPathEnd(node2) || IsPathStart(node1) || + IsPathStart(node2)) { + return false; + } if (node1 == node2) return false; const int64_t prev_node1 = Prev(node1); const bool ok = MoveChain(prev_node1, node1, Prev(node2)); return MoveChain(Prev(node2), node2, prev_node1) || ok; }; + const int64_t node0 = BaseNode(0); if (HasNeighbors()) { - const int64_t node = GetNeighborForBaseNode(0); - if (IsInactive(node)) return false; - DCHECK(!IsPathStart(node)); - return do_move(Next(BaseNode(0)), node); + const auto [neighbor, outgoing] = GetNeighborForBaseNode(0); + if (neighbor < 0 || IsInactive(neighbor)) return false; + if (outgoing) { + // Exchange node0's next with 'neighbor'. + return do_move(Next(node0), neighbor); + } + DCHECK(!IsPathStart(node0)) << "Path starts have no incoming neighbors."; + // Exchange node0's prev with 'neighbor'. 
+ return do_move(Prev(node0), neighbor); } - return do_move(Next(BaseNode(0)), Next(BaseNode(1))); + return do_move(Next(node0), Next(BaseNode(1))); } // ----- Cross ----- @@ -1120,15 +1191,17 @@ bool Exchange::MakeNeighbor() { class Cross : public PathOperator { public: - Cross( - const std::vector& vars, - const std::vector& secondary_vars, - std::function start_empty_path_class, - std::function&(int, int)> get_neighbors = nullptr) + Cross(const std::vector& vars, + const std::vector& secondary_vars, + std::function start_empty_path_class, + NeighborAccessor get_incoming_neighbors = nullptr, + NeighborAccessor get_outgoing_neighbors = nullptr) : PathOperator( - vars, secondary_vars, get_neighbors == nullptr ? 2 : 1, - /*skip_locally_optimal_paths=*/true, /*accept_path_end_base=*/true, - std::move(start_empty_path_class), std::move(get_neighbors)) {} + vars, secondary_vars, get_outgoing_neighbors == nullptr ? 2 : 1, + /*skip_locally_optimal_paths=*/true, + /*accept_path_end_base=*/true, std::move(start_empty_path_class), + std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors)) {} ~Cross() override {} bool MakeNeighbor() override; @@ -1141,8 +1214,11 @@ bool Cross::MakeNeighbor() { const int64_t node0 = BaseNode(0); int64_t node1 = -1; if (node0 == start0) return false; + bool cross_path_starts = false; if (HasNeighbors()) { - const int64_t neighbor = GetNeighborForBaseNode(0); + const auto [neighbor, outgoing] = GetNeighborForBaseNode(0); + if (neighbor < 0) return false; + cross_path_starts = outgoing; DCHECK(!IsPathStart(neighbor)); if (IsInactive(neighbor)) return false; start1 = CurrentNodePathStart(neighbor); @@ -1153,20 +1229,16 @@ bool Cross::MakeNeighbor() { // If we are crossing path ends, node0 is the start of a chain and neighbor // is the last node before the other chain starting at node1, therefore // node1 = next(neighbor). 
- // TODO(user): When neighbors are considered, explore if having two - // versions of Cross makes sense, one exchanging path starts, the other - // path ends. Rationale: neighborhoods might not be symmetric. In practice, - // in particular when used through RoutingModel, neighborhoods are - // actually symmetric. - node1 = (start0 < start1) ? Prev(neighbor) : Next(neighbor); + node1 = cross_path_starts ? Prev(neighbor) : Next(neighbor); } else { start1 = StartNode(1); node1 = BaseNode(1); + cross_path_starts = start0 < start1; } if (start1 == start0 || node1 == start1) return false; bool moved = false; - if (start0 < start1) { + if (cross_path_starts) { // Cross path starts. // If two paths are equivalent don't exchange the full paths. if (PathClassFromStartNode(start0) == PathClassFromStartNode(start1) && @@ -1174,11 +1246,10 @@ bool Cross::MakeNeighbor() { IsPathEnd(Next(node1))) { return false; } - const int first1 = Next(start1); if (!IsPathEnd(node0)) moved |= MoveChain(start0, node0, start1); if (!IsPathEnd(node1)) moved |= MoveChain(Prev(first1), node1, start0); - } else { // start1 > start0. + } else { // Cross path ends. // If paths are equivalent, every end crossing has a corresponding start // crossing, we don't generate those symmetric neighbors. @@ -1213,10 +1284,12 @@ class BaseInactiveNodeToPathOperator : public PathOperator { const std::vector& vars, const std::vector& secondary_vars, int number_of_base_nodes, std::function start_empty_path_class, - std::function&(int, int)> get_neighbors = nullptr) + NeighborAccessor get_incoming_neighbors = nullptr, + NeighborAccessor get_outgoing_neighbors = nullptr) : PathOperator(vars, secondary_vars, number_of_base_nodes, false, false, std::move(start_empty_path_class), - std::move(get_neighbors)), + std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors)), inactive_node_(0) { // TODO(user): Activate skipping optimal paths. 
} @@ -1265,14 +1338,15 @@ bool BaseInactiveNodeToPathOperator::MakeOneNeighbor() { class MakeActiveOperator : public BaseInactiveNodeToPathOperator { public: - MakeActiveOperator( - const std::vector& vars, - const std::vector& secondary_vars, - std::function start_empty_path_class, - std::function&(int, int)> get_neighbors = nullptr) + MakeActiveOperator(const std::vector& vars, + const std::vector& secondary_vars, + std::function start_empty_path_class, + NeighborAccessor get_incoming_neighbors = nullptr, + NeighborAccessor get_outgoing_neighbors = nullptr) : BaseInactiveNodeToPathOperator(vars, secondary_vars, 1, std::move(start_empty_path_class), - std::move(get_neighbors)) {} + std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors)) {} ~MakeActiveOperator() override {} bool MakeNeighbor() override; @@ -1360,7 +1434,7 @@ class MakeInactiveOperator : public PathOperator { const std::vector& secondary_vars, std::function start_empty_path_class) : PathOperator(vars, secondary_vars, 1, true, false, - std::move(start_empty_path_class), nullptr) {} + std::move(start_empty_path_class), nullptr, nullptr) {} ~MakeInactiveOperator() override {} bool MakeNeighbor() override { const int64_t base = BaseNode(0); @@ -1385,7 +1459,7 @@ class RelocateAndMakeInactiveOperator : public PathOperator { const std::vector& secondary_vars, std::function start_empty_path_class) : PathOperator(vars, secondary_vars, 2, true, false, - std::move(start_empty_path_class), nullptr) {} + std::move(start_empty_path_class), nullptr, nullptr) {} ~RelocateAndMakeInactiveOperator() override {} bool MakeNeighbor() override { const int64_t destination = BaseNode(1); @@ -1421,7 +1495,7 @@ class MakeChainInactiveOperator : public PathOperator { : PathOperator(vars, secondary_vars, 2, /*skip_locally_optimal_paths=*/true, /*accept_path_end_base=*/false, - std::move(start_empty_path_class), nullptr) {} + std::move(start_empty_path_class), nullptr, nullptr) {} ~MakeChainInactiveOperator() 
override {} bool MakeNeighbor() override { const int64_t chain_end = BaseNode(1); @@ -1636,7 +1710,8 @@ class TSPOpt : public PathOperator { TSPOpt::TSPOpt(const std::vector& vars, const std::vector& secondary_vars, Solver::IndexEvaluator3 evaluator, int chain_length) - : PathOperator(vars, secondary_vars, 1, true, false, nullptr, nullptr), + : PathOperator(vars, secondary_vars, 1, true, false, nullptr, nullptr, + nullptr), hamiltonian_path_solver_(cost_), evaluator_(std::move(evaluator)), chain_length_(chain_length) {} @@ -1714,7 +1789,8 @@ class TSPLns : public PathOperator { TSPLns::TSPLns(const std::vector& vars, const std::vector& secondary_vars, Solver::IndexEvaluator3 evaluator, int tsp_size) - : PathOperator(vars, secondary_vars, 1, true, false, nullptr, nullptr), + : PathOperator(vars, secondary_vars, 1, true, false, nullptr, nullptr, + nullptr), hamiltonian_path_solver_(cost_), evaluator_(std::move(evaluator)), tsp_size_(tsp_size), @@ -1921,7 +1997,8 @@ class LinKernighan : public PathOperator { LinKernighan::LinKernighan(const std::vector& vars, const std::vector& secondary_vars, const Solver::IndexEvaluator3& evaluator, bool topt) - : PathOperator(vars, secondary_vars, 1, true, false, nullptr, nullptr), + : PathOperator(vars, secondary_vars, 1, true, false, nullptr, nullptr, + nullptr), evaluator_(evaluator), neighbors_(evaluator, *this, kNeighbors), topt_(topt) { @@ -2064,7 +2141,7 @@ class PathLns : public PathOperator { const std::vector& secondary_vars, int number_of_chunks, int chunk_size, bool unactive_fragments) : PathOperator(vars, secondary_vars, number_of_chunks, true, true, - nullptr, nullptr), + nullptr, nullptr, nullptr), number_of_chunks_(number_of_chunks), chunk_size_(chunk_size), unactive_fragments_(unactive_fragments) { @@ -2574,10 +2651,11 @@ LocalSearchOperator* MakeLocalSearchOperatorWithNeighbors( Solver* solver, const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, - std::function&(int, 
int)> get_neighbors) { - return solver->RevAlloc(new T(vars, secondary_vars, - std::move(start_empty_path_class), - std::move(get_neighbors))); + NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors) { + return solver->RevAlloc(new T( + vars, secondary_vars, std::move(start_empty_path_class), + std::move(get_incoming_neighbors), std::move(get_outgoing_neighbors))); } #define MAKE_LOCAL_SEARCH_OPERATOR(OperatorClass) \ @@ -2607,10 +2685,12 @@ LocalSearchOperator* MakeLocalSearchOperatorWithNeighbors( Solver * solver, const std::vector& vars, \ const std::vector& secondary_vars, \ std::function start_empty_path_class, \ - std::function&(int, int)> get_neighbors) { \ + NeighborAccessor get_incoming_neighbors, \ + NeighborAccessor get_outgoing_neighbors) { \ return solver->RevAlloc(new OperatorClass( \ vars, secondary_vars, std::move(start_empty_path_class), \ - std::move(get_neighbors))); \ + std::move(get_incoming_neighbors), \ + std::move(get_outgoing_neighbors))); \ } MAKE_LOCAL_SEARCH_OPERATOR(TwoOpt) @@ -2638,19 +2718,23 @@ MAKE_LOCAL_SEARCH_OPERATOR(RelocateAndMakeInactiveOperator) // MakeLocalSearchOperator functions. 
LocalSearchOperator* Solver::MakeOperator( const std::vector& vars, Solver::LocalSearchOperators op, - std::function&(int, int)> get_neighbors) { + NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors) { return MakeOperator(vars, std::vector(), op, - std::move(get_neighbors)); + std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors)); } LocalSearchOperator* Solver::MakeOperator( const std::vector& vars, const std::vector& secondary_vars, Solver::LocalSearchOperators op, - std::function&(int, int)> get_neighbors) { + NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors) { switch (op) { case Solver::TWOOPT: { return MakeLocalSearchOperatorWithNeighbors( - this, vars, secondary_vars, nullptr, std::move(get_neighbors)); + this, vars, secondary_vars, nullptr, + std::move(get_incoming_neighbors), std::move(get_outgoing_neighbors)); } case Solver::OROPT: { std::vector operators; @@ -2659,22 +2743,26 @@ LocalSearchOperator* Solver::MakeOperator( RevAlloc(new Relocate(vars, secondary_vars, /*name=*/absl::StrCat("OrOpt<", i, ">"), /*start_empty_path_class=*/nullptr, - /*get_neighbors=*/nullptr, /*chain_length=*/i, - /*single_path=*/true))); + /*get_incoming_neighbors=*/nullptr, + /*get_outgoing_neighbors=*/nullptr, + /*chain_length=*/i, /*single_path=*/true))); } return ConcatenateOperators(operators); } case Solver::RELOCATE: { return MakeLocalSearchOperatorWithNeighbors( - this, vars, secondary_vars, nullptr, std::move(get_neighbors)); + this, vars, secondary_vars, nullptr, + std::move(get_incoming_neighbors), std::move(get_outgoing_neighbors)); } case Solver::EXCHANGE: { return MakeLocalSearchOperatorWithNeighbors( - this, vars, secondary_vars, nullptr, std::move(get_neighbors)); + this, vars, secondary_vars, nullptr, + std::move(get_incoming_neighbors), std::move(get_outgoing_neighbors)); } case Solver::CROSS: { return MakeLocalSearchOperatorWithNeighbors( - this, vars, secondary_vars, nullptr, 
std::move(get_neighbors)); + this, vars, secondary_vars, nullptr, + std::move(get_incoming_neighbors), std::move(get_outgoing_neighbors)); } case Solver::MAKEACTIVE: { return MakeLocalSearchOperator( diff --git a/ortools/routing/decision_builders.cc b/ortools/routing/decision_builders.cc index a8ea4f2f6c..56cbfe2ddd 100644 --- a/ortools/routing/decision_builders.cc +++ b/ortools/routing/decision_builders.cc @@ -677,6 +677,7 @@ class RestoreDimensionValuesForUnchangedRoutes : public DecisionBuilder { explicit RestoreDimensionValuesForUnchangedRoutes(RoutingModel* model) : model_(model) { model_->AddAtSolutionCallback([this]() { AtSolution(); }); + model_->AddRestoreDimensionValuesResetCallback([this]() { Reset(); }); next_last_value_.resize(model_->Nexts().size(), -1); } @@ -688,6 +689,8 @@ class RestoreDimensionValuesForUnchangedRoutes : public DecisionBuilder { return MakeDecision(s); } + void Reset() { next_last_value_.assign(model_->Nexts().size(), -1); } + private: // Initialize() is lazy to make sure all dimensions have been instantiated // when initialization is done. 
diff --git a/ortools/routing/filters.cc b/ortools/routing/filters.cc index 6382937902..e9cc17ffac 100644 --- a/ortools/routing/filters.cc +++ b/ortools/routing/filters.cc @@ -41,7 +41,6 @@ #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/base/map_util.h" -#include "ortools/base/small_map.h" #include "ortools/base/strong_vector.h" #include "ortools/base/types.h" #include "ortools/constraint_solver/constraint_solver.h" @@ -81,7 +80,7 @@ class RouteConstraintFilter : public BasePathFilter { vehicle_to_start_[v] = start; } } - ~RouteConstraintFilter() override {} + ~RouteConstraintFilter() override = default; std::string DebugString() const override { return "RouteConstraintFilter"; } int64_t GetSynchronizedObjectiveValue() const override { return current_vehicle_cost_; @@ -691,7 +690,7 @@ namespace { class VehicleAmortizedCostFilter : public BasePathFilter { public: explicit VehicleAmortizedCostFilter(const RoutingModel& routing_model); - ~VehicleAmortizedCostFilter() override {} + ~VehicleAmortizedCostFilter() override = default; std::string DebugString() const override { return "VehicleAmortizedCostFilter"; } @@ -848,7 +847,7 @@ namespace { class TypeRegulationsFilter : public BasePathFilter { public: explicit TypeRegulationsFilter(const RoutingModel& model); - ~TypeRegulationsFilter() override {} + ~TypeRegulationsFilter() override = default; std::string DebugString() const override { return "TypeRegulationsFilter"; } private: @@ -1006,7 +1005,7 @@ class ChainCumulFilter : public BasePathFilter { public: ChainCumulFilter(const RoutingModel& routing_model, const RoutingDimension& dimension); - ~ChainCumulFilter() override {} + ~ChainCumulFilter() override = default; std::string DebugString() const override { return "ChainCumulFilter(" + name_ + ")"; } @@ -1119,7 +1118,7 @@ class PathCumulFilter : public BasePathFilter { const RoutingDimension& dimension, bool propagate_own_objective_value, bool filter_objective_cost, bool 
may_use_optimizers); - ~PathCumulFilter() override {} + ~PathCumulFilter() override = default; std::string DebugString() const override { return "PathCumulFilter(" + name_ + ")"; } @@ -1154,10 +1153,15 @@ class PathCumulFilter : public BasePathFilter { int64_t bound = -1; int64_t coefficient = 0; }; - struct Interval { + struct InitialInterval { int64_t min; int64_t max; }; + + // Data extractors used in constructor. + + std::vector ExtractInitialCumulIntervals(); + std::vector ExtractInitialSlackIntervals(); std::vector> ExtractNodeIndexToPrecedences() const; std::vector ExtractCumulSoftUpperBounds() const; @@ -1351,9 +1355,9 @@ class PathCumulFilter : public BasePathFilter { const RoutingModel& routing_model_; const RoutingDimension& dimension_; - const std::vector cumuls_; - const std::vector slacks_; std::vector start_to_vehicle_; + const std::vector initial_cumul_; + const std::vector initial_slack_; const std::vector evaluators_; const std::vector vehicle_span_upper_bounds_; const bool has_vehicle_span_upper_bounds_; @@ -1419,6 +1423,26 @@ std::vector SumOfVectors(const std::vector& v1, } } // namespace +std::vector +PathCumulFilter::ExtractInitialCumulIntervals() { + std::vector intervals; + intervals.reserve(dimension_.cumuls().size()); + for (const IntVar* cumul : dimension_.cumuls()) { + intervals.push_back({cumul->Min(), cumul->Max()}); + } + return intervals; +} + +std::vector +PathCumulFilter::ExtractInitialSlackIntervals() { + std::vector intervals; + intervals.reserve(dimension_.slacks().size()); + for (const IntVar* slack : dimension_.slacks()) { + intervals.push_back({slack->Min(), slack->Max()}); + } + return intervals; +} + std::vector PathCumulFilter::ExtractCumulSoftUpperBounds() const { const int num_cumuls = dimension_.cumuls().size(); @@ -1506,8 +1530,8 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, routing_model.GetPathsMetadata()), routing_model_(routing_model), dimension_(dimension), - cumuls_(dimension.cumuls()), 
- slacks_(dimension.slacks()), + initial_cumul_(ExtractInitialCumulIntervals()), + initial_slack_(ExtractInitialSlackIntervals()), evaluators_(ExtractEvaluators()), vehicle_span_upper_bounds_(dimension.vehicle_span_upper_bounds()), has_vehicle_span_upper_bounds_(absl::c_any_of( @@ -1544,17 +1568,16 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, may_use_optimizers_(may_use_optimizers), propagate_own_objective_value_(propagate_own_objective_value) { bool has_cumul_hard_bounds = false; - for (const IntVar* const slack : slacks_) { - if (slack->Min() > 0) { + for (const InitialInterval& slack : initial_slack_) { + if (slack.min > 0) { has_cumul_hard_bounds = true; break; } } - for (int i = 0; i < cumuls_.size(); ++i) { - IntVar* const cumul_var = cumuls_[i]; - if (cumul_var->Min() > 0 || - cumul_var->Max() < std::numeric_limits::max()) { + for (const InitialInterval& cumul : initial_cumul_) { + if (cumul.min > 0 || cumul.max < kint64max) { has_cumul_hard_bounds = true; + break; } } if (!has_cumul_hard_bounds) { @@ -1573,7 +1596,7 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, const std::vector& node_precedences = dimension.GetNodePrecedences(); if (!node_precedences.empty()) { - current_min_max_node_cumuls_.resize(cumuls_.size(), {-1, -1}); + current_min_max_node_cumuls_.resize(initial_cumul_.size(), {-1, -1}); } #ifndef NDEBUG @@ -1624,12 +1647,12 @@ int64_t PathCumulFilter::GetCumulSoftLowerBoundCost(int64_t node, int64_t PathCumulFilter::GetPathCumulSoftLowerBoundCost( const PathTransits& path_transits, int path) const { int64_t node = path_transits.Node(path, path_transits.PathSize(path) - 1); - int64_t cumul = cumuls_[node]->Max(); + int64_t cumul = initial_cumul_[node].max; int64_t current_cumul_cost_value = GetCumulSoftLowerBoundCost(node, cumul); for (int i = path_transits.PathSize(path) - 2; i >= 0; --i) { node = path_transits.Node(path, i); cumul = CapSub(cumul, path_transits.Transit(path, i)); - cumul = 
std::min(cumuls_[node]->Max(), cumul); + cumul = std::min(initial_cumul_[node].max, cumul); CapAddTo(GetCumulSoftLowerBoundCost(node, cumul), ¤t_cumul_cost_value); } @@ -1668,7 +1691,7 @@ void PathCumulFilter::OnBeforeSynchronizePaths() { current_path_transits_.ReserveTransits(r, number_of_route_arcs); // Second pass: update cumul, transit and cost values. node = Start(r); - int64_t cumul = cumuls_[node]->Min(); + int64_t cumul = initial_cumul_[node].min; min_path_cumuls_.clear(); min_path_cumuls_.push_back(cumul); @@ -1681,12 +1704,12 @@ void PathCumulFilter::OnBeforeSynchronizePaths() { const int64_t next = Value(node); const int64_t transit = (*evaluators_[vehicle])(node, next); CapAddTo(transit, &total_transit); - const int64_t transit_slack = CapAdd(transit, slacks_[node]->Min()); + const int64_t transit_slack = CapAdd(transit, initial_slack_[node].min); current_path_transits_.PushTransit(r, node, next, transit_slack); CapAddTo(transit_slack, &cumul); cumul = dimension_.GetFirstPossibleGreaterOrEqualValueForNode(next, cumul); - cumul = std::max(cumuls_[next]->Min(), cumul); + cumul = std::max(initial_cumul_[next].min, cumul); min_path_cumuls_.push_back(cumul); node = next; CapAddTo(GetCumulSoftCost(node, cumul), ¤t_cumul_cost_value); @@ -1816,7 +1839,7 @@ void PathCumulFilter::OnBeforeSynchronizePaths() { bool PathCumulFilter::AcceptPath(int64_t path_start, int64_t /*chain_start*/, int64_t /*chain_end*/) { int64_t node = path_start; - int64_t cumul = cumuls_[node]->Min(); + int64_t cumul = initial_cumul_[node].min; int64_t cumul_cost_delta = 0; int64_t total_transit = 0; const int path = delta_path_transits_.AddPaths(1); @@ -1847,14 +1870,14 @@ bool PathCumulFilter::AcceptPath(int64_t path_start, int64_t /*chain_start*/, const int64_t next = GetNext(node); const int64_t transit = (*evaluators_[vehicle])(node, next); CapAddTo(transit, &total_transit); - const int64_t transit_slack = CapAdd(transit, slacks_[node]->Min()); + const int64_t transit_slack = 
CapAdd(transit, initial_slack_[node].min); delta_path_transits_.PushTransit(path, node, next, transit_slack); CapAddTo(transit_slack, &cumul); cumul = dimension_.GetFirstPossibleGreaterOrEqualValueForNode(next, cumul); - if (cumul > std::min(capacity, cumuls_[next]->Max())) { + if (cumul > std::min(capacity, initial_cumul_[next].max)) { return false; } - cumul = std::max(cumuls_[next]->Min(), cumul); + cumul = std::max(initial_cumul_[next].min, cumul); min_path_cumuls_.push_back(cumul); node = next; if (filter_vehicle_costs) { @@ -1900,7 +1923,7 @@ bool PathCumulFilter::AcceptPath(int64_t path_start, int64_t /*chain_start*/, // [max_start, min_end[ during which the route will have to happen, // then the duration of break that must happen during this interval. int64_t min_total_break = 0; - int64_t max_path_end = cumuls_[routing_model_.End(vehicle)]->Max(); + int64_t max_path_end = initial_cumul_[routing_model_.End(vehicle)].max; const int64_t max_start = ComputePathMaxStartFromEndCumul( delta_path_transits_, path, path_start, max_path_end); for (const IntervalVar* br : @@ -2169,7 +2192,7 @@ bool PathCumulFilter::PickupToDeliveryLimitsRespected( for (int i = path_transits.PathSize(path) - 2; i >= 0; i--) { const int node_index = path_transits.Node(path, i); max_cumul = CapSub(max_cumul, path_transits.Transit(path, i)); - max_cumul = std::min(cumuls_[node_index]->Max(), max_cumul); + max_cumul = std::min(initial_cumul_[node_index].max, max_cumul); using PDPosition = RoutingModel::PickupDeliveryPosition; if (routing_model_.IsPickup(node_index)) { @@ -2215,13 +2238,14 @@ void PathCumulFilter::StoreMinMaxCumulOfNodesOnPath( const int path_size = path_transits.PathSize(path); DCHECK_EQ(min_path_cumuls.size(), path_size); - int64_t max_cumul = cumuls_[path_transits.Node(path, path_size - 1)]->Max(); + int64_t max_cumul = + initial_cumul_[path_transits.Node(path, path_size - 1)].max; for (int i = path_size - 1; i >= 0; i--) { const int node_index = path_transits.Node(path, 
i); if (i < path_size - 1) { max_cumul = CapSub(max_cumul, path_transits.Transit(path, i)); - max_cumul = std::min(cumuls_[node_index]->Max(), max_cumul); + max_cumul = std::min(initial_cumul_[node_index].max, max_cumul); } if (is_delta && node_index_to_precedences_[node_index].empty()) { @@ -2249,12 +2273,12 @@ int64_t PathCumulFilter::ComputePathMaxStartFromEndCumul( int64_t min_end_cumul) const { int64_t cumul_from_min_end = min_end_cumul; int64_t cumul_from_max_end = - cumuls_[routing_model_.End(start_to_vehicle_[path_start])]->Max(); + initial_cumul_[routing_model_.End(start_to_vehicle_[path_start])].max; for (int i = path_transits.PathSize(path) - 2; i >= 0; --i) { const int64_t transit = path_transits.Transit(path, i); const int64_t node = path_transits.Node(path, i); cumul_from_min_end = - std::min(cumuls_[node]->Max(), CapSub(cumul_from_min_end, transit)); + std::min(initial_cumul_[node].max, CapSub(cumul_from_min_end, transit)); cumul_from_max_end = dimension_.GetLastPossibleLessOrEqualValueForNode( node, CapSub(cumul_from_max_end, transit)); } @@ -2502,7 +2526,7 @@ class PickupDeliveryFilter : public BasePathFilter { const std::vector& pairs, const std::vector& vehicle_policies); - ~PickupDeliveryFilter() override {} + ~PickupDeliveryFilter() override = default; bool AcceptPath(int64_t path_start, int64_t chain_start, int64_t chain_end) override; std::string DebugString() const override { return "PickupDeliveryFilter"; } @@ -2695,7 +2719,7 @@ namespace { class VehicleVarFilter : public BasePathFilter { public: explicit VehicleVarFilter(const RoutingModel& routing_model); - ~VehicleVarFilter() override {} + ~VehicleVarFilter() override = default; bool AcceptPath(int64_t path_start, int64_t chain_start, int64_t chain_end) override; std::string DebugString() const override { return "VehicleVariableFilter"; } @@ -3414,7 +3438,7 @@ namespace { class CPFeasibilityFilter : public IntVarLocalSearchFilter { public: explicit CPFeasibilityFilter(RoutingModel* 
routing_model); - ~CPFeasibilityFilter() override {} + ~CPFeasibilityFilter() override = default; std::string DebugString() const override { return "CPFeasibilityFilter"; } bool Accept(const Assignment* delta, const Assignment* deltadelta, int64_t objective_min, int64_t objective_max) override; diff --git a/ortools/routing/filters.h b/ortools/routing/filters.h index 2a7bd2800b..34c172911a 100644 --- a/ortools/routing/filters.h +++ b/ortools/routing/filters.h @@ -14,6 +14,7 @@ #ifndef OR_TOOLS_ROUTING_FILTERS_H_ #define OR_TOOLS_ROUTING_FILTERS_H_ +#include #include #include #include @@ -34,6 +35,7 @@ #include "ortools/routing/types.h" #include "ortools/util/bitset.h" #include "ortools/util/range_minimum_query.h" +#include "ortools/util/saturated_arithmetic.h" namespace operations_research::routing { @@ -181,17 +183,70 @@ class DimensionValues { DimensionValues(int num_paths, int num_nodes) : range_of_path_(num_paths, {.begin = 0, .end = 0}), committed_range_of_path_(num_paths, {.begin = 0, .end = 0}), - span_min_(num_paths, 0), - span_max_(num_paths, kint64max), + span_(num_paths, {kint64min, kint64max}), changed_paths_(num_paths), max_num_committed_elements_(16 * num_nodes) { nodes_.reserve(max_num_committed_elements_); transit_.reserve(max_num_committed_elements_); - transit_sum_.reserve(max_num_committed_elements_); - cumul_min_.reserve(max_num_committed_elements_); - cumul_max_.reserve(max_num_committed_elements_); + travel_.reserve(max_num_committed_elements_); + travel_sum_.reserve(max_num_committed_elements_); + cumul_.reserve(max_num_committed_elements_); } + struct Interval { + int64_t min; + int64_t max; + // Tests inequality between intervals. + bool operator!=(const Interval& other) const { + return min != other.min || max != other.max; + } + // Tests equality between intervals. + bool operator==(const Interval& other) const { + return min == other.min && max == other.max; + } + // Returns true iff the interval is empty. 
+ bool IsEmpty() const { return min > max; } + // Increases the min to be at least lower_bound, + // returns true iff the interval is nonempty. + bool IncreaseMin(int64_t lower_bound) { + min = std::max(min, lower_bound); + return min <= max; + } + // Decreases the max to be at most upper_bound, + // returns true iff the interval is nonempty. + bool DecreaseMax(int64_t upper_bound) { + max = std::min(max, upper_bound); + return min <= max; + } + // Intersects this interval with the other, returns true iff the interval + // is nonempty. + bool IntersectWith(const Interval& other) { + min = std::max(min, other.min); + max = std::min(max, other.max); + return min <= max; + } + // A set addition, with intervals: adds other.min to the min, other.max to + // the max, with CapAdd(). + void Add(const Interval& other) { + DCHECK(!IsEmpty()); + DCHECK(!other.IsEmpty()); + min = CapAdd(min, other.min); + max = CapAdd(max, other.max); + } + // A set subtraction, with intervals: subtracts other.max from the min, + // other.min from the max, with CapSub(). + void Subtract(const Interval& other) { + DCHECK(!IsEmpty()); + DCHECK(!other.IsEmpty()); + min = CapSub(min, other.max); + max = CapSub(max, other.min); + } + // Returns an interval containing all integers: {kint64min, kint64max}. + static Interval AllIntegers() { + return {.min = kint64min, .max = kint64max}; + } + }; + // Adds a node to new nodes. void PushNode(int node) { nodes_.push_back(node); } @@ -205,13 +260,12 @@ class DimensionValues { changed_paths_.Set(path); // Allocate dimension values. We allocate n cells for all dimension values, // even transits, so they can all be indexed by the same range_of_path. 
- transit_.resize(nodes_.size(), 0); - transit_sum_.resize(nodes_.size(), 0); - cumul_min_.resize(nodes_.size(), kint64min); - cumul_max_.resize(nodes_.size(), kint64max); + transit_.resize(nodes_.size(), Interval::AllIntegers()); + travel_.resize(nodes_.size(), 0); + travel_sum_.resize(nodes_.size(), 0); + cumul_.resize(nodes_.size(), Interval::AllIntegers()); num_current_elements_ = nodes_.size(); - span_min_.Set(path, 0); - span_max_.Set(path, kint64max); + span_.Set(path, Interval::AllIntegers()); } // Resets all path to empty, in both committed and current state. @@ -224,13 +278,11 @@ class DimensionValues { num_committed_elements_ = 0; nodes_.clear(); transit_.clear(); - transit_sum_.clear(); - cumul_min_.clear(); - cumul_max_.clear(); - span_min_.Revert(); - span_min_.SetAllAndCommit(0); - span_max_.Revert(); - span_max_.SetAllAndCommit(kint64max); + travel_.clear(); + travel_sum_.clear(); + cumul_.clear(); + span_.Revert(); + span_.SetAllAndCommit(Interval::AllIntegers()); } // Clears the changed state, make it point to the committed state. @@ -242,11 +294,10 @@ class DimensionValues { num_current_elements_ = num_committed_elements_; nodes_.resize(num_current_elements_); transit_.resize(num_current_elements_); - transit_sum_.resize(num_current_elements_); - cumul_min_.resize(num_current_elements_); - cumul_max_.resize(num_current_elements_); - span_min_.Revert(); - span_max_.Revert(); + travel_.resize(num_current_elements_); + travel_sum_.resize(num_current_elements_); + cumul_.resize(num_current_elements_); + span_.Revert(); } // Makes the committed state point to the current state. @@ -258,17 +309,16 @@ class DimensionValues { } changed_paths_.SparseClearAll(); num_committed_elements_ = num_current_elements_; - span_min_.Commit(); - span_max_.Commit(); + span_.Commit(); // If the committed data would take too much space, compact the data: // copy committed data to the end of vectors, erase old data, refresh // indexing (range_of_path_). 
if (num_current_elements_ <= max_num_committed_elements_) return; temp_nodes_.clear(); temp_transit_.clear(); - temp_transit_sum_.clear(); - temp_cumul_min_.clear(); - temp_cumul_max_.clear(); + temp_travel_.clear(); + temp_travel_sum_.clear(); + temp_cumul_.clear(); for (int path = 0; path < range_of_path_.size(); ++path) { if (committed_range_of_path_[path].Size() == 0) continue; const size_t new_begin = temp_nodes_.size(); @@ -277,21 +327,21 @@ class DimensionValues { nodes_.begin() + end); temp_transit_.insert(temp_transit_.end(), transit_.begin() + begin, transit_.begin() + end); - temp_transit_sum_.insert(temp_transit_sum_.end(), - transit_sum_.begin() + begin, - transit_sum_.begin() + end); - temp_cumul_min_.insert(temp_cumul_min_.end(), cumul_min_.begin() + begin, - cumul_min_.begin() + end); - temp_cumul_max_.insert(temp_cumul_max_.end(), cumul_max_.begin() + begin, - cumul_max_.begin() + end); + temp_travel_.insert(temp_travel_.end(), travel_.begin() + begin, + travel_.begin() + end); + temp_travel_sum_.insert(temp_travel_sum_.end(), + travel_sum_.begin() + begin, + travel_sum_.begin() + end); + temp_cumul_.insert(temp_cumul_.end(), cumul_.begin() + begin, + cumul_.begin() + end); committed_range_of_path_[path] = {.begin = new_begin, .end = temp_nodes_.size()}; } std::swap(nodes_, temp_nodes_); std::swap(transit_, temp_transit_); - std::swap(transit_sum_, temp_transit_sum_); - std::swap(cumul_min_, temp_cumul_min_); - std::swap(cumul_max_, temp_cumul_max_); + std::swap(travel_, temp_travel_); + std::swap(travel_sum_, temp_travel_sum_); + std::swap(cumul_, temp_cumul_); range_of_path_ = committed_range_of_path_; num_committed_elements_ = nodes_.size(); num_current_elements_ = nodes_.size(); @@ -310,7 +360,7 @@ class DimensionValues { } // Returns a const view of the transits of the path, in the current state. 
- absl::Span Transits(int path) const { + absl::Span Transits(int path) const { auto [begin, end] = range_of_path_[path]; // When the path is not empty, #transits = #nodes - 1. // When the path is empty, begin = end, return empty span. @@ -319,7 +369,7 @@ class DimensionValues { } // Returns a mutable view of the transits of the path, in the current state. - absl::Span MutableTransits(int path) { + absl::Span MutableTransits(int path) { auto [begin, end] = range_of_path_[path]; // When the path is not empty, #transits = #nodes - 1. // When the path is empty, begin = end, return empty span. @@ -327,56 +377,51 @@ class DimensionValues { return absl::MakeSpan(transit_.data() + begin, transit_.data() + end); } - // Returns a const view of the transits sums of the path, in the current + // Returns a const view of the travels of the path, in the current // state. - absl::Span TransitSums(int path) const { - const auto [begin, end] = range_of_path_[path]; - return absl::MakeConstSpan(transit_sum_.data() + begin, - transit_sum_.data() + end); + absl::Span Travels(int path) const { + auto [begin, end] = range_of_path_[path]; + if (begin < end) --end; + return absl::MakeConstSpan(travel_.data() + begin, travel_.data() + end); } - // Returns a mutable view of the transits sums of the path, in the current + // Returns a mutable view of the travels of the path, in the current // state. - absl::Span MutableTransitSums(int path) { + absl::Span MutableTravels(int path) { + auto [begin, end] = range_of_path_[path]; + if (begin < end) --end; + return absl::MakeSpan(travel_.data() + begin, travel_.data() + end); + } + + // Returns a const view of the travel sums of the path, in the current state. 
+ absl::Span TravelSums(int path) const { const auto [begin, end] = range_of_path_[path]; - return absl::MakeSpan(transit_sum_.data() + begin, - transit_sum_.data() + end); + return absl::MakeConstSpan(travel_sum_.data() + begin, + travel_sum_.data() + end); + } + + // Returns a mutable view of the travel sums of the path in the current state. + absl::Span MutableTravelSums(int path) { + const auto [begin, end] = range_of_path_[path]; + return absl::MakeSpan(travel_sum_.data() + begin, travel_sum_.data() + end); } // Returns a const view of the cumul mins of the path, in the current state. - absl::Span CumulMins(int path) const { + absl::Span Cumuls(int path) const { const auto [begin, end] = range_of_path_[path]; - return absl::MakeConstSpan(cumul_min_.data() + begin, - cumul_min_.data() + end); + return absl::MakeConstSpan(cumul_.data() + begin, cumul_.data() + end); } // Returns a mutable view of the cumul mins of the path, in the current state. - absl::Span MutableCumulMins(int path) { + absl::Span MutableCumuls(int path) { const auto [begin, end] = range_of_path_[path]; - return absl::MakeSpan(cumul_min_.data() + begin, cumul_min_.data() + end); + return absl::MakeSpan(cumul_.data() + begin, cumul_.data() + end); } - // Returns a const view of the cumul maxs of the path, in the current state. - absl::Span CumulMaxs(int path) const { - const auto [begin, end] = range_of_path_[path]; - return absl::MakeConstSpan(cumul_max_.data() + begin, - cumul_max_.data() + end); - } - - // Returns a mutable view of the cumul maxs of the path, in the current state. - absl::Span MutableCumulMaxs(int path) { - const auto [begin, end] = range_of_path_[path]; - return absl::MakeSpan(cumul_max_.data() + begin, cumul_max_.data() + end); - } - - // Returns the min span of the path, in the current state. - int64_t SpanMin(int path) const { return span_min_.Get(path); } - // Returns the max span of the path in the current state. 
- int64_t SpanMax(int path) const { return span_max_.Get(path); } - // Sets the min span of the path, in the current state. - void SetSpanMin(int path, int64_t value) { span_min_.Set(path, value); } - // Sets the max span of the path, in the current state. - void SetSpanMax(int path, int64_t value) { span_max_.Set(path, value); } + // Returns the span interval of the path, in the current state. + Interval Span(int path) const { return span_.Get(path); } + // Sets the span interval of the path, in the current state. + void SetSpan(int path, Interval interval) { span_.Set(path, interval); } // Returns the number of nodes of the path, in the current state. int NumNodes(int path) const { return range_of_path_[path].Size(); } @@ -390,17 +435,28 @@ class DimensionValues { private: // These vectors hold the data of both committed and current states. // The ranges below determine which indices are associated to each path and - // each state. + // each state. It is up to the user to maintain the following invariants: + // If range_of_path_[p] == {.begin = b, .end = e}, then, in the current + // state: + // - nodes_[i] for i in [b, e) are the nodes of the path p. + // - cumul_[r] + transit_[r] == cumul_[r+1] for r in [b, e-1). + // - travel_[r] <= transit_[r].min for r in [b, e-1). + // - travel_sum_[r] == sum_{r' in [0, r')} travel_[r'], for r in [b+1, e) + // - cumul[b] + span_[p] == cumul[e-1]. + // + // The same invariants should hold for committed_range_of_path_ and the + // committed state. std::vector nodes_; - std::vector transit_; - std::vector transit_sum_; - std::vector cumul_min_; - std::vector cumul_max_; + std::vector transit_; + std::vector travel_; + std::vector travel_sum_; + std::vector cumul_; + // Temporary vectors used in Commit() during compaction. 
std::vector temp_nodes_; - std::vector temp_transit_; - std::vector temp_transit_sum_; - std::vector temp_cumul_min_; - std::vector temp_cumul_max_; + std::vector temp_transit_; + std::vector temp_travel_; + std::vector temp_travel_sum_; + std::vector temp_cumul_; // A path has a range of indices in the committed state and another one in the // current state. struct Range { @@ -411,8 +467,7 @@ class DimensionValues { std::vector range_of_path_; std::vector committed_range_of_path_; // Associates span to each path. - CommittableVector span_min_; - CommittableVector span_max_; + CommittableVector span_; // Stores whether each path has been changed since last committed state. SparseBitset changed_paths_; // Threshold for the size of the committed vector. This is purely heuristic: @@ -538,7 +593,7 @@ class PathState { class NodeRange; struct ChainBounds { - ChainBounds() {} + ChainBounds() = default; ChainBounds(int begin_index, int end_index) : begin_index(begin_index), end_index(end_index) {} int begin_index; @@ -1074,7 +1129,7 @@ LocalSearchFilter* MakeLightVehicleBreaksFilter( // nodes, and does not store them. class WeightedWaveletTree { public: - WeightedWaveletTree() {} + WeightedWaveletTree() = default; // Clears all trees, which invalidates all further range queries on currently // existing trees. This does *not* release memory held by this object. 
@@ -1302,7 +1357,7 @@ class BasePathFilter : public IntVarLocalSearchFilter { public: BasePathFilter(const std::vector& nexts, int next_domain_size, const PathsMetadata& paths_metadata); - ~BasePathFilter() override {} + ~BasePathFilter() override = default; bool Accept(const Assignment* delta, const Assignment* deltadelta, int64_t objective_min, int64_t objective_max) override; void OnSynchronize(const Assignment* delta) override; diff --git a/ortools/routing/neighborhoods.cc b/ortools/routing/neighborhoods.cc index 89e7ad8de7..d9ca4586f3 100644 --- a/ortools/routing/neighborhoods.cc +++ b/ortools/routing/neighborhoods.cc @@ -29,18 +29,25 @@ #include "ortools/util/saturated_arithmetic.h" namespace operations_research::routing { +using NeighborAccessor = + std::function&(/*node=*/int, /*start_node=*/int)>; MakeRelocateNeighborsOperator::MakeRelocateNeighborsOperator( const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, - std::function&(int, int)> get_neighbors, + NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors, RoutingTransitCallback2 arc_evaluator) - : PathOperator(vars, secondary_vars, - /*number_of_base_nodes=*/get_neighbors == nullptr ? 2 : 1, - /*skip_locally_optimal_paths=*/true, - /*accept_path_end_base=*/false, - std::move(start_empty_path_class), std::move(get_neighbors)), + : PathOperator( + vars, secondary_vars, + /*number_of_base_nodes=*/ + get_incoming_neighbors == nullptr && get_outgoing_neighbors == nullptr + ? 
2 + : 1, + /*skip_locally_optimal_paths=*/true, + /*accept_path_end_base=*/false, std::move(start_empty_path_class), + std::move(get_incoming_neighbors), std::move(get_outgoing_neighbors)), arc_evaluator_(std::move(arc_evaluator)) {} bool MakeRelocateNeighborsOperator::MakeNeighbor() { @@ -65,9 +72,14 @@ bool MakeRelocateNeighborsOperator::MakeNeighbor() { return MoveChainAndRepair(before_chain, chain_end, destination); }; if (HasNeighbors()) { - const int64_t node = GetNeighborForBaseNode(0); - if (IsInactive(node)) return false; - return do_move(/*before_chain=*/Prev(node), + const auto [neighbor, outgoing] = GetNeighborForBaseNode(0); + if (neighbor < 0 || IsInactive(neighbor)) return false; + if (!outgoing) { + // TODO(user): Handle incoming neighbors by going backwards on the + // chain. + return false; + } + return do_move(/*before_chain=*/Prev(neighbor), /*destination=*/BaseNode(0)); } else { return do_move(/*before_chain=*/BaseNode(0), @@ -127,7 +139,7 @@ SwapActiveToShortestPathOperator::SwapActiveToShortestPathOperator( std::vector> alternative_sets, RoutingTransitCallback2 arc_evaluator) : PathOperator(vars, secondary_vars, 1, true, false, - std::move(start_empty_path_class), nullptr), + std::move(start_empty_path_class), nullptr, nullptr), arc_evaluator_(std::move(arc_evaluator)), alternative_sets_(std::move(alternative_sets)), to_alternative_set_(vars.size(), -1), @@ -243,7 +255,7 @@ MakePairActiveOperator::MakePairActiveOperator( std::function start_empty_path_class, const std::vector& pairs) : PathOperator(vars, secondary_vars, 2, false, true, - std::move(start_empty_path_class), nullptr), + std::move(start_empty_path_class), nullptr, nullptr), inactive_pair_(0), inactive_pair_first_index_(0), inactive_pair_second_index_(0), @@ -323,7 +335,7 @@ MakePairInactiveOperator::MakePairInactiveOperator( std::function start_empty_path_class, const std::vector& pairs) : PathOperator(vars, secondary_vars, 1, true, false, - std::move(start_empty_path_class), 
nullptr) { + std::move(start_empty_path_class), nullptr, nullptr) { AddPairAlternativeSets(pairs); } @@ -344,7 +356,12 @@ PairRelocateOperator::PairRelocateOperator( std::function start_empty_path_class, const std::vector& pairs) : PathOperator(vars, secondary_vars, 3, true, false, - std::move(start_empty_path_class), nullptr) { + std::move(start_empty_path_class), nullptr, nullptr) { + // TODO(user): Add a version where a (first_node, second_node) pair are + // added respectively after first_node_neighbor and second_node_neighbor. + // This requires a complete restructuring of the code, since we would require + // scanning neighbors for a non-base node (second_node is an active sibling + // of first_node). AddPairAlternativeSets(pairs); } @@ -410,15 +427,17 @@ int64_t PairRelocateOperator::GetBaseNodeRestartPosition(int base_index) { GroupPairAndRelocateOperator::GroupPairAndRelocateOperator( const std::vector& vars, const std::vector& secondary_vars, - std::function start_empty_path_class, - std::function&(int, int)> get_neighbors, + std::function start_empty_path_class, NeighborAccessor, + NeighborAccessor get_outgoing_neighbors, const std::vector& pairs) - : PathOperator(vars, secondary_vars, - /*number_of_base_nodes=*/get_neighbors == nullptr ? 2 : 1, - /*skip_locally_optimal_paths=*/true, - /*accept_path_end_base=*/false, - std::move(start_empty_path_class), - std::move(get_neighbors)) { + : PathOperator( + vars, secondary_vars, + /*number_of_base_nodes=*/ + get_outgoing_neighbors == nullptr ? 2 : 1, + /*skip_locally_optimal_paths=*/true, + /*accept_path_end_base=*/false, std::move(start_empty_path_class), + nullptr, // We don't use incoming neighbors for this operator. + std::move(get_outgoing_neighbors)) { AddPairAlternativeSets(pairs); } @@ -432,24 +451,30 @@ bool GroupPairAndRelocateOperator::MakeNeighbor() { const bool ok = MoveChain(Prev(node), node, destination); return MoveChain(Prev(sibling), sibling, node) || ok; }; - return HasNeighbors() - ? 
do_move(/*node=*/GetNeighborForBaseNode(0), - /*destination=*/BaseNode(0)) - : do_move(/*node=*/Next(BaseNode(0)), /*destination=*/BaseNode(1)); + if (HasNeighbors()) { + const auto [neighbor, outgoing] = GetNeighborForBaseNode(0); + if (neighbor < 0) return false; + DCHECK(outgoing); + return do_move(/*node=*/neighbor, /*destination=*/BaseNode(0)); + } + return do_move(/*node=*/Next(BaseNode(0)), /*destination=*/BaseNode(1)); } LightPairRelocateOperator::LightPairRelocateOperator( const std::vector& vars, const std::vector& secondary_vars, - std::function start_empty_path_class, - std::function&(int, int)> get_neighbors, + std::function start_empty_path_class, NeighborAccessor, + NeighborAccessor get_outgoing_neighbors, const std::vector& pairs, std::function force_lifo) : PathOperator(vars, secondary_vars, - /*number_of_base_nodes=*/get_neighbors == nullptr ? 2 : 1, + /*number_of_base_nodes=*/ + get_outgoing_neighbors == nullptr ? 2 : 1, /*skip_locally_optimal_paths=*/true, /*accept_path_end_base=*/false, - std::move(start_empty_path_class), std::move(get_neighbors)), + std::move(start_empty_path_class), + nullptr, // Incoming neighbors not used as of 09/2024. + std::move(get_outgoing_neighbors)), force_lifo_(std::move(force_lifo)) { AddPairAlternativeSets(pairs); } @@ -462,7 +487,7 @@ LightPairRelocateOperator::LightPairRelocateOperator( std::function force_lifo) : LightPairRelocateOperator(vars, secondary_vars, std::move(start_empty_path_class), nullptr, - pairs, std::move(force_lifo)) {} + nullptr, pairs, std::move(force_lifo)) {} bool LightPairRelocateOperator::MakeNeighbor() { const auto do_move = [this](int64_t node, int64_t destination, @@ -517,27 +542,36 @@ bool LightPairRelocateOperator::MakeNeighbor() { return MoveChain(Prev(sibling), sibling, Prev(destination_sibling)) || ok; } }; - // TODO(user): Add support for lifo for neighbor-based move. - return HasNeighbors() - ? 
do_move(/*node=*/GetNeighborForBaseNode(0), - /*destination=*/BaseNode(0), - /*destination_is_lifo=*/false) - : do_move(/*node=*/Next(BaseNode(0)), /*destination=*/BaseNode(1), - force_lifo_ != nullptr && force_lifo_(StartNode(1))); + if (HasNeighbors()) { + const auto [neighbor, outgoing] = GetNeighborForBaseNode(0); + if (neighbor < 0) return false; + // TODO(user): Add support for incoming neighbors. + DCHECK(outgoing); + // TODO(user): Add support for lifo for neighbor-based move. + return do_move(/*node=*/neighbor, /*destination=*/BaseNode(0), + /*destination_is_lifo=*/false); + } + return do_move(/*node=*/Next(BaseNode(0)), /*destination=*/BaseNode(1), + force_lifo_ != nullptr && force_lifo_(StartNode(1))); } PairExchangeOperator::PairExchangeOperator( const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, - std::function&(int, int)> get_neighbors, + NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors, const std::vector& pairs) - : PathOperator(vars, secondary_vars, - /*number_of_base_nodes=*/get_neighbors == nullptr ? 2 : 1, - /*skip_locally_optimal_paths=*/true, - /*accept_path_end_base=*/true, - std::move(start_empty_path_class), - std::move(get_neighbors)) { + : PathOperator( + vars, secondary_vars, + /*number_of_base_nodes=*/ + get_incoming_neighbors == nullptr && get_outgoing_neighbors == nullptr + ? 
2 + : 1, + /*skip_locally_optimal_paths=*/true, + /*accept_path_end_base=*/false, std::move(start_empty_path_class), + std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors)) { AddPairAlternativeSets(pairs); } @@ -551,9 +585,15 @@ bool PairExchangeOperator::MakeNeighbor() { if (!HasNeighbors()) { node2 = BaseNode(1); } else { - const int64_t neighbor = GetNeighborForBaseNode(0); - if (IsInactive(neighbor) || IsPathStart(neighbor)) return false; - node2 = Prev(neighbor); + const auto [neighbor, outgoing] = GetNeighborForBaseNode(0); + if (neighbor < 0 || IsInactive(neighbor)) return false; + if (outgoing) { + if (IsPathStart(neighbor)) return false; + } else if (IsPathEnd(neighbor)) { + return false; + } + node2 = outgoing ? Prev(neighbor) : Next(neighbor); + if (IsPathEnd(node2)) return false; } int64_t prev2, sibling2, sibling_prev2 = -1; if (!GetPreviousAndSibling(node2, &prev2, &sibling2, &sibling_prev2)) { @@ -619,7 +659,7 @@ PairExchangeRelocateOperator::PairExchangeRelocateOperator( std::function start_empty_path_class, const std::vector& pairs) : PathOperator(vars, secondary_vars, 6, true, false, - std::move(start_empty_path_class), nullptr) { + std::move(start_empty_path_class), nullptr, nullptr) { AddPairAlternativeSets(pairs); } @@ -877,7 +917,7 @@ IndexPairSwapActiveOperator::IndexPairSwapActiveOperator( std::function start_empty_path_class, const std::vector& pairs) : PathOperator(vars, secondary_vars, 1, true, false, - std::move(start_empty_path_class), nullptr), + std::move(start_empty_path_class), nullptr, nullptr), inactive_node_(0) { AddPairAlternativeSets(pairs); } @@ -925,7 +965,7 @@ RelocateExpensiveChain::RelocateExpensiveChain( int num_arcs_to_consider, std::function arc_cost_for_path_start) : PathOperator(vars, secondary_vars, 1, false, false, - std::move(start_empty_path_class), nullptr), + std::move(start_empty_path_class), nullptr, nullptr), num_arcs_to_consider_(num_arcs_to_consider), current_path_(0), 
current_expensive_arc_indices_({-1, -1}), @@ -936,6 +976,8 @@ RelocateExpensiveChain::RelocateExpensiveChain( } bool RelocateExpensiveChain::MakeNeighbor() { + // TODO(user): Consider node neighbors? The operator would no longer be + // a path operator though, because we would no longer have any base nodes. const int first_arc_index = current_expensive_arc_indices_.first; const int second_arc_index = current_expensive_arc_indices_.second; DCHECK_LE(0, first_arc_index); @@ -1043,14 +1085,17 @@ PickupAndDeliveryData::PickupAndDeliveryData( RelocateSubtrip::RelocateSubtrip( const std::vector& vars, const std::vector& secondary_vars, - std::function start_empty_path_class, - std::function&(int, int)> get_neighbors, + std::function start_empty_path_class, NeighborAccessor, + NeighborAccessor get_outgoing_neighbors, absl::Span pairs) - : PathOperator(vars, secondary_vars, - /*number_of_base_nodes=*/get_neighbors == nullptr ? 2 : 1, - /*skip_locally_optimal_paths=*/true, - /*accept_path_end_base=*/false, - std::move(start_empty_path_class), std::move(get_neighbors)), + : PathOperator( + vars, secondary_vars, + /*number_of_base_nodes=*/ + get_outgoing_neighbors == nullptr ? 2 : 1, + /*skip_locally_optimal_paths=*/true, + /*accept_path_end_base=*/false, std::move(start_empty_path_class), + nullptr, // Incoming neighbors aren't supported as of 09/2024. + std::move(get_outgoing_neighbors)), pd_data_(number_of_nexts_, pairs) { opened_pairs_set_.resize(pairs.size(), false); } @@ -1162,23 +1207,30 @@ bool RelocateSubtrip::MakeNeighbor() { return false; } }; - return HasNeighbors() - ? 
do_move(/*node=*/GetNeighborForBaseNode(0), - /*insertion_node=*/BaseNode(0)) - : do_move(/*node=*/BaseNode(0), /*insertion_node=*/BaseNode(1)); + if (HasNeighbors()) { + const auto [neighbor, outgoing] = GetNeighborForBaseNode(0); + if (neighbor < 0) return false; + DCHECK(outgoing); + if (IsInactive(neighbor)) return false; + return do_move(/*node=*/neighbor, /*insertion_node=*/BaseNode(0)); + } + return do_move(/*node=*/BaseNode(0), /*insertion_node=*/BaseNode(1)); } ExchangeSubtrip::ExchangeSubtrip( const std::vector& vars, const std::vector& secondary_vars, - std::function start_empty_path_class, - std::function&(int, int)> get_neighbors, + std::function start_empty_path_class, NeighborAccessor, + NeighborAccessor get_outgoing_neighbors, absl::Span pairs) - : PathOperator(vars, secondary_vars, - /*number_of_base_nodes=*/get_neighbors == nullptr ? 2 : 1, - /*skip_locally_optimal_paths=*/true, - /*accept_path_end_base=*/false, - std::move(start_empty_path_class), std::move(get_neighbors)), + : PathOperator( + vars, secondary_vars, + /*number_of_base_nodes=*/ + get_outgoing_neighbors == nullptr ? 2 : 1, + /*skip_locally_optimal_paths=*/true, + /*accept_path_end_base=*/false, std::move(start_empty_path_class), + nullptr, // Incoming neighbors aren't supported as of 09/2024. 
+ std::move(get_outgoing_neighbors)), pd_data_(number_of_nexts_, pairs) { opened_pairs_set_.resize(pairs.size(), false); } @@ -1200,7 +1252,9 @@ bool ExchangeSubtrip::MakeNeighbor() { int64_t node1 = -1; if (HasNeighbors()) { const int64_t node = BaseNode(0); - const int64_t neighbor = GetNeighborForBaseNode(0); + const auto [neighbor, outgoing] = GetNeighborForBaseNode(0); + if (neighbor < 0) return false; + DCHECK(outgoing); if (IsInactive(neighbor)) return false; if (pd_data_.IsDeliveryNode(node) && pd_data_.IsDeliveryNode(Prev(neighbor))) { diff --git a/ortools/routing/neighborhoods.h b/ortools/routing/neighborhoods.h index a4b3a8090d..85fb0bf23d 100644 --- a/ortools/routing/neighborhoods.h +++ b/ortools/routing/neighborhoods.h @@ -56,16 +56,17 @@ class MakeRelocateNeighborsOperator : public PathOperator { const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, - std::function&(int, int)> get_neighbors, + std::function&(int, int)> get_incoming_neighbors, + std::function&(int, int)> get_outgoing_neighbors, RoutingTransitCallback2 arc_evaluator); MakeRelocateNeighborsOperator( const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, RoutingTransitCallback2 arc_evaluator) - : MakeRelocateNeighborsOperator(vars, secondary_vars, - std::move(start_empty_path_class), - nullptr, std::move(arc_evaluator)) {} + : MakeRelocateNeighborsOperator( + vars, secondary_vars, std::move(start_empty_path_class), nullptr, + nullptr, std::move(arc_evaluator)) {} ~MakeRelocateNeighborsOperator() override {} bool MakeNeighbor() override; @@ -253,7 +254,8 @@ class GroupPairAndRelocateOperator : public PathOperator { const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, - std::function&(int, int)> get_neighbors, + std::function&(int, int)> get_incoming_neighbors, + std::function&(int, int)> get_outgoing_neighbors, const std::vector& pairs); 
GroupPairAndRelocateOperator( const std::vector& vars, @@ -262,7 +264,7 @@ class GroupPairAndRelocateOperator : public PathOperator { const std::vector& pairs) : GroupPairAndRelocateOperator(vars, secondary_vars, std::move(start_empty_path_class), nullptr, - pairs) {} + nullptr, pairs) {} ~GroupPairAndRelocateOperator() override {} bool MakeNeighbor() override; @@ -286,7 +288,8 @@ class LightPairRelocateOperator : public PathOperator { const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, - std::function&(int, int)> get_neighbors, + std::function&(int, int)> get_incoming_neighbors, + std::function&(int, int)> get_outgoing_neighbors, const std::vector& pairs, std::function force_lifo = nullptr); LightPairRelocateOperator(const std::vector& vars, @@ -317,7 +320,8 @@ class PairExchangeOperator : public PathOperator { const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, - std::function&(int, int)> get_neighbors, + std::function&(int, int)> get_incoming_neighbors, + std::function&(int, int)> get_outgoing_neighbors, const std::vector& pairs); PairExchangeOperator(const std::vector& vars, const std::vector& secondary_vars, @@ -325,7 +329,7 @@ class PairExchangeOperator : public PathOperator { const std::vector& pairs) : PairExchangeOperator(vars, secondary_vars, std::move(start_empty_path_class), nullptr, - pairs) {} + nullptr, pairs) {} ~PairExchangeOperator() override {} bool MakeNeighbor() override; @@ -555,7 +559,7 @@ PairNodeSwapActiveOperator::PairNodeSwapActiveOperator( std::function start_empty_path_class, const std::vector& pairs) : PathOperator(vars, secondary_vars, 2, false, false, - std::move(start_empty_path_class), nullptr), + std::move(start_empty_path_class), nullptr, nullptr), inactive_pair_(0), pairs_(pairs) {} @@ -658,14 +662,15 @@ class RelocateSubtrip : public PathOperator { const std::vector& vars, const std::vector& secondary_vars, std::function 
start_empty_path_class, - std::function&(int, int)> get_neighbors, + std::function&(int, int)> get_incoming_neighbors, + std::function&(int, int)> get_outgoing_neighbors, absl::Span pairs); RelocateSubtrip(const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, absl::Span pairs) : RelocateSubtrip(vars, secondary_vars, std::move(start_empty_path_class), - nullptr, pairs) {} + nullptr, nullptr, pairs) {} std::string DebugString() const override { return "RelocateSubtrip"; } bool MakeNeighbor() override; @@ -695,14 +700,15 @@ class ExchangeSubtrip : public PathOperator { const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, - std::function&(int, int)> get_neighbors, + std::function&(int, int)> get_incoming_neighbors, + std::function&(int, int)> get_outgoing_neighbors, absl::Span pairs); ExchangeSubtrip(const std::vector& vars, const std::vector& secondary_vars, std::function start_empty_path_class, absl::Span pairs) : ExchangeSubtrip(vars, secondary_vars, std::move(start_empty_path_class), - nullptr, pairs) {} + nullptr, nullptr, pairs) {} std::string DebugString() const override { return "ExchangeSubtrip"; } bool MakeNeighbor() override; diff --git a/ortools/routing/routing.cc b/ortools/routing/routing.cc index dcef548e1c..bc2d7683ae 100644 --- a/ortools/routing/routing.cc +++ b/ortools/routing/routing.cc @@ -746,6 +746,12 @@ int RoutingModel::RegisterStateDependentTransitCallback( return state_dependent_transit_evaluators_.size() - 1; } +int RoutingModel::RegisterCumulDependentTransitCallback( + CumulDependentTransitCallback2 callback) { + cumul_dependent_transit_evaluators_.push_back(std::move(callback)); + return cumul_dependent_transit_evaluators_.size() - 1; +} + void RoutingModel::AddNoCycleConstraintInternal() { if (no_cycle_constraint_ == nullptr) { no_cycle_constraint_ = solver_->MakeNoCycle(nexts_, active_); @@ -758,7 +764,7 @@ bool RoutingModel::AddDimension(int 
evaluator_index, int64_t slack_max, const std::string& name) { const std::vector evaluator_indices(vehicles_, evaluator_index); std::vector capacities(vehicles_, capacity); - return AddDimensionWithCapacityInternal(evaluator_indices, slack_max, + return AddDimensionWithCapacityInternal(evaluator_indices, {}, slack_max, std::move(capacities), fix_start_cumul_to_zero, name); } @@ -767,7 +773,7 @@ bool RoutingModel::AddDimensionWithVehicleTransits( const std::vector& evaluator_indices, int64_t slack_max, int64_t capacity, bool fix_start_cumul_to_zero, const std::string& name) { std::vector capacities(vehicles_, capacity); - return AddDimensionWithCapacityInternal(evaluator_indices, slack_max, + return AddDimensionWithCapacityInternal(evaluator_indices, {}, slack_max, std::move(capacities), fix_start_cumul_to_zero, name); } @@ -777,7 +783,7 @@ bool RoutingModel::AddDimensionWithVehicleCapacity( std::vector vehicle_capacities, bool fix_start_cumul_to_zero, const std::string& name) { const std::vector evaluator_indices(vehicles_, evaluator_index); - return AddDimensionWithCapacityInternal(evaluator_indices, slack_max, + return AddDimensionWithCapacityInternal(evaluator_indices, {}, slack_max, std::move(vehicle_capacities), fix_start_cumul_to_zero, name); } @@ -786,23 +792,37 @@ bool RoutingModel::AddDimensionWithVehicleTransitAndCapacity( const std::vector& evaluator_indices, int64_t slack_max, std::vector vehicle_capacities, bool fix_start_cumul_to_zero, const std::string& name) { - return AddDimensionWithCapacityInternal(evaluator_indices, slack_max, + return AddDimensionWithCapacityInternal(evaluator_indices, {}, slack_max, std::move(vehicle_capacities), fix_start_cumul_to_zero, name); } +bool RoutingModel::AddDimensionWithCumulDependentVehicleTransitAndCapacity( + const std::vector& fixed_evaluator_indices, + const std::vector& cumul_dependent_evaluator_indices, + int64_t slack_max, std::vector vehicle_capacities, + bool fix_start_cumul_to_zero, const std::string& 
name) { + return AddDimensionWithCapacityInternal( + fixed_evaluator_indices, cumul_dependent_evaluator_indices, slack_max, + std::move(vehicle_capacities), fix_start_cumul_to_zero, name); +} + bool RoutingModel::AddDimensionWithCapacityInternal( - const std::vector& evaluator_indices, int64_t slack_max, - std::vector vehicle_capacities, bool fix_start_cumul_to_zero, - const std::string& name) { + const std::vector& evaluator_indices, + const std::vector& cumul_dependent_evaluator_indices, + int64_t slack_max, std::vector vehicle_capacities, + bool fix_start_cumul_to_zero, const std::string& name) { CHECK_EQ(vehicles_, vehicle_capacities.size()); return InitializeDimensionInternal( - evaluator_indices, std::vector(), slack_max, fix_start_cumul_to_zero, + evaluator_indices, cumul_dependent_evaluator_indices, + /*state_dependent_evaluator_indices=*/{}, slack_max, + fix_start_cumul_to_zero, new RoutingDimension(this, std::move(vehicle_capacities), name, nullptr)); } bool RoutingModel::InitializeDimensionInternal( const std::vector& evaluator_indices, + const std::vector& cumul_dependent_evaluator_indices, const std::vector& state_dependent_evaluator_indices, int64_t slack_max, bool fix_start_cumul_to_zero, RoutingDimension* dimension) { @@ -815,8 +835,8 @@ bool RoutingModel::InitializeDimensionInternal( const DimensionIndex dimension_index(dimensions_.size()); dimension_name_to_index_[dimension->name()] = dimension_index; dimensions_.push_back(dimension); - dimension->Initialize(evaluator_indices, state_dependent_evaluator_indices, - slack_max); + dimension->Initialize(evaluator_indices, cumul_dependent_evaluator_indices, + state_dependent_evaluator_indices, slack_max); solver_->AddConstraint(solver_->MakeDelayedPathCumul( nexts_, active_, dimension->cumuls(), dimension->transits())); if (fix_start_cumul_to_zero) { @@ -975,9 +995,10 @@ bool RoutingModel::AddDimensionDependentDimensionWithVehicleCapacityInternal( new_dimension = new RoutingDimension(this, 
std::move(vehicle_capacities), name, base_dimension); } - return InitializeDimensionInternal(pure_transits, dependent_transits, - slack_max, fix_start_cumul_to_zero, - new_dimension); + return InitializeDimensionInternal(pure_transits, + /*cumul_dependent_evaluator_indices=*/{}, + dependent_transits, slack_max, + fix_start_cumul_to_zero, new_dimension); } bool RoutingModel::AddDimensionDependentDimensionWithVehicleCapacity( @@ -1542,10 +1563,14 @@ struct VehicleClass { util_intops::StrongVector dimension_end_cumuls_min; util_intops::StrongVector dimension_end_cumuls_max; util_intops::StrongVector dimension_capacities; - /// dimension_evaluators[d]->Run(from, to) is the transit value of arc + /// dimension_evaluators[d]->Run(from, to) is the fixed transit value of arc /// from->to for a dimension d. util_intops::StrongVector dimension_evaluator_classes; + /// Same as above but for the cumul-dependent transit evaluators, if the + /// dimension has any. + util_intops::StrongVector + cumul_dependent_dimension_evaluator_classes; /// Hash of the visitability of (non-start/end) nodes. 
uint64_t visitable_nodes_hash; /// Hash of allowed resources for each resource group, or -1 if a given @@ -1564,6 +1589,8 @@ struct VehicleClass { c1.dimension_end_cumuls_max == c2.dimension_end_cumuls_max && c1.dimension_capacities == c2.dimension_capacities && c1.dimension_evaluator_classes == c2.dimension_evaluator_classes && + c1.cumul_dependent_dimension_evaluator_classes == + c2.cumul_dependent_dimension_evaluator_classes && c1.visitable_nodes_hash == c2.visitable_nodes_hash && c1.group_allowed_resources_hash == c2.group_allowed_resources_hash; } @@ -1574,8 +1601,9 @@ struct VehicleClass { c.end_equivalence_class, c.dimension_start_cumuls_min, c.dimension_start_cumuls_max, c.dimension_end_cumuls_min, c.dimension_end_cumuls_max, c.dimension_capacities, - c.dimension_evaluator_classes, c.visitable_nodes_hash, - c.group_allowed_resources_hash); + c.dimension_evaluator_classes, + c.cumul_dependent_dimension_evaluator_classes, + c.visitable_nodes_hash, c.group_allowed_resources_hash); } }; @@ -1608,6 +1636,8 @@ void RoutingModel::ComputeVehicleClasses() { dimension->vehicle_capacities()[vehicle]); vehicle_class.dimension_evaluator_classes.push_back( dimension->vehicle_to_class(vehicle)); + vehicle_class.cumul_dependent_dimension_evaluator_classes.push_back( + dimension->vehicle_to_cumul_dependent_class(vehicle)); } node_is_visitable.assign(Size(), true); for (int index = 0; index < Size(); ++index) { @@ -2958,7 +2988,32 @@ void RoutingModel::AddSearchMonitor(SearchMonitor* const monitor) { secondary_ls_monitors_.push_back(monitor); } +void RoutingModel::AddRestoreDimensionValuesResetCallback( + std::function callback) { + if (callback) { + if (restore_dimension_values_reset_callbacks_.empty()) { + AddEnterSearchCallback([this]() { + for (const auto& callback : restore_dimension_values_reset_callbacks_) { + callback(); + } + }); + } + restore_dimension_values_reset_callbacks_.push_back(std::move(callback)); + } +} + namespace { +class EnterSearchMonitor : public 
SearchMonitor { + public: + EnterSearchMonitor(Solver* solver, std::function callback) + : SearchMonitor(solver), callback_(std::move(callback)) {} + void EnterSearch() override { callback_(); } + void Install() override { ListenToEvent(Solver::MonitorEvent::kEnterSearch); } + + private: + std::function callback_; +}; + class AtSolutionCallbackMonitor : public SearchMonitor { public: AtSolutionCallbackMonitor(Solver* solver, std::function callback, @@ -2984,6 +3039,12 @@ class AtSolutionCallbackMonitor : public SearchMonitor { }; } // namespace +void RoutingModel::AddEnterSearchCallback(std::function callback) { + EnterSearchMonitor* const monitor = solver_->RevAlloc( + new EnterSearchMonitor(solver_.get(), std::move(callback))); + AddSearchMonitor(monitor); +} + void RoutingModel::AddAtSolutionCallback(std::function callback, bool track_unchecked_neighbors) { AtSolutionCallbackMonitor* const monitor = @@ -4617,6 +4678,12 @@ LocalSearchOperator* RoutingModel::CreateMakeInactiveOperator() { void RoutingModel::CreateNeighborhoodOperators( const RoutingSearchParameters& parameters) { + // TODO(user): Consider setting + // 'only_sort_neighbors_for_partial_neighborhoods' to false in + // GetOrCreateNodeNeighborsByCostClass(), and use neighbors regardless of + // the "used" ratio when parameters.ls_operator_neighbors_ratio() < 1. + // This would allow the operators to iterate on the neighbors by increasing + // distance, even if all nodes are considered as neighbors. double neighbors_ratio_used = 1; const NodeNeighborsByCostClass* neighbors_by_cost_class = GetOrCreateNodeNeighborsByCostClass( @@ -4624,22 +4691,17 @@ void RoutingModel::CreateNeighborhoodOperators( parameters.ls_operator_min_neighbors(), neighbors_ratio_used, /*add_vehicle_starts_to_neighbors=*/false, /*add_vehicle_ends_to_neighbors=*/false); - // TODO(user): Consider passing incoming/outgoing neighbors separately to - // LS operators. 
This would allow the Cross operator to use both incoming and - // outgoing neighbors, to exchange path starts and ends independently. - const auto get_neighbors = [neighbors_by_cost_class, this]( - int64_t node, - int64_t start) -> const std::vector& { - if (IsEnd(node)) { - // HACK(user): As of CL/651760817, vehicle end nodes no longer have - // outgoing neighbors, which causes neighborhood operators that iterate - // on vehicle ends as seeds to fail. This is a temporary hack to consider - // the incoming neighbors for vehicle ends, before the proper fix which is - // to pass 2 separate callbacks for incoming and outgoing neighbors to - // neighborhood operators. - return neighbors_by_cost_class->GetIncomingNeighborsOfNodeForCostClass( - GetCostClassIndexOfVehicle(VehicleIndex(start)).value(), node); - } + const auto get_incoming_neighbors = + [neighbors_by_cost_class, this]( + int64_t node, int64_t start) -> const std::vector& { + DCHECK(!IsStart(node)); + return neighbors_by_cost_class->GetIncomingNeighborsOfNodeForCostClass( + GetCostClassIndexOfVehicle(VehicleIndex(start)).value(), node); + }; + const auto get_outgoing_neighbors = + [neighbors_by_cost_class, this]( + int64_t node, int64_t start) -> const std::vector& { + DCHECK(!IsEnd(node)); return neighbors_by_cost_class->GetOutgoingNeighborsOfNodeForCostClass( GetCostClassIndexOfVehicle(VehicleIndex(start)).value(), node); }; @@ -4680,15 +4742,15 @@ void RoutingModel::CreateNeighborhoodOperators( // Other operators defined in the CP solver. 
local_search_operators_[RELOCATE] = - CreateOperatorWithNeighborsRatio(neighbors_ratio_used, - get_neighbors); + CreateOperatorWithNeighborsRatio( + neighbors_ratio_used, get_incoming_neighbors, get_outgoing_neighbors); local_search_operators_[EXCHANGE] = - CreateOperatorWithNeighborsRatio(neighbors_ratio_used, - get_neighbors); + CreateOperatorWithNeighborsRatio( + neighbors_ratio_used, get_incoming_neighbors, get_outgoing_neighbors); local_search_operators_[CROSS] = CreateOperatorWithNeighborsRatio( - neighbors_ratio_used, get_neighbors); + neighbors_ratio_used, get_incoming_neighbors, get_outgoing_neighbors); local_search_operators_[TWO_OPT] = CreateOperatorWithNeighborsRatio( - neighbors_ratio_used, get_neighbors); + neighbors_ratio_used, get_incoming_neighbors, get_outgoing_neighbors); local_search_operators_[RELOCATE_AND_MAKE_ACTIVE] = CreateCPOperator(); local_search_operators_[MAKE_ACTIVE_AND_RELOCATE] = @@ -4722,19 +4784,21 @@ void RoutingModel::CreateNeighborhoodOperators( std::vector light_relocate_pair_operators; light_relocate_pair_operators.push_back( CreateOperatorWithNeighborsRatio( - neighbors_ratio_used, get_neighbors, pickup_delivery_pairs_, - [this](int64_t start) { + neighbors_ratio_used, get_incoming_neighbors, get_outgoing_neighbors, + pickup_delivery_pairs_, [this](int64_t start) { return vehicle_pickup_delivery_policy_[VehicleIndex(start)] == RoutingModel::PICKUP_AND_DELIVERY_LIFO; })); light_relocate_pair_operators.push_back( CreatePairOperator(neighbors_ratio_used, - get_neighbors)); + get_incoming_neighbors, + get_outgoing_neighbors)); local_search_operators_[LIGHT_RELOCATE_PAIR] = solver_->ConcatenateOperators(light_relocate_pair_operators); local_search_operators_[EXCHANGE_PAIR] = solver_->ConcatenateOperators( {CreatePairOperator(neighbors_ratio_used, - get_neighbors), + get_incoming_neighbors, + get_outgoing_neighbors), solver_->RevAlloc(new SwapIndexPairOperator( nexts_, CostsAreHomogeneousAcrossVehicles() ? 
std::vector() @@ -4744,16 +4808,18 @@ void RoutingModel::CreateNeighborhoodOperators( CreatePairOperator(); local_search_operators_[RELOCATE_NEIGHBORS] = CreateOperatorWithNeighborsRatio( - neighbors_ratio_used, get_neighbors, + neighbors_ratio_used, get_incoming_neighbors, get_outgoing_neighbors, GetLocalSearchHomogeneousArcCostCallback(parameters)); local_search_operators_[NODE_PAIR_SWAP] = solver_->ConcatenateOperators( {CreatePairOperator(), CreatePairOperator>(), CreatePairOperator>()}); local_search_operators_[RELOCATE_SUBTRIP] = - CreatePairOperator(neighbors_ratio_used, get_neighbors); + CreatePairOperator( + neighbors_ratio_used, get_incoming_neighbors, get_outgoing_neighbors); local_search_operators_[EXCHANGE_SUBTRIP] = - CreatePairOperator(neighbors_ratio_used, get_neighbors); + CreatePairOperator( + neighbors_ratio_used, get_incoming_neighbors, get_outgoing_neighbors); const auto arc_cost_for_path_start = [this, arc_cost_getter = GetLocalSearchArcCostCallback(parameters)]( @@ -6419,11 +6485,12 @@ RoutingDimension::~RoutingDimension() { void RoutingDimension::Initialize( const std::vector& transit_evaluators, + const std::vector& cumul_dependent_transit_evaluators, const std::vector& state_dependent_transit_evaluators, int64_t slack_max) { InitializeCumuls(); - InitializeTransits(transit_evaluators, state_dependent_transit_evaluators, - slack_max); + InitializeTransits(transit_evaluators, cumul_dependent_transit_evaluators, + state_dependent_transit_evaluators, slack_max); } void RoutingDimension::InitializeCumuls() { @@ -6474,10 +6541,10 @@ void ComputeTransitClasses(absl::Span evaluator_indices, absl::flat_hash_map evaluator_to_class; for (int i = 0; i < evaluator_indices.size(); ++i) { const int evaluator_index = evaluator_indices[i]; - int evaluator_class = -1; - if (!gtl::FindCopy(evaluator_to_class, evaluator_index, &evaluator_class)) { - evaluator_class = class_evaluators->size(); - evaluator_to_class[evaluator_index] = evaluator_class; + const 
int new_class = class_evaluators->size(); + const int evaluator_class = + gtl::LookupOrInsert(&evaluator_to_class, evaluator_index, new_class); + if (evaluator_class == new_class) { class_evaluators->push_back(evaluator_index); } (*vehicle_to_class)[i] = evaluator_class; @@ -6582,6 +6649,7 @@ void RoutingDimension::InitializeTransitVariables(int64_t slack_max) { void RoutingDimension::InitializeTransits( absl::Span transit_evaluators, + absl::Span cumul_dependent_transit_evaluators, absl::Span state_dependent_transit_evaluators, int64_t slack_max) { CHECK_EQ(model_->vehicles(), transit_evaluators.size()); @@ -6594,6 +6662,9 @@ void RoutingDimension::InitializeTransits( dependent_transits_.resize(size, nullptr); ComputeTransitClasses(transit_evaluators, &class_evaluators_, &vehicle_to_class_); + ComputeTransitClasses(cumul_dependent_transit_evaluators, + &cumul_dependent_class_evaluators_, + &vehicle_to_cumul_dependent_class_); if (base_dimension_ != nullptr) { ComputeTransitClasses(state_dependent_transit_evaluators, &state_dependent_class_evaluators_, diff --git a/ortools/routing/routing.h b/ortools/routing/routing.h index d80d093b3a..648e880ac8 100644 --- a/ortools/routing/routing.h +++ b/ortools/routing/routing.h @@ -269,6 +269,7 @@ class RoutingModel { typedef RoutingResourceClassIndex ResourceClassIndex; typedef RoutingTransitCallback1 TransitCallback1; typedef RoutingTransitCallback2 TransitCallback2; + typedef RoutingCumulDependentTransitCallback2 CumulDependentTransitCallback2; #if !defined(SWIG) /// What follows is relevant for models with time/state dependent transits. 
@@ -640,8 +641,10 @@ class RoutingModel { int RegisterTransitCallback( TransitCallback2 callback, TransitEvaluatorSign sign = kTransitEvaluatorSignUnknown); - + int RegisterCumulDependentTransitCallback( + CumulDependentTransitCallback2 callback); int RegisterStateDependentTransitCallback(VariableIndexEvaluator2 callback); + const TransitCallback2& TransitCallback(int callback_index) const { CHECK_LT(callback_index, transit_evaluators_.size()); return transit_evaluators_[callback_index]; @@ -650,6 +653,11 @@ class RoutingModel { CHECK_LT(callback_index, unary_transit_evaluators_.size()); return unary_transit_evaluators_[callback_index]; } + const CumulDependentTransitCallback2& CumulDependentTransitCallback( + int callback_index) const { + CHECK_LT(callback_index, cumul_dependent_transit_evaluators_.size()); + return cumul_dependent_transit_evaluators_[callback_index]; + } const VariableIndexEvaluator2& StateDependentTransitCallback( int callback_index) const { CHECK_LT(callback_index, state_dependent_transit_evaluators_.size()); @@ -691,6 +699,18 @@ class RoutingModel { const std::vector& evaluator_indices, int64_t slack_max, std::vector vehicle_capacities, bool fix_start_cumul_to_zero, const std::string& name); + /// Creates a dimension where the transit variable on arc i->j is the sum of: + /// - A "fixed" transit value, obtained from the fixed_evaluator_index for + /// this vehicle, referencing evaluators in transit_evaluators_, and + /// - A FloatSlopePiecewiseLinearFunction of the cumul of node i, obtained + /// from the cumul_dependent_evaluator_index of this vehicle, pointing to + /// an evaluator in cumul_dependent_transit_evaluators_. 
+ bool AddDimensionWithCumulDependentVehicleTransitAndCapacity( + const std::vector& fixed_evaluator_indices, + const std::vector& cumul_dependent_evaluator_indices, + int64_t slack_max, std::vector vehicle_capacities, + bool fix_start_cumul_to_zero, const std::string& name); + /// Creates a dimension where the transit variable is constrained to be /// equal to 'value'; 'capacity' is the upper bound of the cumul variables. /// 'name' is the name used to reference the dimension; this name is used to @@ -1302,6 +1322,8 @@ class RoutingModel { void AddLocalSearchOperator(LocalSearchOperator* ls_operator); /// Adds a search monitor to the search used to solve the routing model. void AddSearchMonitor(SearchMonitor* monitor); + // Adds a callback called at the beginning of the search. + void AddEnterSearchCallback(std::function callback); /// Adds a callback called each time a solution is found during the search. /// This is a shortcut to creating a monitor to call the callback on /// AtSolution() and adding it with AddSearchMonitor. @@ -1310,6 +1332,9 @@ class RoutingModel { /// obtained when solver_parameters.check_solution_period > 1 (aka fastLS). void AddAtSolutionCallback(std::function callback, bool track_unchecked_neighbors = false); + // Internal-only: Adds a callback to reset + // RestoreDimensionValuesForUnchangedRoutes at the beginning of the search. + void AddRestoreDimensionValuesResetCallback(std::function callback); /// Adds a variable to minimize in the solution finalizer. 
The solution /// finalizer is called each time a solution is found during the search and /// allows to instantiate secondary variables (such as dimension cumul @@ -2135,9 +2160,10 @@ class RoutingModel { void Initialize(); void AddNoCycleConstraintInternal(); bool AddDimensionWithCapacityInternal( - const std::vector& evaluator_indices, int64_t slack_max, - std::vector vehicle_capacities, bool fix_start_cumul_to_zero, - const std::string& name); + const std::vector& evaluator_indices, + const std::vector& cumul_dependent_evaluator_indices, + int64_t slack_max, std::vector vehicle_capacities, + bool fix_start_cumul_to_zero, const std::string& name); bool AddDimensionDependentDimensionWithVehicleCapacityInternal( const std::vector& pure_transits, const std::vector& dependent_transits, @@ -2146,6 +2172,7 @@ class RoutingModel { const std::string& name); bool InitializeDimensionInternal( const std::vector& evaluator_indices, + const std::vector& cumul_dependent_evaluator_indices, const std::vector& state_dependent_evaluator_indices, int64_t slack_max, bool fix_start_cumul_to_zero, RoutingDimension* dimension); @@ -2336,28 +2363,36 @@ class RoutingModel { return CreateCPOperatorWithArg(MakeLocalSearchOperatorWithArg, std::move(arg)); } - using NeighborAccessor = std::function&(int, int)>; + + using NeighborAccessor = + std::function&(/*node=*/int, /*start_node=*/int)>; template LocalSearchOperator* CreateCPOperatorWithNeighbors( - NeighborAccessor get_neighbors) { + NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors) { return CreateCPOperatorWithNeighbors( - MakeLocalSearchOperatorWithNeighbors, std::move(get_neighbors)); + MakeLocalSearchOperatorWithNeighbors, + std::move(get_incoming_neighbors), std::move(get_outgoing_neighbors)); } template LocalSearchOperator* CreateOperatorWithNeighborsRatio( - int neighbors_ratio_used, NeighborAccessor get_neighbors) { - return neighbors_ratio_used == 1 - ? 
CreateCPOperator() - : CreateCPOperatorWithNeighbors(std::move(get_neighbors)); + int neighbors_ratio_used, NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors) { + return neighbors_ratio_used == 1 ? CreateCPOperator() + : CreateCPOperatorWithNeighbors( + std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors)); } template LocalSearchOperator* CreateCPOperatorWithNeighbors( - const T& operator_factory, NeighborAccessor get_neighbors) { + const T& operator_factory, NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors) { return operator_factory( solver_.get(), nexts_, CostsAreHomogeneousAcrossVehicles() ? std::vector() : vehicle_vars_, - vehicle_start_class_callback_, std::move(get_neighbors)); + vehicle_start_class_callback_, std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors)); } template LocalSearchOperator* CreateCPOperatorWithArg(const T& operator_factory, @@ -2378,20 +2413,24 @@ class RoutingModel { } template LocalSearchOperator* CreateOperatorWithNeighbors( - NeighborAccessor get_neighbors, const Arg& arg) { + NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors, const Arg& arg) { return solver_->RevAlloc( new T(nexts_, CostsAreHomogeneousAcrossVehicles() ? std::vector() : vehicle_vars_, - vehicle_start_class_callback_, std::move(get_neighbors), arg)); + vehicle_start_class_callback_, std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors), arg)); } template LocalSearchOperator* CreateOperatorWithNeighborsRatio( - int neighbors_ratio_used, NeighborAccessor get_neighbors, - const Arg& arg) { + int neighbors_ratio_used, NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors, const Arg& arg) { return neighbors_ratio_used == 1 ? 
CreateOperator(arg) - : CreateOperatorWithNeighbors(std::move(get_neighbors), arg); + : CreateOperatorWithNeighbors( + std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors), arg); } template LocalSearchOperator* CreateOperator(const Arg1& arg1, MoveableArg2 arg2) { @@ -2403,8 +2442,9 @@ class RoutingModel { } template LocalSearchOperator* CreateOperatorWithNeighborsRatio( - int neighbors_ratio_used, NeighborAccessor get_neighbors, - const Arg1& arg1, MoveableArg2 arg2) { + int neighbors_ratio_used, NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors, const Arg1& arg1, + MoveableArg2 arg2) { return neighbors_ratio_used == 1 ? CreateOperator(arg1, std::move(arg2)) : solver_->RevAlloc(new T(nexts_, @@ -2412,20 +2452,23 @@ class RoutingModel { ? std::vector() : vehicle_vars_, vehicle_start_class_callback_, - std::move(get_neighbors), arg1, - std::move(arg2))); + std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors), + arg1, std::move(arg2))); } template LocalSearchOperator* CreatePairOperator() { return CreateOperator(pickup_delivery_pairs_); } template - LocalSearchOperator* CreatePairOperator(int neighbors_ratio_used, - NeighborAccessor get_neighbors) { + LocalSearchOperator* CreatePairOperator( + int neighbors_ratio_used, NeighborAccessor get_incoming_neighbors, + NeighborAccessor get_outgoing_neighbors) { return neighbors_ratio_used == 1 ? 
CreateOperator(pickup_delivery_pairs_) - : CreateOperatorWithNeighbors(std::move(get_neighbors), - pickup_delivery_pairs_); + : CreateOperatorWithNeighbors( + std::move(get_incoming_neighbors), + std::move(get_outgoing_neighbors), pickup_delivery_pairs_); } #endif // SWIG void CreateNeighborhoodOperators(const RoutingSearchParameters& parameters); @@ -2712,6 +2755,7 @@ class RoutingModel { std::vector monitors_; std::vector secondary_ls_monitors_; std::vector at_solution_monitors_; + std::vector> restore_dimension_values_reset_callbacks_; int monitors_before_setup_ = 0; int monitors_after_setup_ = 0; SearchMonitor* metaheuristic_ = nullptr; @@ -2779,6 +2823,9 @@ class RoutingModel { std::vector> state_dependent_transit_evaluators_cache_; + std::vector + cumul_dependent_transit_evaluators_; + // Returns global BinCapacities state, may be nullptr. std::unique_ptr bin_capacities_; @@ -3367,6 +3414,13 @@ class RoutingDimension { return model()->transit_evaluator_sign_[evaluator_index]; } int vehicle_to_class(int vehicle) const { return vehicle_to_class_[vehicle]; } + int vehicle_to_cumul_dependent_class(int vehicle) const { + if (vehicle_to_cumul_dependent_class_.empty()) { + return -1; + } + DCHECK_LT(vehicle, vehicle_to_cumul_dependent_class_.size()); + return vehicle_to_cumul_dependent_class_[vehicle]; + } #endif /// !defined(SWIGCSHARP) && !defined(SWIGJAVA) #endif /// !defined(SWIGPYTHON) /// Sets an upper bound on the dimension span on a given vehicle. 
This is the @@ -3688,11 +3742,13 @@ class RoutingDimension { RoutingDimension(RoutingModel* model, std::vector vehicle_capacities, const std::string& name, SelfBased); void Initialize(const std::vector& transit_evaluators, + const std::vector& cumul_dependent_transit_evaluators, const std::vector& state_dependent_transit_evaluators, int64_t slack_max); void InitializeCumuls(); void InitializeTransits( absl::Span transit_evaluators, + absl::Span cumul_dependent_transit_evaluators, absl::Span state_dependent_transit_evaluators, int64_t slack_max); void InitializeTransitVariables(int64_t slack_max); @@ -3730,8 +3786,16 @@ class RoutingDimension { std::vector fixed_transits_; /// Values in class_evaluators_ correspond to the evaluators in /// RoutingModel::transit_evaluators_ for each vehicle class. + // TODO(user): Make the *vehicle_to*_class_* members vector instead + // of vector. std::vector class_evaluators_; std::vector vehicle_to_class_; + + /// Values in cumul_dependent_class_evaluators_ correspond to the evaluators + /// in RoutingModel::cumul_dependent_transit_evaluators_ for each vehicle + /// class. + std::vector cumul_dependent_class_evaluators_; + std::vector vehicle_to_cumul_dependent_class_; #ifndef SWIG ReverseArcListGraph path_precedence_graph_; #endif @@ -3745,7 +3809,6 @@ class RoutingDimension { // another dimension. There can be no cycles, except for self loops, a // typical example for this is a time dimension. const RoutingDimension* const base_dimension_; - // Values in state_dependent_class_evaluators_ correspond to the evaluators // in RoutingModel::state_dependent_transit_evaluators_ for each vehicle // class. 
diff --git a/ortools/routing/types.h b/ortools/routing/types.h index 945802aed9..bddabb4e73 100644 --- a/ortools/routing/types.h +++ b/ortools/routing/types.h @@ -19,6 +19,7 @@ #include #include "ortools/base/int_type.h" +#include "ortools/util/piecewise_linear_function.h" namespace operations_research::routing { @@ -47,6 +48,9 @@ struct PickupDeliveryPair { typedef std::function RoutingTransitCallback1; typedef std::function RoutingTransitCallback2; +typedef std::function + RoutingCumulDependentTransitCallback2; } // namespace operations_research::routing From a54f09cc83d7a42c8b61b93548133334cdf73182 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 11 Oct 2024 17:25:09 +0200 Subject: [PATCH 087/105] update .gitignore --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index ab345f00c3..fdebdf0ccc 100644 --- a/.gitignore +++ b/.gitignore @@ -79,6 +79,8 @@ cache/ **/.vscode/* .DS_Store **/.vs/* +/.helix +/compile_commands.json ortools/linear_solver/lpi_glop.cc From ad39cf7c93ba2338509f929f18e7250894e5846a Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 14 Oct 2024 10:23:51 +0200 Subject: [PATCH 088/105] cmake: Fix python.cmake --- cmake/python.cmake | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/cmake/python.cmake b/cmake/python.cmake index 4bc52bf1c7..c5d55e7433 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -512,14 +512,14 @@ add_custom_command( $ $ ${PYTHON_PROJECT}/.libs - COMMAND ${CMAKE_COMMAND} -E - $,copy,true> - $ - ${PYTHON_PROJECT}/.libs COMMAND ${CMAKE_COMMAND} -E $,copy,true> $ ${PYTHON_PROJECT}/.libs + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${PYTHON_PROJECT}/.libs COMMAND ${CMAKE_COMMAND} -E $,copy,true> @@ -532,8 +532,8 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E $,copy,true> $ - #$ - #$ + $ + $ ${PYTHON_PROJECT}/.libs COMMAND ${CMAKE_COMMAND} -E $,copy,true> @@ -542,7 +542,7 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E 
$,copy,true> $ - #$ + $ $ ${PYTHON_PROJECT}/.libs From 72af3e54694269fbf0a7631f475c6e4f5b59a749 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 14 Oct 2024 10:24:11 +0200 Subject: [PATCH 089/105] cmake: Fix java shared libs copy --- cmake/java.cmake | 115 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/cmake/java.cmake b/cmake/java.cmake index 9be483d148..f1ba0faf78 100644 --- a/cmake/java.cmake +++ b/cmake/java.cmake @@ -281,6 +281,121 @@ add_custom_command( $ $<$>:$> ${JAVA_RESSOURCES_PATH}/${JAVA_NATIVE_PROJECT}/ + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + ${JAVA_RESSOURCES_PATH}/${JAVA_NATIVE_PROJECT}/ + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${JAVA_RESSOURCES_PATH}/${JAVA_NATIVE_PROJECT}/ + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${JAVA_RESSOURCES_PATH}/${JAVA_NATIVE_PROJECT}/ + + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${JAVA_RESSOURCES_PATH}/${JAVA_NATIVE_PROJECT}/ + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${JAVA_RESSOURCES_PATH}/${JAVA_NATIVE_PROJECT}/ + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + $ + $ + ${JAVA_RESSOURCES_PATH}/${JAVA_NATIVE_PROJECT}/ + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${JAVA_RESSOURCES_PATH}/${JAVA_NATIVE_PROJECT}/ + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + $ + $ + ${JAVA_RESSOURCES_PATH}/${JAVA_NATIVE_PROJECT}/ + + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${JAVA_RESSOURCES_PATH}/${JAVA_NATIVE_PROJECT}/ + COMMAND ${MAVEN_EXECUTABLE} compile -B COMMAND ${MAVEN_EXECUTABLE} package -B $<$:-Dfatjar=true> COMMAND ${MAVEN_EXECUTABLE} install -B $<$:-Dgpg.skip=true> From d25f7d88a1ab0f2766d79705d6389b004bb041a7 Mon Sep 17 
00:00:00 2001 From: Corentin Le Molgat Date: Mon, 14 Oct 2024 10:24:40 +0200 Subject: [PATCH 090/105] dotnet: Fix shared libs copy in .Net runtime package --- .../dotnet/Google.OrTools.runtime.csproj.in | 84 +++++++++++++++++++ ortools/dotnet/Test.csproj.in | 8 +- 2 files changed, 88 insertions(+), 4 deletions(-) diff --git a/ortools/dotnet/Google.OrTools.runtime.csproj.in b/ortools/dotnet/Google.OrTools.runtime.csproj.in index ad6d3dba0c..dcf27576a8 100644 --- a/ortools/dotnet/Google.OrTools.runtime.csproj.in +++ b/ortools/dotnet/Google.OrTools.runtime.csproj.in @@ -27,6 +27,90 @@ runtimes/@DOTNET_RID@/native/%(Filename)%(Extension) true diff --git a/ortools/dotnet/Test.csproj.in b/ortools/dotnet/Test.csproj.in index f10a2c90b4..9dd2471eca 100644 --- a/ortools/dotnet/Test.csproj.in +++ b/ortools/dotnet/Test.csproj.in @@ -33,10 +33,10 @@ - - - - + + + + From d77444d15147b4c7144690b78a9bcf4a97dc734e Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 14 Oct 2024 11:11:21 +0200 Subject: [PATCH 091/105] Revert "[FZ] revamp support for element and element2d constraints" This reverts commit 06d65a7575c534bacb6dfeb136cf3463cd9f6932. 
--- cmake/flatzinc.cmake | 2 + ortools/flatzinc/BUILD.bazel | 16 + ortools/flatzinc/checker.cc | 117 ++--- ortools/flatzinc/cp_model_fz_solver.cc | 138 ++--- ortools/flatzinc/fz.cc | 11 + .../flatzinc/mznlib/redefinitions-2.0.2.mzn | 26 - .../flatzinc/mznlib/redefinitions-2.5.2.mzn | 49 -- ortools/flatzinc/parser_main.cc | 14 +- ortools/flatzinc/presolve.cc | 495 ++++++++++++++++++ ortools/flatzinc/presolve.h | 115 ++++ 10 files changed, 728 insertions(+), 255 deletions(-) delete mode 100644 ortools/flatzinc/mznlib/redefinitions-2.0.2.mzn delete mode 100644 ortools/flatzinc/mznlib/redefinitions-2.5.2.mzn create mode 100644 ortools/flatzinc/presolve.cc create mode 100644 ortools/flatzinc/presolve.h diff --git a/cmake/flatzinc.cmake b/cmake/flatzinc.cmake index 6a74b799e8..55304f8935 100644 --- a/cmake/flatzinc.cmake +++ b/cmake/flatzinc.cmake @@ -70,6 +70,8 @@ add_library(flatzinc ortools/flatzinc/parser.yy.cc #ortools/flatzinc/parser_util.cc # Already #include in parser.tab.cc ortools/flatzinc/parser_util.h + ortools/flatzinc/presolve.cc + ortools/flatzinc/presolve.h ) ## Includes target_include_directories(flatzinc PUBLIC diff --git a/ortools/flatzinc/BUILD.bazel b/ortools/flatzinc/BUILD.bazel index 112bbd75bd..d3e8b22116 100644 --- a/ortools/flatzinc/BUILD.bazel +++ b/ortools/flatzinc/BUILD.bazel @@ -109,6 +109,21 @@ cc_library( ], ) +cc_library( + name = "presolve", + srcs = ["presolve.cc"], + hdrs = ["presolve.h"], + deps = [ + ":model", + "//ortools/base", + "//ortools/base:hash", + "//ortools/graph:cliques", + "//ortools/util:logging", + "//ortools/util:saturated_arithmetic", + "@com_google_absl//absl/strings", + ], +) + cc_library( name = "checker", srcs = ["checker.cc"], @@ -157,6 +172,7 @@ cc_binary( ":cp_model_fz_solver", ":model", ":parser_lib", + ":presolve", "//ortools/base", "//ortools/base:path", "//ortools/base:threadpool", diff --git a/ortools/flatzinc/checker.cc b/ortools/flatzinc/checker.cc index 6b179c311b..e21375ffe2 100644 --- 
a/ortools/flatzinc/checker.cc +++ b/ortools/flatzinc/checker.cc @@ -183,27 +183,6 @@ bool CheckArrayVarIntElement( return element == target; } -bool CheckOrtoolsArrayIntElement( - const Constraint& ct, const std::function& evaluator) { - const int64_t min_index = ct.arguments[1].values[0]; - const int64_t index = Eval(ct.arguments[0], evaluator) - min_index; - const int64_t element = EvalAt(ct.arguments[2], index, evaluator); - const int64_t target = Eval(ct.arguments[3], evaluator); - return element == target; -} - -bool CheckOrtoolsArrayIntElement2d( - const Constraint& ct, const std::function& evaluator) { - const int64_t min_index0 = ct.arguments[2].values[0]; - const int64_t min_index1 = ct.arguments[3].values[0]; - const int64_t span1 = ct.arguments[3].values[1] - min_index1 + 1; - const int64_t index0 = Eval(ct.arguments[0], evaluator) - min_index0; - const int64_t index1 = Eval(ct.arguments[1], evaluator) - min_index1; - const int64_t element = - EvalAt(ct.arguments[4], index0 * span1 + index1, evaluator); - const int64_t target = Eval(ct.arguments[5], evaluator); - return element == target; -} bool CheckAtMostInt(const Constraint& ct, const std::function& evaluator) { const int64_t expected = Eval(ct.arguments[0], evaluator); @@ -1205,6 +1184,7 @@ using CallMap = // They are created at compilation time when using the or-tools mzn library. 
CallMap CreateCallMap() { CallMap m; + m["fzn_all_different_int"] = CheckAllDifferentInt; m["alldifferent_except_0"] = CheckAlldifferentExcept0; m["among"] = CheckAmong; m["array_bool_and"] = CheckArrayBoolAnd; @@ -1213,137 +1193,130 @@ CallMap CreateCallMap() { m["array_bool_xor"] = CheckArrayBoolXor; m["array_int_element"] = CheckArrayIntElement; m["array_int_element_nonshifted"] = CheckArrayIntElementNonShifted; - m["array_int_maximum"] = CheckMaximumInt; - m["array_int_minimum"] = CheckMinimumInt; m["array_var_bool_element"] = CheckArrayVarIntElement; m["array_var_int_element"] = CheckArrayVarIntElement; m["at_most_int"] = CheckAtMostInt; m["bool_and"] = CheckBoolAnd; m["bool_clause"] = CheckBoolClause; + m["bool_eq"] = CheckIntEq; + m["bool2int"] = CheckIntEq; m["bool_eq_imp"] = CheckIntEqImp; m["bool_eq_reif"] = CheckIntEqReif; - m["bool_eq"] = CheckIntEq; + m["bool_ge"] = CheckIntGe; m["bool_ge_imp"] = CheckIntGeImp; m["bool_ge_reif"] = CheckIntGeReif; - m["bool_ge"] = CheckIntGe; + m["bool_gt"] = CheckIntGt; m["bool_gt_imp"] = CheckIntGtImp; m["bool_gt_reif"] = CheckIntGtReif; - m["bool_gt"] = CheckIntGt; + m["bool_le"] = CheckIntLe; m["bool_le_imp"] = CheckIntLeImp; m["bool_le_reif"] = CheckIntLeReif; - m["bool_le"] = CheckIntLe; m["bool_left_imp"] = CheckIntLe; m["bool_lin_eq"] = CheckIntLinEq; m["bool_lin_le"] = CheckIntLinLe; + m["bool_lt"] = CheckIntLt; m["bool_lt_imp"] = CheckIntLtImp; m["bool_lt_reif"] = CheckIntLtReif; - m["bool_lt"] = CheckIntLt; + m["bool_ne"] = CheckIntNe; m["bool_ne_imp"] = CheckIntNeImp; m["bool_ne_reif"] = CheckIntNeReif; - m["bool_ne"] = CheckIntNe; m["bool_not"] = CheckBoolNot; m["bool_or"] = CheckBoolOr; m["bool_right_imp"] = CheckIntGe; m["bool_xor"] = CheckBoolXor; - m["bool2int"] = CheckIntEq; + m["ortools_circuit"] = CheckCircuit; m["count_eq"] = CheckCountEq; + m["count"] = CheckCountEq; m["count_geq"] = CheckCountGeq; m["count_gt"] = CheckCountGt; m["count_leq"] = CheckCountLeq; m["count_lt"] = CheckCountLt; 
m["count_neq"] = CheckCountNeq; m["count_reif"] = CheckCountReif; - m["count"] = CheckCountEq; - m["diffn_k_with_sizes"] = CheckDiffnK; - m["diffn_nonstrict_k_with_sizes"] = CheckDiffnNonStrictK; - m["false_constraint"] = CheckFalseConstraint; - m["fixed_cumulative"] = CheckCumulative; - m["fzn_all_different_int"] = CheckAllDifferentInt; m["fzn_cumulative"] = CheckCumulative; - m["fzn_diffn_nonstrict"] = CheckDiffnNonStrict; + m["var_cumulative"] = CheckCumulative; + m["variable_cumulative"] = CheckCumulative; + m["fixed_cumulative"] = CheckCumulative; + m["ortools_cumulative_opt"] = CheckCumulativeOpt; m["fzn_diffn"] = CheckDiffn; - m["fzn_disjunctive_strict"] = CheckDisjunctiveStrict; + m["diffn_k_with_sizes"] = CheckDiffnK; + m["fzn_diffn_nonstrict"] = CheckDiffnNonStrict; + m["diffn_nonstrict_k_with_sizes"] = CheckDiffnNonStrictK; m["fzn_disjunctive"] = CheckDisjunctive; - m["global_cardinality_closed"] = CheckGlobalCardinalityClosed; - m["global_cardinality_low_up_closed"] = CheckGlobalCardinalityLowUpClosed; - m["global_cardinality_low_up"] = CheckGlobalCardinalityLowUp; - m["global_cardinality_old"] = CheckGlobalCardinalityOld; + m["fzn_disjunctive_strict"] = CheckDisjunctiveStrict; + m["ortools_disjunctive_strict_opt"] = CheckDisjunctiveStrictOpt; + m["false_constraint"] = CheckFalseConstraint; m["global_cardinality"] = CheckGlobalCardinality; + m["global_cardinality_closed"] = CheckGlobalCardinalityClosed; + m["global_cardinality_low_up"] = CheckGlobalCardinalityLowUp; + m["global_cardinality_low_up_closed"] = CheckGlobalCardinalityLowUpClosed; + m["global_cardinality_old"] = CheckGlobalCardinalityOld; m["int_abs"] = CheckIntAbs; m["int_div"] = CheckIntDiv; + m["int_eq"] = CheckIntEq; m["int_eq_imp"] = CheckIntEqImp; m["int_eq_reif"] = CheckIntEqReif; - m["int_eq"] = CheckIntEq; + m["int_ge"] = CheckIntGe; m["int_ge_imp"] = CheckIntGeImp; m["int_ge_reif"] = CheckIntGeReif; - m["int_ge"] = CheckIntGe; + m["int_gt"] = CheckIntGt; m["int_gt_imp"] = 
CheckIntGtImp; m["int_gt_reif"] = CheckIntGtReif; - m["int_gt"] = CheckIntGt; - m["int_in"] = CheckSetIn; + m["int_le"] = CheckIntLe; m["int_le_imp"] = CheckIntLeImp; m["int_le_reif"] = CheckIntLeReif; - m["int_le"] = CheckIntLe; + m["int_lin_eq"] = CheckIntLinEq; m["int_lin_eq_imp"] = CheckIntLinEqImp; m["int_lin_eq_reif"] = CheckIntLinEqReif; - m["int_lin_eq"] = CheckIntLinEq; + m["int_lin_ge"] = CheckIntLinGe; m["int_lin_ge_imp"] = CheckIntLinGeImp; m["int_lin_ge_reif"] = CheckIntLinGeReif; - m["int_lin_ge"] = CheckIntLinGe; + m["int_lin_le"] = CheckIntLinLe; m["int_lin_le_imp"] = CheckIntLinLeImp; m["int_lin_le_reif"] = CheckIntLinLeReif; - m["int_lin_le"] = CheckIntLinLe; + m["int_lin_ne"] = CheckIntLinNe; m["int_lin_ne_imp"] = CheckIntLinNeImp; m["int_lin_ne_reif"] = CheckIntLinNeReif; - m["int_lin_ne"] = CheckIntLinNe; + m["int_lt"] = CheckIntLt; m["int_lt_imp"] = CheckIntLtImp; m["int_lt_reif"] = CheckIntLtReif; - m["int_lt"] = CheckIntLt; m["int_max"] = CheckIntMax; m["int_min"] = CheckIntMin; m["int_minus"] = CheckIntMinus; m["int_mod"] = CheckIntMod; + m["int_ne"] = CheckIntNe; m["int_ne_imp"] = CheckIntNeImp; m["int_ne_reif"] = CheckIntNeReif; - m["int_ne"] = CheckIntNe; m["int_negate"] = CheckIntNegate; - m["int_not_in"] = CheckSetNotIn; m["int_plus"] = CheckIntPlus; m["int_times"] = CheckIntTimes; + m["ortools_inverse"] = CheckInverse; m["lex_less_bool"] = CheckLexLessInt; m["lex_less_int"] = CheckLexLessInt; m["lex_lesseq_bool"] = CheckLexLesseqInt; m["lex_lesseq_int"] = CheckLexLesseqInt; m["maximum_arg_int"] = CheckMaximumArgInt; m["maximum_int"] = CheckMaximumInt; + m["array_int_maximum"] = CheckMaximumInt; m["minimum_arg_int"] = CheckMinimumArgInt; m["minimum_int"] = CheckMinimumInt; - m["nvalue"] = CheckNvalue; - m["ortools_array_bool_element"] = CheckOrtoolsArrayIntElement; - m["ortools_array_int_element"] = CheckOrtoolsArrayIntElement; - m["ortools_array_var_bool_element"] = CheckOrtoolsArrayIntElement; - m["ortools_array_var_bool_element2d"] 
= CheckOrtoolsArrayIntElement2d; - m["ortools_array_var_int_element"] = CheckOrtoolsArrayIntElement; - m["ortools_array_var_int_element2d"] = CheckOrtoolsArrayIntElement2d; - m["ortools_circuit"] = CheckCircuit; - m["ortools_cumulative_opt"] = CheckCumulativeOpt; - m["ortools_disjunctive_strict_opt"] = CheckDisjunctiveStrictOpt; - m["ortools_inverse"] = CheckInverse; - m["ortools_network_flow_cost"] = CheckNetworkFlowCost; + m["array_int_minimum"] = CheckMinimumInt; m["ortools_network_flow"] = CheckNetworkFlow; + m["ortools_network_flow_cost"] = CheckNetworkFlowCost; + m["nvalue"] = CheckNvalue; m["ortools_regular"] = CheckRegular; - m["ortools_subcircuit"] = CheckSubCircuit; - m["ortools_table_bool"] = CheckTableInt; - m["ortools_table_int"] = CheckTableInt; m["regular_nfa"] = CheckRegularNfa; - m["set_in_reif"] = CheckSetInReif; m["set_in"] = CheckSetIn; + m["int_in"] = CheckSetIn; m["set_not_in"] = CheckSetNotIn; + m["int_not_in"] = CheckSetNotIn; + m["set_in_reif"] = CheckSetInReif; m["sliding_sum"] = CheckSlidingSum; m["sort"] = CheckSort; + m["ortools_subcircuit"] = CheckSubCircuit; m["symmetric_all_different"] = CheckSymmetricAllDifferent; - m["var_cumulative"] = CheckCumulative; - m["variable_cumulative"] = CheckCumulative; + m["ortools_table_bool"] = CheckTableInt; + m["ortools_table_int"] = CheckTableInt; return m; } diff --git a/ortools/flatzinc/cp_model_fz_solver.cc b/ortools/flatzinc/cp_model_fz_solver.cc index 25438654a6..d343d1b4b7 100644 --- a/ortools/flatzinc/cp_model_fz_solver.cc +++ b/ortools/flatzinc/cp_model_fz_solver.cc @@ -19,7 +19,6 @@ #include #include #include -#include #include #include "absl/container/flat_hash_map.h" @@ -77,9 +76,6 @@ struct CpModelProtoWithMapping { std::vector LookupVars(const fz::Argument& argument); std::vector LookupVarsOrValues(const fz::Argument& argument); - // Encoding literals. 
- int GetOrCreateVarEqValueLiteral(int var, int64_t value); - // Create and return the indices of the IntervalConstraint corresponding // to the flatzinc "interval" specified by a start var and a size var. // This method will cache intervals with the key . @@ -138,7 +134,6 @@ struct CpModelProtoWithMapping { absl::flat_hash_map, int> interval_key_to_index; absl::flat_hash_map var_to_lit_implies_greater_than_zero; - absl::flat_hash_map, int> value_encoding_literals; }; int CpModelProtoWithMapping::LookupConstant(int64_t value) { @@ -232,34 +227,6 @@ std::vector CpModelProtoWithMapping::LookupVarsOrValues( return result; } -int CpModelProtoWithMapping::GetOrCreateVarEqValueLiteral(int var, - int64_t value) { - const std::pair key = {var, value}; - const auto it = value_encoding_literals.find(key); - if (it != value_encoding_literals.end()) { - return it->second; - } - const int result = proto.variables_size(); - IntegerVariableProto* var_proto = proto.add_variables(); - var_proto->add_domain(0); - var_proto->add_domain(1); - value_encoding_literals[key] = result; - - ConstraintProto* pos_enforcement = AddEnforcedConstraint(result); - pos_enforcement->mutable_linear()->add_vars(var); - pos_enforcement->mutable_linear()->add_coeffs(1); - pos_enforcement->mutable_linear()->add_domain(value); - pos_enforcement->mutable_linear()->add_domain(value); - - ConstraintProto* neg_enforcement = AddEnforcedConstraint(NegatedRef(result)); - neg_enforcement->mutable_linear()->add_vars(var); - neg_enforcement->mutable_linear()->add_coeffs(1); - const Domain complement = Domain(value).Complement().IntersectionWith( - ReadDomainFromProto(proto.variables(var))); - FillDomainInProto(complement, neg_enforcement->mutable_linear()); - return result; -} - ConstraintProto* CpModelProtoWithMapping::AddEnforcedConstraint(int literal) { ConstraintProto* result = proto.add_constraints(); if (literal != kNoVar) { @@ -667,87 +634,46 @@ void CpModelProtoWithMapping::FillConstraint(const 
fz::Constraint& fz_ct, fz_ct.type == "array_var_int_element" || fz_ct.type == "array_var_bool_element" || fz_ct.type == "array_int_element_nonshifted") { - // Compatibility with the old format. - CHECK(fz_ct.arguments[0].type == fz::Argument::VAR_REF || - fz_ct.arguments[0].type == fz::Argument::INT_VALUE); - auto* arg = ct->mutable_element(); - arg->set_index(LookupVar(fz_ct.arguments[0])); - arg->set_target(LookupVar(fz_ct.arguments[2])); - - if (!absl::EndsWith(fz_ct.type, "_nonshifted")) { - // Add a dummy variable at position zero because flatzinc index start - // at 1. - // TODO(user): Make sure that zero is not in the index domain... - arg->add_vars(LookupConstant(0)); - } - for (const int var : LookupVars(fz_ct.arguments[1])) arg->add_vars(var); - } else if (fz_ct.type == "ortools_array_int_element" || - fz_ct.type == "ortools_array_bool_element" || - fz_ct.type == "ortools_array_var_int_element" || - fz_ct.type == "ortools_array_var_bool_element") { if (fz_ct.arguments[0].type == fz::Argument::VAR_REF || fz_ct.arguments[0].type == fz::Argument::INT_VALUE) { auto* arg = ct->mutable_element(); arg->set_index(LookupVar(fz_ct.arguments[0])); - arg->set_target(LookupVar(fz_ct.arguments[3])); - CHECK_EQ(fz_ct.arguments[1].type, fz::Argument::INT_INTERVAL); - const int64_t min_index = fz_ct.arguments[1].values.front(); - if (min_index > 0) { - const int zero_cst = LookupConstant(0); - for (int i = 0; i < min_index; ++i) { - arg->add_vars(zero_cst); - } + arg->set_target(LookupVar(fz_ct.arguments[2])); + + if (!absl::EndsWith(fz_ct.type, "_nonshifted")) { + // Add a dummy variable at position zero because flatzinc index start + // at 1. + // TODO(user): Make sure that zero is not in the index domain... 
+ arg->add_vars(LookupConstant(0)); } - for (const int var : LookupVars(fz_ct.arguments[2])) arg->add_vars(var); - } - } else if (fz_ct.type == "ortools_array_var_int_element2d" || - fz_ct.type == "ortools_array_var_bool_element2d") { - const int index1 = LookupVar(fz_ct.arguments[0]); - const int index2 = LookupVar(fz_ct.arguments[1]); - const int target = LookupVar(fz_ct.arguments[5]); - - CHECK_EQ(fz_ct.arguments[2].type, fz::Argument::INT_INTERVAL); - CHECK_EQ(fz_ct.arguments[3].type, fz::Argument::INT_INTERVAL); - const int64_t min_1 = fz_ct.arguments[2].values[0]; - const int64_t max_1 = fz_ct.arguments[2].values[1]; - const int64_t min_2 = fz_ct.arguments[3].values[0]; - const int64_t max_2 = fz_ct.arguments[3].values[1]; - - if (fz_ct.arguments[4].type == fz::Argument::INT_LIST) { - // If the array is constant, we encode this as a table constraint. - auto* arg = ct->mutable_table(); - arg->add_vars(index1); - arg->add_vars(index2); - arg->add_vars(target); - - int i = 0; - for (int64_t val_1 = min_1; val_1 <= max_1; ++val_1) { - for (int64_t val_2 = min_2; val_2 <= max_2; ++val_2) { - arg->add_values(val_1); - arg->add_values(val_2); - arg->add_values(fz_ct.arguments[4].ValueAt(i++)); - } - } - CHECK_EQ(i, fz_ct.arguments[4].Size()); + for (const int var : LookupVars(fz_ct.arguments[1])) arg->add_vars(var); } else { - std::vector elems = LookupVars(fz_ct.arguments[4]); - int i = 0; - for (int64_t val_1 = min_1; val_1 <= max_1; ++val_1) { - const int lit1 = GetOrCreateVarEqValueLiteral(index1, val_1); - for (int64_t val_2 = min_2; val_2 <= max_2; ++val_2) { - const int lit2 = GetOrCreateVarEqValueLiteral(index2, val_2); - if (i != 0) ct = proto.add_constraints(); // new constraint. 
- ct->add_enforcement_literal(lit1); - ct->add_enforcement_literal(lit2); - ct->mutable_linear()->add_vars(target); - ct->mutable_linear()->add_coeffs(1); - ct->mutable_linear()->add_vars(elems[i++]); - ct->mutable_linear()->add_coeffs(-1); - ct->mutable_linear()->add_domain(0); - ct->mutable_linear()->add_domain(0); + // Special case added by the presolve or in flatzinc. We encode this + // as a table constraint. + CHECK(!absl::EndsWith(fz_ct.type, "_nonshifted")); + auto* arg = ct->mutable_table(); + + // the constraint is: + // values[coeff1 * vars[0] + coeff2 * vars[1] + offset] == target. + for (const int var : LookupVars(fz_ct.arguments[0])) arg->add_vars(var); + arg->add_vars(LookupVar(fz_ct.arguments[2])); // the target + + const std::vector& values = fz_ct.arguments[1].values; + const int64_t coeff1 = fz_ct.arguments[3].values[0]; + const int64_t coeff2 = fz_ct.arguments[3].values[1]; + const int64_t offset = fz_ct.arguments[4].values[0] - 1; + + for (const int64_t a : AllValuesInDomain(proto.variables(arg->vars(0)))) { + for (const int64_t b : + AllValuesInDomain(proto.variables(arg->vars(1)))) { + const int index = coeff1 * a + coeff2 * b + offset; + CHECK_GE(index, 0); + CHECK_LT(index, values.size()); + arg->add_values(a); + arg->add_values(b); + arg->add_values(values[index]); } } - CHECK_EQ(i, fz_ct.arguments[4].Size()); } } else if (fz_ct.type == "ortools_table_int") { auto* arg = ct->mutable_table(); diff --git a/ortools/flatzinc/fz.cc b/ortools/flatzinc/fz.cc index ee1ce9ef11..042102dae7 100644 --- a/ortools/flatzinc/fz.cc +++ b/ortools/flatzinc/fz.cc @@ -39,6 +39,7 @@ #include "ortools/flatzinc/cp_model_fz_solver.h" #include "ortools/flatzinc/model.h" #include "ortools/flatzinc/parser.h" +#include "ortools/flatzinc/presolve.h" #include "ortools/util/logging.h" ABSL_FLAG(double, time_limit, 0, "time limit in seconds."); @@ -49,6 +50,7 @@ ABSL_FLAG(bool, free_search, false, "If false, the solver must follow the defined search." 
"If true, other search are allowed."); ABSL_FLAG(int, threads, 0, "Number of threads the solver will use."); +ABSL_FLAG(bool, presolve, true, "Presolve the model to simplify it."); ABSL_FLAG(bool, statistics, false, "Print solver statistics after search."); ABSL_FLAG(bool, read_from_stdin, false, "Read the FlatZinc from stdin, not from a file."); @@ -151,6 +153,15 @@ Model ParseFlatzincModel(const std::string& input, bool input_is_filename, " parsed in ", timer.GetInMs(), " ms"); SOLVER_LOG(logger, ""); + // Presolve the model. + Presolver presolve(logger); + SOLVER_LOG(logger, "Presolve model"); + timer.Reset(); + timer.Start(); + presolve.Run(&model); + SOLVER_LOG(logger, " - done in ", timer.GetInMs(), " ms"); + SOLVER_LOG(logger); + // Print statistics. ModelStatistics stats(model, logger); stats.BuildStatistics(); diff --git a/ortools/flatzinc/mznlib/redefinitions-2.0.2.mzn b/ortools/flatzinc/mznlib/redefinitions-2.0.2.mzn deleted file mode 100644 index 74e28cd766..0000000000 --- a/ortools/flatzinc/mznlib/redefinitions-2.0.2.mzn +++ /dev/null @@ -1,26 +0,0 @@ -% Ignore. -predicate symmetry_breaking_constraint(var bool: b) = b; - -predicate redundant_constraint(var bool: b) = b; - -% array_var_bool_element_nonshifted. -predicate ortools_array_var_bool_element(var int: idx, - set of int: domain_of_x, - array [int] of var bool: x, - var bool: c); - -predicate array_var_bool_element_nonshifted(var int: idx, - array [int] of var bool: x, - var bool: c) = - ortools_array_var_bool_element(idx, index_set(x), x, c); - -% array_var_int_element_nonshifted. 
-predicate ortools_array_var_int_element(var int: idx, - set of int: domain_of_x, - array [int] of var int: x, - var int: c); - -predicate array_var_int_element_nonshifted(var int: idx, - array [int] of var int: x, - var int: c) = - ortools_array_var_int_element(idx, index_set(x), x, c); diff --git a/ortools/flatzinc/mznlib/redefinitions-2.5.2.mzn b/ortools/flatzinc/mznlib/redefinitions-2.5.2.mzn deleted file mode 100644 index e60bd8661f..0000000000 --- a/ortools/flatzinc/mznlib/redefinitions-2.5.2.mzn +++ /dev/null @@ -1,49 +0,0 @@ -% array_var_bool_element2d_nonshifted. -predicate ortools_array_var_bool_element2d(var int: idx1, - var int: idx2, - set of int: domain_of_x_1, - set of int: domain_of_x_2, - array[int] of var bool: x, - var bool: c); - -predicate array_var_bool_element2d_nonshifted(var int: idx1, - var int: idx2, - array[int,int] of var bool: x, - var bool: c) = - ortools_array_var_bool_element2d(idx1, - idx2, - index_set_1of2(x), - index_set_2of2(x), - array1d(x), - c); - -% array_var_int_element2d_nonshifted. 
-predicate ortools_array_var_int_element2d(var int: idx1, - var int: idx2, - set of int: domain_of_x_1, - set of int: domain_of_x_2, - array[int] of var int: x, - var int: c); - -predicate array_var_int_element2d_nonshifted(var int: idx1, - var int: idx2, - array[int,int] of var int: x, - var int: c) = - ortools_array_var_int_element2d(idx1, - idx2, - index_set_1of2(x), - index_set_2of2(x), - array1d(x), - c); - -predicate array_var_float_element2d_nonshifted(var int: idx1, var int: idx2, array[int,int] of var float: x, var float: c) = - let { - int: dim = card(index_set_2of2(x)); - int: min_flat = min(index_set_1of2(x))*dim+min(index_set_2of2(x))-1; - } in array_var_float_element_nonshifted((idx1*dim+idx2-min_flat)::domain, array1d(x), c); - -predicate array_var_set_element2d_nonshifted(var int: idx1, var int: idx2, array[int,int] of var set of int: x, var set of int: c) = - let { - int: dim = card(index_set_2of2(x)); - int: min_flat = min(index_set_1of2(x))*dim+min(index_set_2of2(x))-1; - } in array_var_set_element_nonshifted((idx1*dim+idx2-min_flat)::domain, array1d(x), c); diff --git a/ortools/flatzinc/parser_main.cc b/ortools/flatzinc/parser_main.cc index f571606c4f..eaebbedd00 100644 --- a/ortools/flatzinc/parser_main.cc +++ b/ortools/flatzinc/parser_main.cc @@ -25,6 +25,7 @@ #include "ortools/base/timer.h" #include "ortools/flatzinc/model.h" #include "ortools/flatzinc/parser.h" +#include "ortools/flatzinc/presolve.h" #include "ortools/util/logging.h" ABSL_FLAG(std::string, input, "", "Input file in the flatzinc format."); @@ -34,7 +35,7 @@ ABSL_FLAG(bool, statistics, false, "Print model statistics"); namespace operations_research { namespace fz { -void ParseFile(const std::string& filename) { +void ParseFile(const std::string& filename, bool presolve) { WallTimer timer; timer.Start(); @@ -57,6 +58,14 @@ void ParseFile(const std::string& filename) { Model model(problem_name); CHECK(ParseFlatzincFile(filename, &model)); + if (presolve) { + SOLVER_LOG(&logger, 
"Presolve model"); + timer.Reset(); + timer.Start(); + Presolver presolve(&logger); + presolve.Run(&model); + SOLVER_LOG(&logger, " - done in ", timer.GetInMs(), " ms"); + } if (absl::GetFlag(FLAGS_statistics)) { ModelStatistics stats(model, &logger); stats.BuildStatistics(); @@ -76,6 +85,7 @@ int main(int argc, char** argv) { absl::SetProgramUsageMessage(kUsage); absl::ParseCommandLine(argc, argv); google::InitGoogleLogging(argv[0]); - operations_research::fz::ParseFile(absl::GetFlag(FLAGS_input)); + operations_research::fz::ParseFile(absl::GetFlag(FLAGS_input), + absl::GetFlag(FLAGS_presolve)); return 0; } diff --git a/ortools/flatzinc/presolve.cc b/ortools/flatzinc/presolve.cc new file mode 100644 index 0000000000..2887f8db46 --- /dev/null +++ b/ortools/flatzinc/presolve.cc @@ -0,0 +1,495 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/flatzinc/presolve.h" + +#include +#include +#include +#include +#include + +#include "absl/container/flat_hash_set.h" +#include "absl/flags/flag.h" +#include "absl/log/check.h" +#include "absl/strings/string_view.h" +#include "absl/types/span.h" +#include "ortools/flatzinc/model.h" +#include "ortools/util/logging.h" + +ABSL_FLAG(bool, fz_floats_are_ints, false, + "Interpret floats as integers in all variables and constraints."); + +namespace operations_research { +namespace fz { +namespace { +enum PresolveState { ALWAYS_FALSE, ALWAYS_TRUE, UNDECIDED }; + +template +bool IsArrayBoolean(const std::vector& values) { + for (int i = 0; i < values.size(); ++i) { + if (values[i] != 0 && values[i] != 1) { + return false; + } + } + return true; +} + +template +bool AtMostOne0OrAtMostOne1(const std::vector& values) { + CHECK(IsArrayBoolean(values)); + int num_zero = 0; + int num_one = 0; + for (T val : values) { + if (val) { + num_one++; + } else { + num_zero++; + } + if (num_one > 1 && num_zero > 1) { + return false; + } + } + return true; +} + +template +void AppendIfNotInSet(T* value, absl::flat_hash_set* s, + std::vector* vec) { + if (s->insert(value).second) { + vec->push_back(value); + } + DCHECK_EQ(s->size(), vec->size()); +} + +} // namespace + +// Note on documentation +// +// In order to document presolve rules, we will use the following naming +// convention: +// - x, x1, xi, y, y1, yi denote integer variables +// - b, b1, bi denote boolean variables +// - c, c1, ci denote integer constants +// - t, t1, ti denote boolean constants +// - => x after a constraint denotes the target variable of this constraint. +// Arguments are listed in order. + +// Propagates cast constraint. +// Rule 1: +// Input: bool2int(b, c) or bool2int(t, x) +// Output: int_eq(...) +// +// Rule 2: +// Input: bool2int(b, x) +// Action: Replace all instances of x by b. 
+// Output: inactive constraint +void Presolver::PresolveBool2Int(Constraint* ct) { + DCHECK_EQ(ct->type, "bool2int"); + if (ct->arguments[0].HasOneValue() || ct->arguments[1].HasOneValue()) { + // Rule 1. + UpdateRuleStats("bool2int: rename to int_eq"); + ct->type = "int_eq"; + } else { + // Rule 2. + UpdateRuleStats("bool2int: merge boolean and integer variables."); + AddVariableSubstitution(ct->arguments[1].Var(), ct->arguments[0].Var()); + ct->MarkAsInactive(); + } +} + +// Propagates cast constraint. +// Rule 1: +// Input: int2float(x, y) +// Action: Replace all instances of y by x. +// Output: inactive constraint +void Presolver::PresolveInt2Float(Constraint* ct) { + DCHECK_EQ(ct->type, "int2float"); + // Rule 1. + UpdateRuleStats("int2float: merge integer and floating point variables."); + AddVariableSubstitution(ct->arguments[1].Var(), ct->arguments[0].Var()); + ct->MarkAsInactive(); +} + +void Presolver::PresolveStoreFlatteningMapping(Constraint* ct) { + CHECK_EQ(3, ct->arguments[1].variables.size()); + Variable* const var0 = ct->arguments[1].variables[0]; + Variable* const var1 = ct->arguments[1].variables[1]; + Variable* const var2 = ct->arguments[1].variables[2]; + const int64_t coeff0 = ct->arguments[0].values[0]; + const int64_t coeff1 = ct->arguments[0].values[1]; + const int64_t coeff2 = ct->arguments[0].values[2]; + const int64_t rhs = ct->arguments[2].Value(); + if (coeff0 == -1 && coeff2 == 1 && !array2d_index_map_.contains(var0)) { + array2d_index_map_[var0] = + Array2DIndexMapping(var1, coeff1, var2, -rhs, ct); + UpdateRuleStats("int_lin_eq: store 2d flattening mapping"); + } else if (coeff0 == -1 && coeff1 == 1 && + !array2d_index_map_.contains(var0)) { + array2d_index_map_[var0] = + Array2DIndexMapping(var2, coeff2, var1, -rhs, ct); + UpdateRuleStats("int_lin_eq: store 2d flattening mapping"); + } else if (coeff2 == -1 && coeff1 == 1 && + !array2d_index_map_.contains(var2)) { + array2d_index_map_[var2] = + Array2DIndexMapping(var0, coeff0, 
var1, -rhs, ct); + UpdateRuleStats("int_lin_eq: store 2d flattening mapping"); + } else if (coeff2 == -1 && coeff0 == 1 && + !array2d_index_map_.contains(var2)) { + array2d_index_map_[var2] = + Array2DIndexMapping(var1, coeff1, var0, -rhs, ct); + UpdateRuleStats("int_lin_eq: store 2d flattening mapping"); + } +} + +namespace { +bool IsIncreasingAndContiguous(absl::Span values) { + for (int i = 0; i < values.size() - 1; ++i) { + if (values[i + 1] != values[i] + 1) { + return false; + } + } + return true; +} + +bool AreOnesFollowedByMinusOne(absl::Span coeffs) { + CHECK(!coeffs.empty()); + for (int i = 0; i < coeffs.size() - 1; ++i) { + if (coeffs[i] != 1) { + return false; + } + } + return coeffs.back() == -1; +} + +template +bool IsStrictPrefix(const std::vector& v1, const std::vector& v2) { + if (v1.size() >= v2.size()) { + return false; + } + for (int i = 0; i < v1.size(); ++i) { + if (v1[i] != v2[i]) { + return false; + } + } + return true; +} +} // namespace + +// Rewrite array element: array_int_element: +// +// Rule 1: +// Input : array_int_element(x, [c1, .., cn], y) with x = a * x1 + x2 + b +// Output: array_int_element([x1, x2], [c_a1, .., c_am], b, [a, b]) +// to be interpreted by the extraction process. +// +// Rule 2: +// Input : array_int_element(x, [c1, .., cn], y) with x0 ci = c0 + i +// Output: int_lin_eq([-1, 1], [y, x], 1 - c) (e.g. y = x + c - 1) +void Presolver::PresolveSimplifyElement(Constraint* ct) { + if (ct->arguments[0].variables.size() != 1) return; + Variable* const index_var = ct->arguments[0].Var(); + + // Rule 1. + if (array2d_index_map_.contains(index_var)) { + UpdateRuleStats("array_int_element: rewrite as a 2d element"); + const Array2DIndexMapping& mapping = array2d_index_map_[index_var]; + // Rewrite constraint. 
+ ct->arguments[0] = + Argument::VarRefArray({mapping.variable1, mapping.variable2}); + std::vector coefs; + coefs.push_back(mapping.coefficient); + coefs.push_back(1); + ct->arguments.push_back(Argument::IntegerList(coefs)); + ct->arguments.push_back(Argument::IntegerValue(mapping.offset)); + index_var->active = false; + mapping.constraint->MarkAsInactive(); + return; + } + + // Rule 2. + if (IsIncreasingAndContiguous(ct->arguments[1].values) && + ct->arguments[2].type == Argument::VAR_REF) { + const int64_t start = ct->arguments[1].values.front(); + Variable* const index = ct->arguments[0].Var(); + Variable* const target = ct->arguments[2].Var(); + UpdateRuleStats("array_int_element: rewrite as a linear constraint"); + + if (start == 1) { + ct->type = "int_eq"; + ct->RemoveArg(1); + } else { + // Rewrite constraint into a int_lin_eq + ct->type = "int_lin_eq"; + ct->arguments[0] = Argument::IntegerList({-1, 1}); + ct->arguments[1] = Argument::VarRefArray({target, index}); + ct->arguments[2] = Argument::IntegerValue(1 - start); + } + } +} + +void Presolver::Run(Model* model) { + // Should rewrite float constraints. + if (absl::GetFlag(FLAGS_fz_floats_are_ints)) { + // Treat float variables as int variables, convert constraints to int. + for (Constraint* const ct : model->constraints()) { + const std::string& id = ct->type; + if (id == "int2float") { + ct->type = "int_eq"; + } else if (id == "float_lin_le") { + ct->type = "int_lin_le"; + } else if (id == "float_lin_eq") { + ct->type = "int_lin_eq"; + } + } + } + + // Regroup increasing sequence of int_lin_eq([1,..,1,-1], [x1, ..., xn, yn]) + // into sequence of int_plus(x1, x2, y2), int_plus(y2, x3, y3)... 
+ std::vector current_variables; + Variable* target_variable = nullptr; + Constraint* first_constraint = nullptr; + for (Constraint* const ct : model->constraints()) { + if (target_variable == nullptr) { + if (ct->type == "int_lin_eq" && ct->arguments[0].values.size() == 3 && + AreOnesFollowedByMinusOne(ct->arguments[0].values) && + ct->arguments[1].values.empty() && ct->arguments[2].Value() == 0) { + current_variables = ct->arguments[1].variables; + target_variable = current_variables.back(); + current_variables.pop_back(); + first_constraint = ct; + } + } else { + if (ct->type == "int_lin_eq" && + AreOnesFollowedByMinusOne(ct->arguments[0].values) && + ct->arguments[0].values.size() == current_variables.size() + 2 && + IsStrictPrefix(current_variables, ct->arguments[1].variables)) { + current_variables = ct->arguments[1].variables; + // Rewrite ct into int_plus. + ct->type = "int_plus"; + ct->arguments.clear(); + ct->arguments.push_back(Argument::VarRef(target_variable)); + ct->arguments.push_back( + Argument::VarRef(current_variables[current_variables.size() - 2])); + ct->arguments.push_back(Argument::VarRef(current_variables.back())); + target_variable = current_variables.back(); + current_variables.pop_back(); + + // We clean the first constraint too. + if (first_constraint != nullptr) { + first_constraint = nullptr; + } + } else { + current_variables.clear(); + target_variable = nullptr; + } + } + } + + // First pass. + for (Constraint* const ct : model->constraints()) { + if (ct->active && ct->type == "bool2int") { + PresolveBool2Int(ct); + } else if (ct->active && ct->type == "int2float") { + PresolveInt2Float(ct); + } else if (ct->active && ct->type == "int_lin_eq" && + ct->arguments[1].variables.size() == 3 && + ct->strong_propagation) { + PresolveStoreFlatteningMapping(ct); + } + } + if (!var_representative_map_.empty()) { + // Some new substitutions were introduced. Let's process them. 
+ SubstituteEverywhere(model); + var_representative_map_.clear(); + var_representative_vector_.clear(); + } + + // Second pass. + for (Constraint* const ct : model->constraints()) { + if (ct->type == "array_int_element" || ct->type == "array_bool_element") { + PresolveSimplifyElement(ct); + } + } + + // Third pass: process objective with floating point coefficients. + Variable* float_objective_var = nullptr; + for (Variable* var : model->variables()) { + if (!var->active) continue; + if (var->domain.is_float) { + CHECK(float_objective_var == nullptr); + float_objective_var = var; + } + } + + Constraint* float_objective_ct = nullptr; + if (float_objective_var != nullptr) { + for (Constraint* ct : model->constraints()) { + if (!ct->active) continue; + if (ct->type == "float_lin_eq") { + CHECK(float_objective_ct == nullptr); + float_objective_ct = ct; + break; + } + } + } + + if (float_objective_ct != nullptr || float_objective_var != nullptr) { + CHECK(float_objective_ct != nullptr); + CHECK(float_objective_var != nullptr); + const int arity = float_objective_ct->arguments[0].Size(); + CHECK_EQ(float_objective_ct->arguments[1].variables[arity - 1], + float_objective_var); + CHECK_EQ(float_objective_ct->arguments[0].floats[arity - 1], -1.0); + for (int i = 0; i + 1 < arity; ++i) { + model->AddFloatingPointObjectiveTerm( + float_objective_ct->arguments[1].variables[i], + float_objective_ct->arguments[0].floats[i]); + } + model->SetFloatingPointObjectiveOffset( + -float_objective_ct->arguments[2].floats[0]); + model->ClearObjective(); + float_objective_var->active = false; + float_objective_ct->active = false; + } + + // Report presolve rules statistics. 
+ if (!successful_rules_.empty()) { + for (const auto& rule : successful_rules_) { + if (rule.second == 1) { + SOLVER_LOG(logger_, " - rule '", rule.first, "' was applied 1 time"); + } else { + SOLVER_LOG(logger_, " - rule '", rule.first, "' was applied ", + rule.second, " times"); + } + } + } +} + +// ----- Substitution support ----- + +void Presolver::AddVariableSubstitution(Variable* from, Variable* to) { + CHECK(from != nullptr); + CHECK(to != nullptr); + // Apply the substitutions, if any. + from = FindRepresentativeOfVar(from); + to = FindRepresentativeOfVar(to); + if (to->temporary) { + // Let's switch to keep a non temporary as representative. + Variable* tmp = to; + to = from; + from = tmp; + } + if (from != to) { + CHECK(to->Merge(from->name, from->domain, from->temporary)); + from->active = false; + var_representative_map_[from] = to; + var_representative_vector_.push_back(from); + } +} + +Variable* Presolver::FindRepresentativeOfVar(Variable* var) { + if (var == nullptr) return nullptr; + Variable* start_var = var; + // First loop: find the top parent. + for (;;) { + const auto& it = var_representative_map_.find(var); + Variable* parent = it == var_representative_map_.end() ? var : it->second; + if (parent == var) break; + var = parent; + } + // Second loop: attach all the path to the top parent. + while (start_var != var) { + Variable* const parent = var_representative_map_[start_var]; + var_representative_map_[start_var] = var; + start_var = parent; + } + const auto& iter = var_representative_map_.find(var); + return iter == var_representative_map_.end() ? var : iter->second; +} + +void Presolver::SubstituteEverywhere(Model* model) { + // Rewrite the constraints. 
+ for (Constraint* const ct : model->constraints()) { + if (ct != nullptr && ct->active) { + for (int i = 0; i < ct->arguments.size(); ++i) { + Argument& argument = ct->arguments[i]; + switch (argument.type) { + case Argument::VAR_REF: + case Argument::VAR_REF_ARRAY: { + for (int i = 0; i < argument.variables.size(); ++i) { + Variable* const old_var = argument.variables[i]; + Variable* const new_var = FindRepresentativeOfVar(old_var); + if (new_var != old_var) { + argument.variables[i] = new_var; + } + } + break; + } + default: { + } + } + } + } + } + // Rewrite the search. + for (Annotation* const ann : model->mutable_search_annotations()) { + SubstituteAnnotation(ann); + } + // Rewrite the output. + for (SolutionOutputSpecs* const output : model->mutable_output()) { + output->variable = FindRepresentativeOfVar(output->variable); + for (int i = 0; i < output->flat_variables.size(); ++i) { + output->flat_variables[i] = + FindRepresentativeOfVar(output->flat_variables[i]); + } + } + // Do not forget to merge domain that could have evolved asynchronously + // during presolve. + for (const auto& iter : var_representative_map_) { + iter.second->domain.IntersectWithDomain(iter.first->domain); + } + + // Change the objective variable. + Variable* const current_objective = model->objective(); + if (current_objective == nullptr) return; + Variable* const new_objective = FindRepresentativeOfVar(current_objective); + if (new_objective != current_objective) { + model->SetObjective(new_objective); + } +} + +void Presolver::SubstituteAnnotation(Annotation* ann) { + // TODO(user): Remove recursion. 
+ switch (ann->type) { + case Annotation::ANNOTATION_LIST: + case Annotation::FUNCTION_CALL: { + for (int i = 0; i < ann->annotations.size(); ++i) { + SubstituteAnnotation(&ann->annotations[i]); + } + break; + } + case Annotation::VAR_REF: + case Annotation::VAR_REF_ARRAY: { + for (int i = 0; i < ann->variables.size(); ++i) { + ann->variables[i] = FindRepresentativeOfVar(ann->variables[i]); + } + break; + } + default: { + } + } +} + +} // namespace fz +} // namespace operations_research diff --git a/ortools/flatzinc/presolve.h b/ortools/flatzinc/presolve.h new file mode 100644 index 0000000000..38675968c7 --- /dev/null +++ b/ortools/flatzinc/presolve.h @@ -0,0 +1,115 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_FLATZINC_PRESOLVE_H_ +#define OR_TOOLS_FLATZINC_PRESOLVE_H_ + +#include +#include +#include +#include +#include + +#include "absl/container/flat_hash_map.h" +#include "absl/container/flat_hash_set.h" +#include "absl/strings/match.h" +#include "ortools/base/hash.h" +#include "ortools/base/logging.h" +#include "ortools/base/types.h" +#include "ortools/flatzinc/model.h" +#include "ortools/util/logging.h" + +namespace operations_research { +namespace fz { +// The Presolver "pre-solves" a Model by applying some iterative +// transformations to it, which may simplify and/or shrink the model. +// +// TODO(user): Error reporting of unfeasible models. 
+class Presolver { + public: + explicit Presolver(SolverLogger* logger) : logger_(logger) {} + // Recursively apply all the pre-solve rules to the model, until exhaustion. + // The reduced model will: + // - Have some unused variables. + // - Have some unused constraints (marked as inactive). + // - Have some modified constraints (for example, they will no longer + // refer to unused variables). + void Run(Model* model); + + private: + // This struct stores the mapping of two index variables (of a 2D array; not + // included here) onto a single index variable (of the flattened 1D array). + // The original 2D array could be trimmed in the process; so we also need an + // offset. + // Eg. new_index_var = index_var1 * int_coeff + index_var2 + int_offset + struct Array2DIndexMapping { + Variable* variable1; + int64_t coefficient; + Variable* variable2; + int64_t offset; + Constraint* constraint; + + Array2DIndexMapping() + : variable1(nullptr), + coefficient(0), + variable2(nullptr), + offset(0), + constraint(nullptr) {} + Array2DIndexMapping(Variable* v1, int64_t c, Variable* v2, int64_t o, + Constraint* ct) + : variable1(v1), + coefficient(c), + variable2(v2), + offset(o), + constraint(ct) {} + }; + + // Substitution support. + void SubstituteEverywhere(Model* model); + void SubstituteAnnotation(Annotation* ann); + + // Presolve rules. + void PresolveBool2Int(Constraint* ct); + void PresolveInt2Float(Constraint* ct); + void PresolveStoreFlatteningMapping(Constraint* ct); + void PresolveSimplifyElement(Constraint* ct); + + // Helpers. + void UpdateRuleStats(const std::string& rule_name) { + successful_rules_[rule_name]++; + } + + // The presolver will discover some equivalence classes of variables [two + // variable are equivalent when replacing one by the other leads to the same + // logical model]. We will store them here, using a Union-find data structure. + // See http://en.wikipedia.org/wiki/Disjoint-set_data_structure. + // Note that the equivalence is directed. 
We prefer to replace all instances + // of 'from' with 'to', rather than the opposite. + void AddVariableSubstitution(Variable* from, Variable* to); + Variable* FindRepresentativeOfVar(Variable* var); + absl::flat_hash_map var_representative_map_; + std::vector var_representative_vector_; + + // Stores array2d_index_map_[z] = a * x + y + b. + absl::flat_hash_map array2d_index_map_; + + // Count applications of presolve rules. Use a sorted map for reporting + // purposes. + std::map successful_rules_; + + SolverLogger* logger_; +}; +} // namespace fz +} // namespace operations_research + +#endif // OR_TOOLS_FLATZINC_PRESOLVE_H_ From 294d9d3b5c9ee4f5f13ddccafee6aba7dca00084 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 14 Oct 2024 13:03:55 +0200 Subject: [PATCH 092/105] math_opt: disable broken test when using CMake --- ortools/math_opt/solver_tests/CMakeLists.txt | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/ortools/math_opt/solver_tests/CMakeLists.txt b/ortools/math_opt/solver_tests/CMakeLists.txt index f4a92b5147..0c1b059d64 100644 --- a/ortools/math_opt/solver_tests/CMakeLists.txt +++ b/ortools/math_opt/solver_tests/CMakeLists.txt @@ -110,15 +110,16 @@ ortools_cxx_library( TESTING ) -ortools_cxx_test( - NAME - ${_PREFIX}_unregistered_solver_test - SOURCES - "unregistered_solver_test.cc" - LINK_LIBRARIES - GTest::gmock - GTest::gmock_main -) +# In CMake or-tools is linked with all enable solvers so this test won't work. 
+#ortools_cxx_test( +# NAME +# ${_PREFIX}_unregistered_solver_test +# SOURCES +# "unregistered_solver_test.cc" +# LINK_LIBRARIES +# GTest::gmock +# GTest::gmock_main +#) ortools_cxx_library( NAME From 02d225d4f4048c8ecf953cedaf2631461f8beb6e Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 14 Oct 2024 13:04:18 +0200 Subject: [PATCH 093/105] bazel: bump constraint_solver and routing BUILD.bazel --- ortools/constraint_solver/BUILD.bazel | 122 +++++++++++++++----------- ortools/routing/BUILD.bazel | 119 ++++++++++++++++--------- 2 files changed, 147 insertions(+), 94 deletions(-) diff --git a/ortools/constraint_solver/BUILD.bazel b/ortools/constraint_solver/BUILD.bazel index 8c208c3b00..a1fdc7bead 100644 --- a/ortools/constraint_solver/BUILD.bazel +++ b/ortools/constraint_solver/BUILD.bazel @@ -14,7 +14,10 @@ # Home of constraint solver constraint_solver load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") +load("@rules_java//java:defs.bzl", "java_proto_library") load("@rules_proto//proto:defs.bzl", "proto_library") +load("@rules_python//python:defs.bzl", "py_library") +load("@rules_python//python:proto.bzl", "py_proto_library") package(default_visibility = ["//visibility:public"]) @@ -53,10 +56,10 @@ cc_proto_library( deps = [":search_limit_proto"], ) -# java_proto_library( -# name = "search_limit_java_proto", -# deps = [":search_limit_proto"], -# ) +java_proto_library( + name = "search_limit_java_proto", + deps = [":search_limit_proto"], +) proto_library( name = "demon_profiler_proto", @@ -78,10 +81,10 @@ cc_proto_library( deps = [":search_stats_proto"], ) -# java_proto_library( -# name = "search_stats_java_proto", -# deps = [":search_stats_proto"], -# ) +java_proto_library( + name = "search_stats_java_proto", + deps = [":search_stats_proto"], +) proto_library( name = "solver_parameters_proto", @@ -90,37 +93,55 @@ proto_library( cc_proto_library( name = "solver_parameters_cc_proto", - deps = ["solver_parameters_proto"], + deps = 
[":solver_parameters_proto"], ) -# java_proto_library( -# name = "solver_parameters_java_proto", -# deps = [":solver_parameters_proto"], -# ) +java_proto_library( + name = "solver_parameters_java_proto", + deps = [":solver_parameters_proto"], +) -#py_proto_library( -# name = "assignment_py_pb2", -# deps = [":assignment_proto"], -#) +py_proto_library( + name = "assignment_py_pb2", + deps = [":assignment_proto"], +) -#py_proto_library( -# name = "search_limit_py_pb2", -# deps = [":search_limit_proto"], -#) +py_proto_library( + name = "search_limit_py_pb2", + deps = [":search_limit_proto"], +) -#py_proto_library( -# name = "demon_profiler_py_pb2", -# deps = [":demon_profiler_proto"], -#) +py_proto_library( + name = "demon_profiler_py_pb2", + deps = [":demon_profiler_proto"], +) -#py_proto_library( -# name = "solver_parameters_py_pb2", -# deps = [":solver_parameters_proto"], -#) +py_proto_library( + name = "search_stats_py_pb2", + deps = [":search_stats_proto"], +) + +py_proto_library( + name = "solver_parameters_py_pb2", + deps = [":solver_parameters_proto"], +) + +py_library( + name = "constraint_solver_py_pb2", + deps = [ + ":assignment_py_pb2", + ":demon_profiler_py_pb2", + ":search_limit_py_pb2", + ":search_stats_py_pb2", + ":solver_parameters_py_pb2", + ], +) # ----- Constraint Programming core engine ----- # This is the main library. +# TODO(user): split this monolithic library into independent components. +# Do the same for others. 
cc_library( name = "cp", srcs = [ @@ -165,28 +186,19 @@ cc_library( ":search_stats_cc_proto", ":solver_parameters_cc_proto", "//ortools/base", - "//ortools/base:file", - "//ortools/base:recordio", - "//ortools/base:sysinfo", - "@com_google_absl//absl/container:flat_hash_map", - "@com_google_absl//absl/container:flat_hash_set", - "@com_google_absl//absl/memory", - "@com_google_absl//absl/strings", - "@com_google_absl//absl/strings:str_format", - "@com_google_absl//absl/time", - "@com_google_absl//absl/types:span", - # "//zlib:zlibonly", "//ortools/base:bitmap", + "//ortools/base:file", + "//ortools/base:int_type", "//ortools/base:intops", - "//ortools/base:strong_vector", "//ortools/base:iterator_adaptors", "//ortools/base:map_util", - "//ortools/base:stl_util", - # "//util/gzip:gzipstring", - "//ortools/base:hash", - # "//util/hash:jenkins", - # "//util/math:fastmath", "//ortools/base:mathutil", + "//ortools/base:recordio", + "//ortools/base:stl_util", + "//ortools/base:strong_vector", + "//ortools/base:sysinfo", + "//ortools/base:timer", + "//ortools/base:types", "//ortools/graph:hamiltonian_path", "//ortools/util:bitset", "//ortools/util:cached_log", @@ -197,11 +209,19 @@ cc_library( "//ortools/util:sorted_interval_list", "//ortools/util:string_array", "//ortools/util:tuple_set", - # "@com_google_re2//:re2", - "@com_google_absl//absl/status", - # "//util/textprogressbar", - "//ortools/base:timer", - # "//util/zippy", + "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/base:log_severity", + "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/flags:flag", + "@com_google_absl//absl/log:check", "@com_google_absl//absl/random", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/status", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", + "@com_google_absl//absl/time", + 
"@com_google_absl//absl/types:span", ], ) diff --git a/ortools/routing/BUILD.bazel b/ortools/routing/BUILD.bazel index a143d92313..1e9d469aca 100644 --- a/ortools/routing/BUILD.bazel +++ b/ortools/routing/BUILD.bazel @@ -11,8 +11,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@rules_cc//cc:defs.bzl", "cc_proto_library") +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") +load("@rules_java//java:defs.bzl", "java_proto_library") load("@rules_proto//proto:defs.bzl", "proto_library") +load("@rules_python//python:proto.bzl", "py_proto_library") package(default_visibility = ["//visibility:public"]) @@ -41,10 +43,10 @@ cc_proto_library( deps = [":enums_proto"], ) -# java_proto_library( -# name = "enums_java_proto", -# deps = [":enums_proto"], -# ) +java_proto_library( + name = "enums_java_proto", + deps = [":enums_proto"], +) proto_library( name = "ils_proto", @@ -54,7 +56,17 @@ proto_library( cc_proto_library( name = "ils_cc_proto", - deps = ["ils_proto"], + deps = [":ils_proto"], +) + +py_proto_library( + name = "ils_py_pb2", + deps = [":ils_proto"], +) + +java_proto_library( + name = "ils_java_proto", + deps = [":ils_proto"], ) proto_library( @@ -75,47 +87,47 @@ cc_proto_library( deps = [":parameters_proto"], ) -# java_proto_library( -# name = "parameters_java_proto", -# deps = [":parameters_proto"], -# ) +java_proto_library( + name = "parameters_java_proto", + deps = [":parameters_proto"], +) -#py_proto_library( -# name = "parameters_py_pb2", -# deps = [":parameters_proto"], -#) +py_proto_library( + name = "parameters_py_pb2", + deps = [":parameters_proto"], +) + +py_proto_library( + name = "enums_py_pb2", + deps = [":enums_proto"], +) cc_library( name = "parameters", srcs = ["parameters.cc"], hdrs = ["parameters.h"], deps = [ + ":enums_cc_proto", + ":ils_cc_proto", + ":parameters_cc_proto", "//ortools/base", "//ortools/base:proto_enum_utils", "//ortools/base:protoutil", + 
"//ortools/base:types", "//ortools/constraint_solver:cp", "//ortools/constraint_solver:solver_parameters_cc_proto", "//ortools/port:proto_utils", - "//ortools/routing:enums_cc_proto", - "//ortools/routing:parameters_cc_proto", + "//ortools/sat:sat_parameters_cc_proto", "//ortools/util:optional_boolean_cc_proto", "//ortools/util:testing_utils", - "@com_google_absl//absl/status:statusor", + "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/time", "@com_google_protobuf//:protobuf", ], ) -cc_library( - name = "types", - hdrs = ["types.h"], - deps = [ - "//ortools/base", - "//ortools/base:intops", - ], -) - cc_library( name = "parameters_utils", srcs = ["parameters_utils.cc"], @@ -126,14 +138,23 @@ cc_library( ], ) +cc_library( + name = "types", + hdrs = ["types.h"], + deps = [ + "//ortools/base:int_type", + "//ortools/util:piecewise_linear_function", + ], +) + cc_library( name = "utils", srcs = ["utils.cc"], hdrs = ["utils.h"], - visibility = ["//visibility:public"], deps = [ - "//ortools/base", "//ortools/util:saturated_arithmetic", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", ], ) @@ -141,12 +162,15 @@ cc_library( name = "neighborhoods", srcs = ["neighborhoods.cc"], hdrs = ["neighborhoods.h"], - visibility = ["//visibility:public"], deps = [ ":types", ":utils", - "//ortools/base", + "//ortools/base:types", "//ortools/constraint_solver:cp", + "//ortools/util:bitset", + "//ortools/util:saturated_arithmetic", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", ], ) @@ -157,9 +181,11 @@ cc_library( deps = [ ":types", "//ortools/base", - "//ortools/base:map_util", "//ortools/base:strong_vector", + "//ortools/base:types", "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", ], ) @@ -205,50 +231,57 @@ cc_library( ":types", ":utils", 
"//ortools/base", - "//ortools/base:adjustable_priority_queue", "//ortools/base:dump_vars", - "//ortools/base:hash", + "//ortools/base:int_type", "//ortools/base:map_util", - "//ortools/base:murmur", + "//ortools/base:mathutil", "//ortools/base:protoutil", - "//ortools/base:small_map", "//ortools/base:stl_util", "//ortools/base:strong_vector", + "//ortools/base:types", "//ortools/constraint_solver:cp", + "//ortools/constraint_solver:solver_parameters_cc_proto", "//ortools/glop:lp_solver", + "//ortools/glop:parameters_cc_proto", "//ortools/graph", "//ortools/graph:christofides", "//ortools/graph:connected_components", + "//ortools/graph:ebert_graph", "//ortools/graph:linear_assignment", "//ortools/graph:min_cost_flow", - "//ortools/graph:topologicalsorter", "//ortools/lp_data", "//ortools/lp_data:base", - "//ortools/sat:boolean_problem", - "//ortools/sat:cp_constraints", - "//ortools/sat:cp_model", + "//ortools/port:proto_utils", "//ortools/sat:cp_model_cc_proto", - "//ortools/sat:cp_model_checker", "//ortools/sat:cp_model_solver", - "//ortools/sat:cp_model_utils", "//ortools/sat:integer", - "//ortools/sat:integer_expr", + "//ortools/sat:lp_utils", "//ortools/sat:model", - "//ortools/sat:optimization", + "//ortools/sat:sat_parameters_cc_proto", "//ortools/sat:theta_tree", "//ortools/util:bitset", "//ortools/util:flat_matrix", "//ortools/util:optional_boolean_cc_proto", + "//ortools/util:piecewise_linear_function", + "//ortools/util:range_minimum_query", "//ortools/util:range_query_function", "//ortools/util:saturated_arithmetic", "//ortools/util:sorted_interval_list", + "//ortools/util:time_limit", + "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/base:core_headers", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/container:inlined_vector", + "@com_google_absl//absl/flags:flag", "@com_google_absl//absl/functional:bind_front", 
"@com_google_absl//absl/hash", + "@com_google_absl//absl/log", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/log:die_if_null", "@com_google_absl//absl/memory", + "@com_google_absl//absl/status:statusor", "@com_google_absl//absl/strings", "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/time", From a89b375228cc8ac81d5c8eda004a92677180be11 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 14 Oct 2024 15:08:45 +0200 Subject: [PATCH 094/105] cmake: fix math_opt "alwayslink" behaviour in CMake --- ortools/math_opt/solver_tests/CMakeLists.txt | 44 ++--- ortools/math_opt/solvers/CMakeLists.txt | 174 +++++++++---------- 2 files changed, 110 insertions(+), 108 deletions(-) diff --git a/ortools/math_opt/solver_tests/CMakeLists.txt b/ortools/math_opt/solver_tests/CMakeLists.txt index 0c1b059d64..2b18e7de9a 100644 --- a/ortools/math_opt/solver_tests/CMakeLists.txt +++ b/ortools/math_opt/solver_tests/CMakeLists.txt @@ -20,7 +20,7 @@ ortools_cxx_library( "base_solver_test.cc" "base_solver_test.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -34,7 +34,7 @@ ortools_cxx_library( "callback_tests.cc" "callback_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -54,7 +54,7 @@ ortools_cxx_library( "status_tests.cc" "status_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -70,7 +70,7 @@ ortools_cxx_library( "lp_tests.cc" "lp_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -86,7 +86,7 @@ ortools_cxx_library( "lp_incomplete_solve_tests.cc" "lp_incomplete_solve_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -102,7 +102,7 @@ ortools_cxx_library( "invalid_input_tests.cc" "invalid_input_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -128,7 +128,7 @@ ortools_cxx_library( "mip_tests.cc" "mip_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -145,7 +145,7 @@ ortools_cxx_library( 
"ip_model_solve_parameters_tests.cc" "ip_model_solve_parameters_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -161,7 +161,7 @@ ortools_cxx_library( "ip_multiple_solutions_tests.cc" "ip_multiple_solutions_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::strings @@ -176,7 +176,7 @@ ortools_cxx_library( "lp_model_solve_parameters_tests.cc" "lp_model_solve_parameters_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -192,7 +192,7 @@ ortools_cxx_library( "lp_parameter_tests.cc" "lp_parameter_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -209,9 +209,11 @@ ortools_cxx_library( "lp_initial_basis_tests.cc" "lp_initial_basis_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES + GTest::gmock absl::log + absl::status ortools::math_opt_base_solver_test TESTING ) @@ -223,7 +225,7 @@ ortools_cxx_library( "ip_parameter_tests.cc" "ip_parameter_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -239,7 +241,7 @@ ortools_cxx_library( "multi_objective_tests.cc" "multi_objective_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -254,7 +256,7 @@ ortools_cxx_library( "qp_tests.cc" "qp_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -269,7 +271,7 @@ ortools_cxx_library( "qc_tests.cc" "qc_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -284,7 +286,7 @@ ortools_cxx_library( "second_order_cone_tests.cc" "second_order_cone_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -299,7 +301,7 @@ ortools_cxx_library( "logical_constraint_tests.cc" "logical_constraint_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log @@ -314,7 +316,7 @@ ortools_cxx_library( "test_models.cc" "test_models.h" TYPE - SHARED + STATIC LINK_LIBRARIES absl::log absl::strings @@ -342,7 +344,7 @@ ortools_cxx_library( "generic_tests.cc" "generic_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock 
absl::log @@ -358,7 +360,7 @@ ortools_cxx_library( "infeasible_subsystem_tests.cc" "infeasible_subsystem_tests.h" TYPE - SHARED + STATIC LINK_LIBRARIES GTest::gmock absl::log diff --git a/ortools/math_opt/solvers/CMakeLists.txt b/ortools/math_opt/solvers/CMakeLists.txt index 2e71e90511..9cae3b1ea7 100644 --- a/ortools/math_opt/solvers/CMakeLists.txt +++ b/ortools/math_opt/solvers/CMakeLists.txt @@ -64,20 +64,20 @@ if(USE_SCIP) GTest::gmock_main absl::status ortools::math_opt_matchers - ortools::math_opt_callback_tests - ortools::math_opt_generic_tests - ortools::math_opt_infeasible_subsystem_tests - ortools::math_opt_invalid_input_tests - ortools::math_opt_ip_model_solve_parameters_tests - ortools::math_opt_ip_multiple_solutions_tests - ortools::math_opt_ip_parameter_tests - ortools::math_opt_logical_constraint_tests - ortools::math_opt_mip_tests - ortools::math_opt_multi_objective_tests - ortools::math_opt_qc_tests - ortools::math_opt_qp_tests - ortools::math_opt_second_order_cone_tests - ortools::math_opt_status_tests + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" ) endif() @@ -92,21 +92,21 @@ if(USE_GLOP) GTest::gmock_main absl::status ortools::math_opt_matchers - ortools::math_opt_callback_tests - ortools::math_opt_generic_tests - ortools::math_opt_infeasible_subsystem_tests - ortools::math_opt_invalid_input_tests - ortools::math_opt_logical_constraint_tests - ortools::math_opt_lp_incomplete_solve_tests - ortools::math_opt_lp_initial_basis_tests - ortools::math_opt_lp_model_solve_parameters_tests - ortools::math_opt_lp_parameter_tests - ortools::math_opt_lp_tests - ortools::math_opt_multi_objective_tests - ortools::math_opt_qc_tests - ortools::math_opt_qp_tests - ortools::math_opt_second_order_cone_tests - ortools::math_opt_status_tests + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" ) endif() @@ -120,20 +120,20 @@ ortools_cxx_test( GTest::gmock_main absl::status 
ortools::math_opt_matchers - ortools::math_opt_callback_tests - ortools::math_opt_generic_tests - ortools::math_opt_infeasible_subsystem_tests - ortools::math_opt_invalid_input_tests - ortools::math_opt_ip_model_solve_parameters_tests - ortools::math_opt_ip_multiple_solutions_tests - ortools::math_opt_ip_parameter_tests - ortools::math_opt_logical_constraint_tests - ortools::math_opt_mip_tests - ortools::math_opt_multi_objective_tests - ortools::math_opt_qc_tests - ortools::math_opt_qp_tests - ortools::math_opt_second_order_cone_tests - ortools::math_opt_status_tests + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" ) ortools_cxx_test( @@ -157,21 +157,21 @@ if(USE_PDLP) GTest::gmock GTest::gmock_main absl::status - ortools::math_opt_callback_tests - ortools::math_opt_generic_tests - ortools::math_opt_infeasible_subsystem_tests - ortools::math_opt_invalid_input_tests - ortools::math_opt_logical_constraint_tests - ortools::math_opt_lp_incomplete_solve_tests - ortools::math_opt_lp_initial_basis_tests - ortools::math_opt_lp_model_solve_parameters_tests - ortools::math_opt_lp_parameter_tests - ortools::math_opt_lp_tests - ortools::math_opt_multi_objective_tests - ortools::math_opt_qc_tests - ortools::math_opt_qp_tests - ortools::math_opt_second_order_cone_tests - ortools::math_opt_status_tests + "$" + "$" + "$" + "$" + "$" + "$" + #"$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" ) endif() @@ -187,23 +187,23 @@ if(USE_GLPK) absl::status absl::time ortools::math_opt_matchers - ortools::math_opt_callback_tests - ortools::math_opt_generic_tests - ortools::math_opt_infeasible_subsystem_tests - ortools::math_opt_invalid_input_tests - ortools::math_opt_ip_model_solve_parameters_tests - ortools::math_opt_ip_parameter_tests - ortools::math_opt_logical_constraint_tests - ortools::math_opt_lp_incomplete_solve_tests - ortools::math_opt_lp_model_solve_parameters_tests - ortools::math_opt_lp_parameter_tests - ortools::math_opt_lp_tests - 
ortools::math_opt_mip_tests - ortools::math_opt_multi_objective_tests - ortools::math_opt_qc_tests - ortools::math_opt_qp_tests - ortools::math_opt_second_order_cone_tests - ortools::math_opt_status_tests + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" + "$" ) endif() @@ -218,17 +218,17 @@ if(USE_HIGHS) GTest::gmock_main absl::status ortools::math_opt_matchers - ortools::math_opt_callback_tests - ortools::math_opt_generic_tests - ortools::math_opt_infeasible_subsystem_tests - ortools::math_opt_ip_model_solve_parameters_tests - ortools::math_opt_ip_parameter_tests - #ortools::math_opt_logical_constraint_tests - #ortools::math_opt_lp_incomplete_solve_tests - ortools::math_opt_lp_model_solve_parameters_tests - ortools::math_opt_lp_parameter_tests - ortools::math_opt_lp_tests - ortools::math_opt_mip_tests - ortools::math_opt_status_tests + "$" + "$" + "$" + "$" + "$" + #"$" + #"$" + "$" + "$" + "$" + "$" + "$" ) endif() From 1bb98825e1e437aaa874fcc2bcefcdad101198a5 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Tue, 15 Oct 2024 10:35:04 +0200 Subject: [PATCH 095/105] math_opt: disable Mathopt.CallbackTest.EventNodeCut for SCIP this test is flacky with SCIP v900 --- ortools/math_opt/solver_tests/callback_tests.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ortools/math_opt/solver_tests/callback_tests.cc b/ortools/math_opt/solver_tests/callback_tests.cc index a688549d49..0c5d4b9882 100644 --- a/ortools/math_opt/solver_tests/callback_tests.cc +++ b/ortools/math_opt/solver_tests/callback_tests.cc @@ -671,6 +671,9 @@ TEST_P(CallbackTest, EventSolutionFilter) { } TEST_P(CallbackTest, EventNodeCut) { + if (GetParam().solver_type == SolverType::kGscip) { + GTEST_SKIP() << "This test does not work with SCIP v900"; + } if (!GetParam().supported_events.contains(CallbackEvent::kMipNode)) { GTEST_SKIP() << "Test skipped because this solver does not support " "CallbackEvent::kMipNode."; From 
b83b38ab3600b549e42c482b2f47e2bcf3ebf34a Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Tue, 15 Oct 2024 12:06:00 +0200 Subject: [PATCH 096/105] cmake: Fix glop build --- cmake/docker/glop/Dockerfile | 2 +- cmake/glop.cmake | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/cmake/docker/glop/Dockerfile b/cmake/docker/glop/Dockerfile index bbd1e2d31d..dfe8efb53b 100644 --- a/cmake/docker/glop/Dockerfile +++ b/cmake/docker/glop/Dockerfile @@ -22,7 +22,7 @@ COPY . . FROM devel AS build RUN cmake -S. -Bbuild -DBUILD_DEPS=ON -DBUILD_CXX=OFF -DBUILD_GLOP=ON -RUN cmake --build build --target all -v +RUN cmake --build build --target all -j 4 -v RUN cmake --build build --target install FROM build AS test diff --git a/cmake/glop.cmake b/cmake/glop.cmake index 3be3a62544..dc2ca034f8 100644 --- a/cmake/glop.cmake +++ b/cmake/glop.cmake @@ -228,6 +228,9 @@ target_compile_options(glop PUBLIC ${GLOP_COMPILE_OPTIONS}) # Properties if(NOT APPLE) set_target_properties(glop PROPERTIES VERSION ${PROJECT_VERSION}) + if(UNIX) + set_target_properties(glop PROPERTIES INSTALL_RPATH "$ORIGIN") + endif() else() # Clang don't support version x.y.z with z > 255 set_target_properties(glop PROPERTIES From 3c7936e859c7f58972483e805fe31aed052e3b65 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Tue, 15 Oct 2024 13:44:06 +0200 Subject: [PATCH 097/105] cmake: add missing absl deps in python --- cmake/python.cmake | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/cmake/python.cmake b/cmake/python.cmake index c5d55e7433..e1e3864aa9 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -439,7 +439,11 @@ add_custom_command( $,copy,true> # ortools direct deps $ + $ + $ + $ $ + $ $ $ $ @@ -447,6 +451,7 @@ add_custom_command( $ $ $ + $ $ $ $ @@ -466,10 +471,13 @@ add_custom_command( $ $ $ + $ $ + $ $ $ $ + $ $ $ $ @@ -482,6 +490,7 @@ add_custom_command( $ $ $ + $ $ $ $ @@ -494,6 +503,7 @@ add_custom_command( $ $ $ + $ $ $ $ @@ -502,6 +512,7 @@ add_custom_command( $ $ 
$ + $ $ $ $ From 7c9adf2cd360cecd9f241969553dd3a9d09dbe42 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Tue, 15 Oct 2024 14:32:33 +0200 Subject: [PATCH 098/105] cmake: .Net and Java absl fixup --- cmake/java.cmake | 11 ++ cmake/python.cmake | 174 +++++++++--------- .../dotnet/Google.OrTools.runtime.csproj.in | 11 ++ 3 files changed, 109 insertions(+), 87 deletions(-) diff --git a/cmake/java.cmake b/cmake/java.cmake index f1ba0faf78..c7327aeb5e 100644 --- a/cmake/java.cmake +++ b/cmake/java.cmake @@ -284,7 +284,11 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E $,copy,true> $ + $ + $ + $ $ + $ $ $ $ @@ -292,6 +296,7 @@ add_custom_command( $ $ $ + $ $ $ $ @@ -311,10 +316,13 @@ add_custom_command( $ $ $ + $ $ + $ $ $ $ + $ $ $ $ @@ -327,6 +335,7 @@ add_custom_command( $ $ $ + $ $ $ $ @@ -339,6 +348,7 @@ add_custom_command( $ $ $ + $ $ $ $ @@ -347,6 +357,7 @@ add_custom_command( $ $ $ + $ $ $ $ diff --git a/cmake/python.cmake b/cmake/python.cmake index e1e3864aa9..3687e3be4d 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -436,93 +436,93 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E make_directory ${PYTHON_PROJECT}/.libs # Don't need to copy static lib on Windows. 
COMMAND ${CMAKE_COMMAND} -E - $,copy,true> - # ortools direct deps - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${PYTHON_PROJECT}/.libs + $,copy,true> + # ortools direct deps + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + ${PYTHON_PROJECT}/.libs COMMAND ${CMAKE_COMMAND} -E $,copy,true> $ diff --git a/ortools/dotnet/Google.OrTools.runtime.csproj.in b/ortools/dotnet/Google.OrTools.runtime.csproj.in index dcf27576a8..f6e7864fb3 100644 --- a/ortools/dotnet/Google.OrTools.runtime.csproj.in +++ b/ortools/dotnet/Google.OrTools.runtime.csproj.in @@ -28,7 +28,11 @@ $ $<$,SHARED_LIBRARY>:;$> $<$:;$> + $<$:;$> + $<$:;$> + $<$:;$> $<$:;$> + $<$:;$> $<$:;$> $<$:;$> $<$:;$> @@ -36,6 +40,7 @@ $<$:;$> $<$:;$> $<$:;$> + $<$:;$> $<$:;$> $<$:;$> $<$:;$> @@ -55,10 +60,13 @@ $<$:;$> $<$:;$> $<$:;$> + $<$:;$> $<$:;$> + $<$:;$> $<$:;$> $<$:;$> $<$:;$> + $<$:;$> $<$:;$> $<$:;$> $<$:;$> @@ -71,6 +79,7 @@ $<$:;$> $<$:;$> $<$:;$> + $<$:;$> $<$:;$> $<$:;$> $<$:;$> @@ -83,6 +92,7 @@ $<$:;$> $<$:;$> $<$:;$> + $<$:;$> $<$:;$> $<$:;$> $<$:;$> @@ -91,6 +101,7 @@ $<$:;$> $<$:;$> $<$:;$> + $<$:;$ $<$:;$> $<$:;$> $<$:;$> From 09f8ed8ccbc9f6771b79b8e5731b578d4ef1851c Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Tue, 15 Oct 2024 15:45:05 +0200 Subject: [PATCH 099/105] cmake(ci): Fix ubuntu python job --- cmake/docker/ubuntu/python.Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/cmake/docker/ubuntu/python.Dockerfile b/cmake/docker/ubuntu/python.Dockerfile index fa4571c597..859e859c16 100644 --- a/cmake/docker/ubuntu/python.Dockerfile +++ b/cmake/docker/ubuntu/python.Dockerfile @@ -4,7 +4,8 @@ ENV PATH=/root/.local/bin:$PATH RUN apt-get update -qq \ && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ python3-dev python3-pip \ - python3-wheel python3-venv python3-virtualenv \ + python3-setuptools python3-wheel \ + python3-venv python3-virtualenv \ python3-numpy python3-pandas \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* From bc510a0c7e41264f75cd427f023fd211bb87735e Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Tue, 15 Oct 2024 15:45:24 +0200 Subject: [PATCH 100/105] cmake: fixup dotnet csproj.in --- ortools/dotnet/Google.OrTools.runtime.csproj.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ortools/dotnet/Google.OrTools.runtime.csproj.in b/ortools/dotnet/Google.OrTools.runtime.csproj.in index f6e7864fb3..057dedb2c9 100644 --- a/ortools/dotnet/Google.OrTools.runtime.csproj.in +++ b/ortools/dotnet/Google.OrTools.runtime.csproj.in @@ -101,7 +101,7 @@ $<$:;$> $<$:;$> $<$:;$> - $<$:;$ + $<$:;$> $<$:;$> $<$:;$> $<$:;$> From 4408ce49889a8b7d28a426ed09956c617d3f6e88 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Wed, 16 Oct 2024 01:15:58 +0200 Subject: [PATCH 101/105] ci: cleanup workflows --- .github/workflows/amd64_windows_cmake_cpp.yml | 2 +- .github/workflows/arm64_macos_cmake_cpp.yml | 2 +- cmake/cpp.cmake | 3 +-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/amd64_windows_cmake_cpp.yml b/.github/workflows/amd64_windows_cmake_cpp.yml index d30baf8625..05a8917acb 100644 --- a/.github/workflows/amd64_windows_cmake_cpp.yml +++ b/.github/workflows/amd64_windows_cmake_cpp.yml @@ -27,7 +27,7 @@ jobs: run: > cmake -S. 
-Bbuild -G "${{ matrix.cmake.generator }}" - -DCMAKE_BUILD_TYPE=${{ matrix.cmake.config }} + -DCMAKE_CONFIGURATION_TYPES=${{ matrix.cmake.config }} -DBUILD_DEPS=ON -DCMAKE_INSTALL_PREFIX=install - name: Build diff --git a/.github/workflows/arm64_macos_cmake_cpp.yml b/.github/workflows/arm64_macos_cmake_cpp.yml index 8388c59aa1..92071673df 100644 --- a/.github/workflows/arm64_macos_cmake_cpp.yml +++ b/.github/workflows/arm64_macos_cmake_cpp.yml @@ -14,7 +14,7 @@ jobs: ] fail-fast: false name: MacOS • ${{ matrix.cmake.generator }} • C++ - runs-on: macos-latest # macos arm64 based runner + runs-on: macos-latest # macos M1 based runner steps: - uses: actions/checkout@v4 - name: Check cmake diff --git a/cmake/cpp.cmake b/cmake/cpp.cmake index 89ae76355a..b099b35ab3 100644 --- a/cmake/cpp.cmake +++ b/cmake/cpp.cmake @@ -639,8 +639,7 @@ configure_package_config_file(cmake/${PROJECT_NAME}Config.cmake.in NO_CHECK_REQUIRED_COMPONENTS_MACRO) write_basic_package_version_file( "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake" - COMPATIBILITY SameMajorVersion - ) + COMPATIBILITY SameMajorVersion) install( FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" From 6bd3ebcf3dd518854fd6852201796345d418d229 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Wed, 16 Oct 2024 01:16:29 +0200 Subject: [PATCH 102/105] cmake: Fix libz.so inside packages --- cmake/java.cmake | 4 ++++ cmake/python.cmake | 5 ++++- ortools/dotnet/Google.OrTools.runtime.csproj.in | 1 + 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/cmake/java.cmake b/cmake/java.cmake index c7327aeb5e..0dbb9aebdb 100644 --- a/cmake/java.cmake +++ b/cmake/java.cmake @@ -281,6 +281,10 @@ add_custom_command( $ $<$>:$> ${JAVA_RESSOURCES_PATH}/${JAVA_NATIVE_PROJECT}/ + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${JAVA_RESSOURCES_PATH}/${JAVA_NATIVE_PROJECT}/ COMMAND ${CMAKE_COMMAND} -E $,copy,true> $ diff --git a/cmake/python.cmake b/cmake/python.cmake index 3687e3be4d..239f7256a2 100644 --- 
a/cmake/python.cmake +++ b/cmake/python.cmake @@ -435,9 +435,12 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E remove -f ortools_timestamp COMMAND ${CMAKE_COMMAND} -E make_directory ${PYTHON_PROJECT}/.libs # Don't need to copy static lib on Windows. + COMMAND ${CMAKE_COMMAND} -E + $,copy,true> + $ + ${PYTHON_PROJECT}/.libs COMMAND ${CMAKE_COMMAND} -E $,copy,true> - # ortools direct deps $ $ $ diff --git a/ortools/dotnet/Google.OrTools.runtime.csproj.in b/ortools/dotnet/Google.OrTools.runtime.csproj.in index 057dedb2c9..97451092d4 100644 --- a/ortools/dotnet/Google.OrTools.runtime.csproj.in +++ b/ortools/dotnet/Google.OrTools.runtime.csproj.in @@ -27,6 +27,7 @@ ) From 2ce45edd1761993289b836d2194ad28a32e7554c Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 16 Oct 2024 14:11:26 +0200 Subject: [PATCH 104/105] more absl support: span... --- ortools/algorithms/set_cover_heuristics.cc | 4 ++-- ortools/algorithms/set_cover_heuristics.h | 2 +- ortools/glop/lu_factorization.cc | 2 +- ortools/linear_solver/model_exporter.cc | 10 ++++---- ortools/linear_solver/python/model_builder.py | 24 +++++++++++++++++-- 5 files changed, 31 insertions(+), 11 deletions(-) diff --git a/ortools/algorithms/set_cover_heuristics.cc b/ortools/algorithms/set_cover_heuristics.cc index 4208c90e17..dc6377b893 100644 --- a/ortools/algorithms/set_cover_heuristics.cc +++ b/ortools/algorithms/set_cover_heuristics.cc @@ -122,8 +122,8 @@ bool GreedySolutionGenerator::NextSolution( return NextSolution(focus, inv_->model()->subset_costs()); } -bool GreedySolutionGenerator::NextSolution( - const std::vector& focus, const SubsetCostVector& costs) { +bool GreedySolutionGenerator::NextSolution(absl::Span focus, + const SubsetCostVector& costs) { DCHECK(inv_->CheckConsistency()); inv_->ClearTrace(); SubsetCostVector elements_per_cost(costs.size(), 0.0); diff --git a/ortools/algorithms/set_cover_heuristics.h b/ortools/algorithms/set_cover_heuristics.h index 154724cf61..c8440bf64e 100644 --- 
a/ortools/algorithms/set_cover_heuristics.h +++ b/ortools/algorithms/set_cover_heuristics.h @@ -174,7 +174,7 @@ class GreedySolutionGenerator { bool NextSolution(const std::vector& focus); // Same with a different set of costs. - bool NextSolution(const std::vector& focus, + bool NextSolution(absl::Span focus, const SubsetCostVector& costs); private: diff --git a/ortools/glop/lu_factorization.cc b/ortools/glop/lu_factorization.cc index 8deed0b61c..e50e9f9d3d 100644 --- a/ortools/glop/lu_factorization.cc +++ b/ortools/glop/lu_factorization.cc @@ -133,7 +133,7 @@ namespace { // norm of the given column, otherwise do the same with a sparse version. In // both cases column is cleared. Fractional ComputeSquaredNormAndResetToZero( - const std::vector& non_zeros, absl::Span column) { + absl::Span non_zeros, absl::Span column) { Fractional sum = 0.0; if (non_zeros.empty()) { sum = SquaredNormAndResetToZero(column); diff --git a/ortools/linear_solver/model_exporter.cc b/ortools/linear_solver/model_exporter.cc index 51d75611d6..cdccf8b241 100644 --- a/ortools/linear_solver/model_exporter.cc +++ b/ortools/linear_solver/model_exporter.cc @@ -149,11 +149,11 @@ class MPModelProtoExporter { // Appends a pair name, value to "output", formatted to comply with the MPS // standard. - void AppendMpsPair(const std::string& name, double value, + void AppendMpsPair(absl::string_view name, double value, std::string* output) const; // Appends the head of a line, consisting of an id and a name to output. - void AppendMpsLineHeader(const std::string& id, const std::string& name, + void AppendMpsLineHeader(absl::string_view id, absl::string_view name, std::string* output) const; // Same as AppendMpsLineHeader. 
Appends an extra new-line at the end the @@ -674,13 +674,13 @@ bool MPModelProtoExporter::ExportModelAsLpFormat( return true; } -void MPModelProtoExporter::AppendMpsPair(const std::string& name, double value, +void MPModelProtoExporter::AppendMpsPair(absl::string_view name, double value, std::string* output) const { absl::StrAppendFormat(output, *mps_format_, name, DoubleToString(value)); } -void MPModelProtoExporter::AppendMpsLineHeader(const std::string& id, - const std::string& name, +void MPModelProtoExporter::AppendMpsLineHeader(absl::string_view id, + absl::string_view name, std::string* output) const { absl::StrAppendFormat(output, *mps_header_format_, id, name); } diff --git a/ortools/linear_solver/python/model_builder.py b/ortools/linear_solver/python/model_builder.py index bca0e6df2c..9d9dc8f7c6 100644 --- a/ortools/linear_solver/python/model_builder.py +++ b/ortools/linear_solver/python/model_builder.py @@ -1599,11 +1599,31 @@ class Model: return self.__helper.import_from_mps_file(mps_file) def import_from_lp_string(self, lp_string: str) -> bool: - """Reads a model from a LP string.""" + """Reads a model from a LP string. + + Note that this code is very limited, and will not support any real lp. + It is only intented to be use to parse test lp problems. + + Args: + lp_string: The LP string to import. + + Returns: + True if the import was successful. + """ return self.__helper.import_from_lp_string(lp_string) def import_from_lp_file(self, lp_file: str) -> bool: - """Reads a model from a .lp file.""" + """Reads a model from a .lp file. + + Note that this code is very limited, and will not support any real lp. + It is only intented to be use to parse test lp problems. + + Args: + lp_file: The LP file to import. + + Returns: + True if the import was successful. 
+ """ return self.__helper.import_from_lp_file(lp_file) def import_from_proto_file(self, proto_file: str) -> bool: From b8626a31f84974f5d1cb7a860158462b64acc662 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 16 Oct 2024 14:12:38 +0200 Subject: [PATCH 105/105] [CP-SAT] check the time limit more frequently; experimental support for lp folding; more pedantic tests in presolve --- ortools/sat/2d_orthogonal_packing.cc | 4 +- ortools/sat/2d_orthogonal_packing_testing.cc | 2 +- ortools/sat/2d_orthogonal_packing_testing.h | 2 +- ortools/sat/2d_rectangle_presolve_test.cc | 2 +- ortools/sat/BUILD.bazel | 16 +- ortools/sat/clause.cc | 8 +- ortools/sat/clause.h | 7 + ortools/sat/cp_model_lns.cc | 60 ++++- ortools/sat/cp_model_lns.h | 20 +- ortools/sat/cp_model_presolve.cc | 109 ++++++-- ortools/sat/cp_model_presolve.h | 5 + ortools/sat/cp_model_search.cc | 3 + ortools/sat/cp_model_search.h | 2 +- ortools/sat/cp_model_solver.cc | 16 +- ortools/sat/cp_model_solver_helpers.cc | 108 +++++++- ortools/sat/cp_model_symmetries.cc | 48 +++- ortools/sat/cuts.cc | 3 + ortools/sat/diffn_cuts.cc | 7 +- ortools/sat/diffn_cuts.h | 3 +- ortools/sat/go/cpmodel/cp_solver_c.cc | 1 + ortools/sat/inclusion.h | 24 +- ortools/sat/inclusion_test.cc | 16 +- ortools/sat/integer_expr_test.cc | 4 +- ortools/sat/linear_constraint.cc | 7 + ortools/sat/linear_constraint.h | 40 +++ ortools/sat/linear_constraint_manager.cc | 171 +++++++++++- ortools/sat/linear_constraint_manager.h | 81 +++++- ortools/sat/linear_constraint_manager_test.cc | 58 ++++ ortools/sat/linear_programming_constraint.cc | 255 ++++++++++++------ ortools/sat/linear_programming_constraint.h | 8 +- ortools/sat/presolve_context.cc | 51 +++- ortools/sat/presolve_context.h | 4 +- ortools/sat/sat_parameters.proto | 9 +- ortools/sat/scheduling_cuts_test.cc | 2 +- ortools/sat/symmetry_util.cc | 19 ++ ortools/sat/symmetry_util.h | 5 + ortools/sat/symmetry_util_test.cc | 12 + 37 files changed, 1009 insertions(+), 183 deletions(-) diff 
--git a/ortools/sat/2d_orthogonal_packing.cc b/ortools/sat/2d_orthogonal_packing.cc index f8bd5b3d42..f352359832 100644 --- a/ortools/sat/2d_orthogonal_packing.cc +++ b/ortools/sat/2d_orthogonal_packing.cc @@ -70,8 +70,8 @@ std::optional> FindPairwiseConflict( absl::Span sizes_x, absl::Span sizes_y, std::pair bounding_box_size, - const std::vector& index_by_decreasing_x_size, - const std::vector& index_by_decreasing_y_size) { + absl::Span index_by_decreasing_x_size, + absl::Span index_by_decreasing_y_size) { // Look for pairwise incompatible pairs by using the logic such conflict can // only happen between a "tall" item a "wide" item. int x_idx = 0; diff --git a/ortools/sat/2d_orthogonal_packing_testing.cc b/ortools/sat/2d_orthogonal_packing_testing.cc index 4fc9789b1c..18b911136e 100644 --- a/ortools/sat/2d_orthogonal_packing_testing.cc +++ b/ortools/sat/2d_orthogonal_packing_testing.cc @@ -180,7 +180,7 @@ std::vector MakeItemsFromRectangles( std::vector GenerateItemsRectanglesWithNoPairwiseConflict( - const std::vector& rectangles, double slack_factor, + absl::Span rectangles, double slack_factor, absl::BitGenRef random) { const std::vector range_items = MakeItemsFromRectangles(rectangles, slack_factor, random); diff --git a/ortools/sat/2d_orthogonal_packing_testing.h b/ortools/sat/2d_orthogonal_packing_testing.h index 72de30a910..848e971e2f 100644 --- a/ortools/sat/2d_orthogonal_packing_testing.h +++ b/ortools/sat/2d_orthogonal_packing_testing.h @@ -40,7 +40,7 @@ std::vector MakeItemsFromRectangles( std::vector GenerateItemsRectanglesWithNoPairwiseConflict( - const std::vector& rectangles, double slack_factor, + absl::Span rectangles, double slack_factor, absl::BitGenRef random); std::vector diff --git a/ortools/sat/2d_rectangle_presolve_test.cc b/ortools/sat/2d_rectangle_presolve_test.cc index 20700a826d..7650f9bb27 100644 --- a/ortools/sat/2d_rectangle_presolve_test.cc +++ b/ortools/sat/2d_rectangle_presolve_test.cc @@ -207,7 +207,7 @@ TEST(RectanglePresolve, 
RandomTest) { } } -Neighbours NaiveBuildNeighboursGraph(const std::vector& rectangles) { +Neighbours NaiveBuildNeighboursGraph(absl::Span rectangles) { auto interval_intersect = [](IntegerValue begin1, IntegerValue end1, IntegerValue begin2, IntegerValue end2) { return std::max(begin1, begin2) < std::min(end1, end2); diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index e4d9c5c1f9..4ecad430ff 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -18,6 +18,7 @@ load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") load("@rules_java//java:defs.bzl", "java_proto_library") load("@rules_proto//proto:defs.bzl", "proto_library") load("@rules_python//python:proto.bzl", "py_proto_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") package(default_visibility = ["//visibility:public"]) @@ -410,6 +411,7 @@ cc_library( ":intervals", ":lb_tree_search", ":linear_constraint", + ":linear_constraint_manager", ":linear_model", ":linear_programming_constraint", ":linear_relaxation", @@ -429,9 +431,11 @@ cc_library( ":simplification", ":stat_tables", ":subsolver", + ":symmetry_util", ":synchronization", ":util", ":work_assignment", + "//ortools/algorithms:sparse_permutation", "//ortools/base", "//ortools/base:status_macros", "//ortools/base:strong_vector", @@ -1284,6 +1288,7 @@ cc_library( srcs = ["symmetry_util.cc"], hdrs = ["symmetry_util.h"], deps = [ + ":cp_model_cc_proto", "//ortools/algorithms:dynamic_partition", "//ortools/algorithms:sparse_permutation", "//ortools/base", @@ -1301,6 +1306,7 @@ cc_test( ":symmetry_util", "//ortools/algorithms:sparse_permutation", "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", "@com_google_absl//absl/types:span", ], ) @@ -2153,6 +2159,7 @@ cc_library( "//ortools/util:saturated_arithmetic", "//ortools/util:strong_integers", "//ortools/util:time_limit", + "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/container:flat_hash_map", 
"@com_google_absl//absl/log:check", "@com_google_absl//absl/numeric:int128", @@ -2176,7 +2183,6 @@ cc_library( "//ortools/base", "//ortools/base:hash", "//ortools/base:strong_vector", - "//ortools/glop:revised_simplex", "//ortools/glop:variables_info", "//ortools/lp_data:base", "//ortools/util:logging", @@ -2185,10 +2191,11 @@ cc_library( "//ortools/util:time_limit", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/container:flat_hash_map", - "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/meta:type_traits", "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", ], ) @@ -3011,7 +3018,6 @@ cc_library( ":diffn_util", ":integer", ":linear_constraint_manager", - ":linear_programming_constraint", ":model", ":presolve_context", ":rins", @@ -3032,6 +3038,7 @@ cc_library( "@com_google_absl//absl/base:log_severity", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/flags:flag", "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/meta:type_traits", @@ -3357,8 +3364,8 @@ cc_library( hdrs = ["inclusion.h"], deps = [ "//ortools/base", + "//ortools/util:time_limit", "@com_google_absl//absl/log:check", - "@com_google_absl//absl/types:span", ], ) @@ -3434,6 +3441,7 @@ cc_test( ":inclusion", ":util", "//ortools/base:gmock_main", + "//ortools/util:time_limit", "@com_google_absl//absl/random", "@com_google_absl//absl/types:span", ], diff --git a/ortools/sat/clause.cc b/ortools/sat/clause.cc index f7de79c194..79b6c3edd2 100644 --- a/ortools/sat/clause.cc +++ b/ortools/sat/clause.cc @@ -1475,17 +1475,20 @@ bool BinaryImplicationGraph::DetectEquivalences(bool log_info) { // using the binary implication graph only. // // TODO(user): Track which literal have new implications, and only process -// the antecedants of these. 
+// the antecedents of these. bool BinaryImplicationGraph::ComputeTransitiveReduction(bool log_info) { DCHECK_EQ(trail_->CurrentDecisionLevel(), 0); + if (time_limit_->LimitReached()) return true; if (!DetectEquivalences()) return false; // TODO(user): the situation with fixed variable is not really "clean". // Simplify the code so we are sure we don't run into issue or have to deal // with any of that here. + if (time_limit_->LimitReached()) return true; if (!Propagate(trail_)) return false; RemoveFixedVariables(); DCHECK(InvariantsAreOk()); + if (time_limit_->LimitReached()) return true; log_info |= VLOG_IS_ON(1); WallTimer wall_timer; @@ -1708,7 +1711,7 @@ bool BinaryImplicationGraph::TransformIntoMaxCliques( // Data to detect inclusion of base amo into extend amo. std::vector detector_clique_index; CompactVectorVector storage; - InclusionDetector detector(storage); + InclusionDetector detector(storage, time_limit_); detector.SetWorkLimit(1e9); std::vector dense_index_to_index; @@ -2503,6 +2506,7 @@ void BinaryImplicationGraph::CleanupAllRemovedAndFixedVariables() { } bool BinaryImplicationGraph::InvariantsAreOk() { + if (time_limit_->LimitReached()) return true; // We check that if a => b then not(b) => not(a). absl::flat_hash_set> seen; int num_redundant = 0; diff --git a/ortools/sat/clause.h b/ortools/sat/clause.h index 457d62e2c2..1d755bf12d 100644 --- a/ortools/sat/clause.h +++ b/ortools/sat/clause.h @@ -332,6 +332,13 @@ class ClauseManager : public SatPropagator { add_clause_callback_ = std::move(add_clause_callback); } + // Removes the add clause callback and returns it. This can be used to + // temporarily disable the callback. + absl::AnyInvocable)> + TakeAddClauseCallback() { + return std::move(add_clause_callback_); + } + private: // Attaches the given clause. This eventually propagates a literal which is // enqueued on the trail. Returns false if a contradiction was encountered. 
diff --git a/ortools/sat/cp_model_lns.cc b/ortools/sat/cp_model_lns.cc index 48404abdfd..eb135cfc0e 100644 --- a/ortools/sat/cp_model_lns.cc +++ b/ortools/sat/cp_model_lns.cc @@ -29,6 +29,7 @@ #include "absl/base/log_severity.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" +#include "absl/flags/flag.h" #include "absl/log/check.h" #include "absl/meta/type_traits.h" #include "absl/random/bit_gen_ref.h" @@ -49,7 +50,6 @@ #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" #include "ortools/sat/linear_constraint_manager.h" -#include "ortools/sat/linear_programming_constraint.h" #include "ortools/sat/model.h" #include "ortools/sat/presolve_context.h" #include "ortools/sat/rins.h" @@ -933,7 +933,8 @@ NeighborhoodGeneratorHelper::GetSchedulingPrecedences( return result; } -std::vector> NeighborhoodGeneratorHelper::GetRoutingPaths( +std::vector> +NeighborhoodGeneratorHelper::GetRoutingPathVariables( const CpSolverResponse& initial_solution) const { struct HeadAndArcLiteral { int head; @@ -1901,6 +1902,15 @@ Neighborhood LocalBranchingLpBasedNeighborhoodGenerator::Generate( local_cp_model.mutable_objective()->set_integer_scaling_factor(0); } + // Dump? + if (absl::GetFlag(FLAGS_cp_model_dump_submodels)) { + const std::string dump_name = + absl::StrCat(absl::GetFlag(FLAGS_cp_model_dump_prefix), + "lb_relax_lns_lp_", data.task_id, ".pb.txt"); + LOG(INFO) << "Dumping linear relaxed model to '" << dump_name << "'."; + CHECK(WriteModelProtoToFile(local_cp_model, dump_name)); + } + // Solve. // // TODO(user): Shall we pass the objective upper bound so we have more @@ -2445,26 +2455,24 @@ Neighborhood RoutingRandomNeighborhoodGenerator::Generate( const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const std::vector> all_paths = - helper_.GetRoutingPaths(initial_solution); + helper_.GetRoutingPathVariables(initial_solution); // Collect all unique variables. 
- absl::flat_hash_set all_path_variables; - for (auto& path : all_paths) { - all_path_variables.insert(path.begin(), path.end()); + std::vector variables_to_fix; + for (const auto& path : all_paths) { + variables_to_fix.insert(variables_to_fix.end(), path.begin(), path.end()); } - std::vector fixed_variables(all_path_variables.begin(), - all_path_variables.end()); - std::sort(fixed_variables.begin(), fixed_variables.end()); - GetRandomSubset(1.0 - data.difficulty, &fixed_variables, random); + gtl::STLSortAndRemoveDuplicates(&variables_to_fix); + GetRandomSubset(1.0 - data.difficulty, &variables_to_fix, random); return helper_.FixGivenVariables( - initial_solution, {fixed_variables.begin(), fixed_variables.end()}); + initial_solution, {variables_to_fix.begin(), variables_to_fix.end()}); } Neighborhood RoutingPathNeighborhoodGenerator::Generate( const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> all_paths = - helper_.GetRoutingPaths(initial_solution); + helper_.GetRoutingPathVariables(initial_solution); // Collect all unique variables. absl::flat_hash_set all_path_variables; @@ -2510,7 +2518,7 @@ Neighborhood RoutingFullPathNeighborhoodGenerator::Generate( const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> all_paths = - helper_.GetRoutingPaths(initial_solution); + helper_.GetRoutingPathVariables(initial_solution); // Remove a corner case where all paths are empty. if (all_paths.empty()) { return helper_.NoNeighborhood(); @@ -2572,6 +2580,32 @@ Neighborhood RoutingFullPathNeighborhoodGenerator::Generate( return helper_.FixGivenVariables(initial_solution, fixed_variables); } +Neighborhood RoutingStartsNeighborhoodGenerator::Generate( + const CpSolverResponse& initial_solution, SolveData& data, + absl::BitGenRef random) { + std::vector> all_paths = + helper_.GetRoutingPathVariables(initial_solution); + // TODO(user): Maybe enable some routes to be non empty? 
+ if (all_paths.empty()) return helper_.NoNeighborhood(); + + // Collect all unique path variables, except the start and end of the paths. + // For circuit constraints, we approximate this with the arc going in and out + // of the smallest node. This works well for model where the zero node is an + // artificial node. + std::vector variables_to_fix; + for (const auto& path : all_paths) { + for (const int ref : path) { + if (ref == path.front() || ref == path.back()) continue; + variables_to_fix.push_back(PositiveRef(ref)); + } + } + gtl::STLSortAndRemoveDuplicates(&variables_to_fix); + GetRandomSubset(1.0 - data.difficulty, &variables_to_fix, random); + + return helper_.FixGivenVariables( + initial_solution, {variables_to_fix.begin(), variables_to_fix.end()}); +} + bool RelaxationInducedNeighborhoodGenerator::ReadyToGenerate() const { return (incomplete_solutions_->HasSolution() || lp_solutions_->NumSolutions() > 0); diff --git a/ortools/sat/cp_model_lns.h b/ortools/sat/cp_model_lns.h index 00856cbe1c..ac69d2562b 100644 --- a/ortools/sat/cp_model_lns.h +++ b/ortools/sat/cp_model_lns.h @@ -238,7 +238,7 @@ class NeighborhoodGeneratorHelper : public SubSolver { // self-looping arcs. Path are sorted, starting from the arc with the lowest // tail index, and going in sequence up to the last arc before the circuit is // closed. Each entry correspond to the arc literal on the circuit. - std::vector> GetRoutingPaths( + std::vector> GetRoutingPathVariables( const CpSolverResponse& initial_solution) const; // Returns all precedences extracted from the scheduling constraint and the @@ -392,6 +392,9 @@ class NeighborhoodGenerator { IntegerValue base_objective = IntegerValue(0); IntegerValue new_objective = IntegerValue(0); + // For debugging. + int task_id = 0; + // This is just used to construct a deterministic order for the updates. 
bool operator<(const SolveData& o) const { return std::tie(status, difficulty, deterministic_limit, @@ -782,7 +785,7 @@ class RoutingPathNeighborhoodGenerator : public NeighborhoodGenerator { SolveData& data, absl::BitGenRef random) final; }; -// This routing based LNS generator aims are relaxing one full path, and make +// This routing based LNS generator aims at relaxing one full path, and make // some room on the other paths to absorb the nodes of the relaxed path. // // In order to do so, it will relax the first and the last arc of each path in @@ -799,6 +802,19 @@ class RoutingFullPathNeighborhoodGenerator : public NeighborhoodGenerator { SolveData& data, absl::BitGenRef random) final; }; +// This routing based LNS generator performs like the +// RoutingRandomNeighborhoodGenerator, but always relax the arcs going in and +// out of the depot for routes constraints, and of the node with the minimal +// index for circuit constraints. +class RoutingStartsNeighborhoodGenerator : public NeighborhoodGenerator { + public: + RoutingStartsNeighborhoodGenerator(NeighborhoodGeneratorHelper const* helper, + absl::string_view name) + : NeighborhoodGenerator(name, helper) {} + Neighborhood Generate(const CpSolverResponse& initial_solution, + SolveData& data, absl::BitGenRef random) final; +}; + // Generates a neighborhood by fixing the variables to solutions reported in // various repositories. This is inspired from RINS published in "Exploring // relaxation induced neighborhoods to improve MIP solutions" 2004 by E. Danna diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 9ae2eca5e4..a283115e39 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -8507,7 +8507,7 @@ void CpModelPresolver::ProcessSetPPC() { // TODO(user): compute on the fly instead of temporary storing variables? 
CompactVectorVector storage; - InclusionDetector detector(storage); + InclusionDetector detector(storage, time_limit_); detector.SetWorkLimit(context_->params().presolve_inclusion_work_limit()); // We use an encoding of literal that allows to index arrays. @@ -8618,7 +8618,7 @@ void CpModelPresolver::DetectIncludedEnforcement() { // TODO(user): compute on the fly instead of temporary storing variables? std::vector relevant_constraints; CompactVectorVector storage; - InclusionDetector detector(storage); + InclusionDetector detector(storage, time_limit_); detector.SetWorkLimit(context_->params().presolve_inclusion_work_limit()); std::vector temp_literals; @@ -9021,6 +9021,20 @@ void CpModelPresolver::DetectDuplicateConstraintsWithDifferentEnforcements( continue; } + const int a = rep_ct->enforcement_literal(0); + const int b = dup_ct->enforcement_literal(0); + + if (a == NegatedRef(b) && rep_ct->enforcement_literal().size() == 1 && + dup_ct->enforcement_literal().size() == 1) { + context_->UpdateRuleStats( + "duplicate: both with enforcement and its negation"); + rep_ct->mutable_enforcement_literal()->Clear(); + context_->UpdateConstraintVariableUsage(rep); + dup_ct->Clear(); + context_->UpdateConstraintVariableUsage(dup); + continue; + } + // Special case. This looks specific but users might reify with a cost // a duplicate constraint. In this case, no need to have two variables, // we can make them equal by duality argument. @@ -9034,8 +9048,6 @@ void CpModelPresolver::DetectDuplicateConstraintsWithDifferentEnforcements( // we can also add the equality. Alternatively, we can just introduce a new // variable and merge all duplicate constraint into 1 + bunch of boolean // constraints liking enforcements. 
- const int a = rep_ct->enforcement_literal(0); - const int b = dup_ct->enforcement_literal(0); if (context_->VariableWithCostIsUniqueAndRemovable(a) && context_->VariableWithCostIsUniqueAndRemovable(b)) { // Both these case should be presolved before, but it is easy to deal with @@ -9580,7 +9592,7 @@ void CpModelPresolver::DetectDominatedLinearConstraints() { const CpModelProto& proto_; }; Storage storage(context_->working_model); - InclusionDetector detector(storage); + InclusionDetector detector(storage, time_limit_); detector.SetWorkLimit(context_->params().presolve_inclusion_work_limit()); // Because we use the constraint <-> variable graph, we cannot modify it @@ -10759,7 +10771,7 @@ void CpModelPresolver::ExtractEncodingFromLinear() { // TODO(user): compute on the fly instead of temporary storing variables? std::vector relevant_constraints; CompactVectorVector storage; - InclusionDetector detector(storage); + InclusionDetector detector(storage, time_limit_); detector.SetWorkLimit(context_->params().presolve_inclusion_work_limit()); // Loop over the constraints and fill the structures above. @@ -11815,8 +11827,7 @@ void CpModelPresolver::PresolveToFixPoint() { if (ProcessChangedVariables(&in_queue, &queue)) continue; - // TODO(user): Uncomment this line once the tests pass. - // DCHECK(!context_->HasUnusedAffineVariable()); + DCHECK(!context_->HasUnusedAffineVariable()); // Deal with integer variable only appearing in an encoding. 
for (int v = 0; v < context_->working_model->variables().size(); ++v) { @@ -11999,6 +12010,9 @@ bool ModelCopy::ImportAndSimplifyConstraints( case ConstraintProto::kLinear: if (!CopyLinear(ct)) return CreateUnsatModel(c, ct); break; + case ConstraintProto::kIntProd: + if (!CopyIntProd(ct, ignore_names)) return CreateUnsatModel(c, ct); + break; case ConstraintProto::kAtMostOne: if (!CopyAtMostOne(ct)) return CreateUnsatModel(c, ct); break; @@ -12258,6 +12272,38 @@ bool ModelCopy::CopyBoolAndWithDupSupport(const ConstraintProto& ct) { return true; } +bool ModelCopy::CopyLinearExpression(const LinearExpressionProto& expr, + LinearExpressionProto* dst) { + non_fixed_variables_.clear(); + non_fixed_coefficients_.clear(); + int64_t offset = expr.offset(); + for (int i = 0; i < expr.vars_size(); ++i) { + const int ref = expr.vars(i); + const int64_t coeff = expr.coeffs(i); + if (coeff == 0) continue; + if (context_->IsFixed(ref)) { + offset += coeff * context_->MinOf(ref); + continue; + } + + // Make sure we never have negative ref in a linear constraint. 
+ if (RefIsPositive(ref)) { + non_fixed_variables_.push_back(ref); + non_fixed_coefficients_.push_back(coeff); + } else { + non_fixed_variables_.push_back(NegatedRef(ref)); + non_fixed_coefficients_.push_back(-coeff); + } + } + + dst->set_offset(offset); + dst->mutable_vars()->Add(non_fixed_variables_.begin(), + non_fixed_variables_.end()); + dst->mutable_coeffs()->Add(non_fixed_coefficients_.begin(), + non_fixed_coefficients_.end()); + return true; +} + bool ModelCopy::CopyLinear(const ConstraintProto& ct) { non_fixed_variables_.clear(); non_fixed_coefficients_.clear(); @@ -12375,13 +12421,29 @@ bool ModelCopy::CopyInterval(const ConstraintProto& ct, int c, "supported."; interval_mapping_[c] = context_->working_model->constraints_size(); ConstraintProto* new_ct = context_->working_model->add_constraints(); - if (ignore_names) { - *new_ct->mutable_enforcement_literal() = ct.enforcement_literal(); - *new_ct->mutable_interval() = ct.interval(); - } else { - *new_ct = ct; + if (!ignore_names) { + new_ct->set_name(ct.name()); } + *new_ct->mutable_enforcement_literal() = ct.enforcement_literal(); + CopyLinearExpression(ct.interval().start(), + new_ct->mutable_interval()->mutable_start()); + CopyLinearExpression(ct.interval().size(), + new_ct->mutable_interval()->mutable_size()); + CopyLinearExpression(ct.interval().end(), + new_ct->mutable_interval()->mutable_end()); + return true; +} +bool ModelCopy::CopyIntProd(const ConstraintProto& ct, bool ignore_names) { + ConstraintProto* new_ct = context_->working_model->add_constraints(); + if (!ignore_names) { + new_ct->set_name(ct.name()); + } + for (const LinearExpressionProto& expr : ct.int_prod().exprs()) { + CopyLinearExpression(expr, new_ct->mutable_int_prod()->add_exprs()); + } + CopyLinearExpression(ct.int_prod().target(), + new_ct->mutable_int_prod()->mutable_target()); return true; } @@ -12399,26 +12461,29 @@ void ModelCopy::AddLinearConstraintForInterval(const ConstraintProto& ct) { absl::Span(itv.end().coeffs())) { 
// Trivial constraint, nothing to do. } else { - ConstraintProto* new_ct = context_->working_model->add_constraints(); - *new_ct->mutable_enforcement_literal() = ct.enforcement_literal(); + tmp_constraint_.Clear(); + *tmp_constraint_.mutable_enforcement_literal() = ct.enforcement_literal(); + LinearConstraintProto* mutable_linear = tmp_constraint_.mutable_linear(); - LinearConstraintProto* mutable_linear = new_ct->mutable_linear(); mutable_linear->add_domain(0); mutable_linear->add_domain(0); AddLinearExpressionToLinearConstraint(itv.start(), 1, mutable_linear); AddLinearExpressionToLinearConstraint(itv.size(), 1, mutable_linear); AddLinearExpressionToLinearConstraint(itv.end(), -1, mutable_linear); + CopyLinear(tmp_constraint_); } // An enforced interval must have is size non-negative. const LinearExpressionProto& size_expr = itv.size(); if (context_->MinOf(size_expr) < 0) { - ConstraintProto* new_ct = context_->working_model->add_constraints(); - *new_ct->mutable_enforcement_literal() = ct.enforcement_literal(); - *new_ct->mutable_linear()->mutable_vars() = size_expr.vars(); - *new_ct->mutable_linear()->mutable_coeffs() = size_expr.coeffs(); - new_ct->mutable_linear()->add_domain(-size_expr.offset()); - new_ct->mutable_linear()->add_domain(std::numeric_limits::max()); + tmp_constraint_.Clear(); + *tmp_constraint_.mutable_enforcement_literal() = ct.enforcement_literal(); + *tmp_constraint_.mutable_linear()->mutable_vars() = size_expr.vars(); + *tmp_constraint_.mutable_linear()->mutable_coeffs() = size_expr.coeffs(); + tmp_constraint_.mutable_linear()->add_domain(-size_expr.offset()); + tmp_constraint_.mutable_linear()->add_domain( + std::numeric_limits::max()); + CopyLinear(tmp_constraint_); } } diff --git a/ortools/sat/cp_model_presolve.h b/ortools/sat/cp_model_presolve.h index 7c9dff19cc..6759df2d0e 100644 --- a/ortools/sat/cp_model_presolve.h +++ b/ortools/sat/cp_model_presolve.h @@ -428,6 +428,9 @@ class ModelCopy { bool CopyBoolAnd(const ConstraintProto& ct); 
bool CopyBoolAndWithDupSupport(const ConstraintProto& ct); + bool CopyLinearExpression(const LinearExpressionProto& expr, + LinearExpressionProto* dst); + bool CopyIntProd(const ConstraintProto& ct, bool ignore_names); bool CopyLinear(const ConstraintProto& ct); bool CopyAtMostOne(const ConstraintProto& ct); bool CopyExactlyOne(const ConstraintProto& ct); @@ -458,6 +461,8 @@ class ModelCopy { std::vector temp_literals_; absl::flat_hash_set temp_literals_set_; + + ConstraintProto tmp_constraint_; }; // Copy in_model to the model in the presolve context. diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index d6ffc9f60a..21da620201 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -24,6 +24,7 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" +#include "absl/flags/flag.h" #include "absl/log/check.h" #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" @@ -496,6 +497,8 @@ absl::flat_hash_map GetNamedParameters( new_params.set_linearization_level(2); new_params.set_add_lp_constraints_lazily(false); strategies["max_lp"] = new_params; + new_params.set_use_symmetry_in_lp(true); + strategies["max_lp_sym"] = new_params; } // Core. 
Note that we disable the lp here because it is faster on the minizinc diff --git a/ortools/sat/cp_model_search.h b/ortools/sat/cp_model_search.h index 4f57c3f741..2e005b818c 100644 --- a/ortools/sat/cp_model_search.h +++ b/ortools/sat/cp_model_search.h @@ -20,7 +20,7 @@ #include #include "absl/container/flat_hash_map.h" -#include "absl/strings/string_view.h" +#include "ortools/base/types.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/integer.h" diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index dd60f67fa3..9420aa4c6d 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -1105,6 +1105,7 @@ class LnsSolver : public SubSolver { random_engine_t random(seed); NeighborhoodGenerator::SolveData data; + data.task_id = task_id; data.difficulty = generator_->difficulty(); data.deterministic_limit = generator_->deterministic_limit(); @@ -1213,6 +1214,13 @@ class LnsSolver : public SubSolver { helper_->ModelProto(), context.get()); lns_fragment.set_name(absl::StrCat("lns_", task_id, "_", source_info)); + // Tricky: we don't want to use the symmetry of the main problem in the + // LNS presolved problem ! And currently no code clears/update it. + // + // TODO(user): Find a cleaner way like clear it as part of the presolve. + // Also, do not copy that in the first place. + lns_fragment.clear_symmetry(); + // Overwrite solution hinting. if (neighborhood.delta.has_solution_hint()) { *lns_fragment.mutable_solution_hint() = @@ -1366,7 +1374,7 @@ class LnsSolver : public SubSolver { // TODO(user): We could however fix it in the LNS Helper! 
if (data.status == CpSolverStatus::OPTIMAL && !shared_->model_proto.has_symmetry() && !solution_values.empty() && - neighborhood.is_simple && + neighborhood.is_simple && shared_->bounds != nullptr && !neighborhood.variables_that_can_be_fixed_to_local_optimum .empty()) { display_lns_info = true; @@ -1710,6 +1718,12 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) { helper, name_filter.LastName()), lns_params, helper, shared)); } + if (name_filter.Keep("routing_starts_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } } if (num_routes > 0 || num_circuit > 1) { if (name_filter.Keep("routing_full_path_lns")) { diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc index 405d4b58b5..b961bcf0f7 100644 --- a/ortools/sat/cp_model_solver_helpers.cc +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -41,13 +41,13 @@ #include "absl/time/time.h" #include "absl/types/span.h" #include "google/protobuf/arena.h" +#include "ortools/algorithms/sparse_permutation.h" #include "ortools/base/logging.h" #include "ortools/base/strong_vector.h" #include "ortools/graph/connected_components.h" #include "ortools/port/proto_utils.h" #include "ortools/sat/clause.h" #include "ortools/sat/cp_model.pb.h" -#include "ortools/sat/cp_model_checker.h" #include "ortools/sat/cp_model_loader.h" #include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/cp_model_postsolve.h" @@ -62,6 +62,7 @@ #include "ortools/sat/intervals.h" #include "ortools/sat/lb_tree_search.h" #include "ortools/sat/linear_constraint.h" +#include "ortools/sat/linear_constraint_manager.h" #include "ortools/sat/linear_programming_constraint.h" #include "ortools/sat/linear_relaxation.h" #include "ortools/sat/max_hs.h" @@ -72,6 +73,7 @@ #include "ortools/sat/sat_base.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/sat/sat_solver.h" +#include 
"ortools/sat/symmetry_util.h" #include "ortools/sat/synchronization.h" #include "ortools/sat/util.h" #include "ortools/sat/work_assignment.h" @@ -383,6 +385,53 @@ IntegerVariable AddLPConstraints(bool objective_need_to_be_tight, LinearRelaxation relaxation = ComputeLinearRelaxation(model_proto, m); if (m->GetOrCreate()->ModelIsUnsat()) return kNoIntegerVariable; + // In the presence of symmetry, we will create an extra integer variable per + // orbit. + auto* mapping = m->GetOrCreate(); + auto* params = m->GetOrCreate(); + auto* symmetrizer = m->GetOrCreate(); + if (model_proto.has_symmetry() && params->linearization_level() > 1 && + params->use_symmetry_in_lp()) { + // Convert to SparsePermutation. + const int num_vars = model_proto.variables().size(); + std::vector> generators; + for (const SparsePermutationProto& perm : + model_proto.symmetry().permutations()) { + generators.emplace_back(CreateSparsePermutationFromProto(num_vars, perm)); + } + + // Get orbits in term of IntegerVariable. + const std::vector var_to_orbit_index = GetOrbits(num_vars, generators); + std::vector orbit_is_ok; + std::vector> orbits; + for (int proto_var = 0; proto_var < num_vars; ++proto_var) { + const int orbit_index = var_to_orbit_index[proto_var]; + if (orbit_index == -1) continue; + if (orbit_index >= orbits.size()) { + orbits.resize(orbit_index + 1); + orbit_is_ok.resize(orbit_index + 1, true); + } + + // In linearization level >=2, all variables should have a view. + // Otherwise revisit and skip orbit without a full view. + const IntegerVariable var = mapping->Integer(proto_var); + CHECK_NE(var, kNoIntegerVariable); + orbits[orbit_index].push_back(var); + } + + // Let's create the orbit sum vars and register each orbit. 
+ std::vector> terms; + for (const std::vector& orbit : orbits) { + terms.clear(); + for (const IntegerVariable var : orbit) { + terms.push_back({var, 1}); + } + const IntegerVariable sum_var = + GetOrCreateVariableLinkedToSumOf(terms, true, true, m); + symmetrizer->AddSymmetryOrbit(sum_var, orbit); + } + } + // The bipartite graph of LP constraints might be disconnected: // make a partition of the variables into connected components. // Constraint nodes are indexed by [0..num_lp_constraints), @@ -420,6 +469,14 @@ IntegerVariable AddLPConstraints(bool objective_need_to_be_tight, } } + // Make sure variables from the same orbit end up in same components. + for (int i = 0; i < symmetrizer->NumOrbits(); ++i) { + const int representative = get_var_index(symmetrizer->OrbitSumVar(i)); + for (const IntegerVariable var : symmetrizer->Orbit(i)) { + components.AddEdge(representative, get_var_index(var)); + } + } + const int num_components = components.GetNumberOfComponents(); std::vector component_sizes(num_components, 0); const std::vector index_to_component = components.GetComponentIds(); @@ -442,7 +499,6 @@ IntegerVariable AddLPConstraints(bool objective_need_to_be_tight, // as much as possible the objective bound by using any bounds the LP give // us on one of its components. This is critical on the zephyrus problems for // instance. - auto* mapping = m->GetOrCreate(); for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { const IntegerVariable var = mapping->Integer(model_proto.objective().vars(i)); @@ -482,12 +538,45 @@ IntegerVariable AddLPConstraints(bool objective_need_to_be_tight, std::vector> top_level_cp_terms; int num_components_containing_objective = 0; if (model_proto.has_objective()) { + // First convert the proto objective to an IntegerVariable one. In case of + // "use_symmetry_in_lp", we also rewrite it in terms of the sum of the + // variables in the orbits. 
+ std::vector> objective; + const int num_orbits = symmetrizer->NumOrbits(); + if (num_orbits > 0) { + // We use the orbit_sum var instead. + std::vector orbit_obj_coeff(num_orbits, 0); + for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { + const IntegerVariable var = + mapping->Integer(model_proto.objective().vars(i)); + const int64_t coeff = model_proto.objective().coeffs(i); + const int orbit_index = symmetrizer->OrbitIndex(var); + if (orbit_index != -1) { + if (orbit_obj_coeff[orbit_index] == 0) { + orbit_obj_coeff[orbit_index] = coeff; + } else { + CHECK_EQ(orbit_obj_coeff[orbit_index], coeff); + } + continue; + } + objective.push_back({var, coeff}); + } + for (int i = 0; i < num_orbits; ++i) { + if (orbit_obj_coeff[i] == 0) continue; + objective.push_back({symmetrizer->OrbitSumVar(i), orbit_obj_coeff[i]}); + } + } else { + for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { + const IntegerVariable var = + mapping->Integer(model_proto.objective().vars(i)); + const int64_t coeff = model_proto.objective().coeffs(i); + objective.push_back({var, coeff}); + } + } + // First pass: set objective coefficients on the lp constraints, and store // the cp terms in one vector per component. - for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { - const IntegerVariable var = - mapping->Integer(model_proto.objective().vars(i)); - const int64_t coeff = model_proto.objective().coeffs(i); + for (const auto [var, coeff] : objective) { const int c = index_to_component[get_var_index(var)]; if (lp_constraints[c] != nullptr) { lp_constraints[c]->SetObjectiveCoefficient(var, IntegerValue(coeff)); @@ -863,9 +952,10 @@ int RegisterClausesLevelZeroImport(int id, auto* clause_stream = share_glue_clauses ? 
shared_clauses_manager->GetClauseStream(id) : nullptr; + auto* clause_manager = model->GetOrCreate(); const auto& import_level_zero_clauses = [shared_clauses_manager, id, mapping, sat_solver, implications, - clause_stream, + clause_stream, clause_manager, minimize_shared_clauses]() { std::vector> new_binary_clauses; shared_clauses_manager->GetUnseenBinaryClauses(id, &new_binary_clauses); @@ -883,6 +973,9 @@ int RegisterClausesLevelZeroImport(int id, int new_clauses = 0; std::array local_clause; sat_solver->EnsureNewClauseIndexInitialized(); + // Temporarily disable clause sharing so we don't immediately re-export the + // clauses we just imported. + auto callback = clause_manager->TakeAddClauseCallback(); for (const absl::Span shared_clause : shared_clauses_manager->GetUnseenClauses(id)) { // Check this clause was not already learned by this worker. @@ -900,6 +993,7 @@ int RegisterClausesLevelZeroImport(int id, } ++new_clauses; } + clause_manager->SetAddClauseCallback(std::move(callback)); clause_stream->RemoveWorstClauses(); if (minimize_shared_clauses && new_clauses > 0) { // The new clauses may be subsumed, so try to minimize them to reduce diff --git a/ortools/sat/cp_model_symmetries.cc b/ortools/sat/cp_model_symmetries.cc index b489d655b6..5b6046e23c 100644 --- a/ortools/sat/cp_model_symmetries.cc +++ b/ortools/sat/cp_model_symmetries.cc @@ -739,6 +739,31 @@ void FindCpModelSymmetries( } } +namespace { + +void LogOrbitInformation(absl::Span var_to_orbit_index, + SolverLogger* logger) { + if (logger == nullptr || !logger->LoggingIsEnabled()) return; + + int num_touched_vars = 0; + std::vector orbit_sizes; + for (int var = 0; var < var_to_orbit_index.size(); ++var) { + const int rep = var_to_orbit_index[var]; + if (rep == -1) continue; + if (rep >= orbit_sizes.size()) orbit_sizes.resize(rep + 1, 0); + ++num_touched_vars; + orbit_sizes[rep]++; + } + std::sort(orbit_sizes.begin(), orbit_sizes.end(), std::greater()); + const int num_orbits = orbit_sizes.size(); + 
if (num_orbits > 10) orbit_sizes.resize(10); + SOLVER_LOG(logger, "[Symmetry] ", num_orbits, " orbits on ", num_touched_vars, + " variables with sizes: ", absl::StrJoin(orbit_sizes, ","), + (num_orbits > orbit_sizes.size() ? ",..." : "")); +} + +} // namespace + void DetectAndAddSymmetryToProto(const SatParameters& params, CpModelProto* proto, SolverLogger* logger) { SymmetryProto* symmetry = proto->mutable_symmetry(); @@ -752,6 +777,14 @@ void DetectAndAddSymmetryToProto(const SatParameters& params, return; } + // Log orbit information. + // + // TODO(user): It might be nice to just add this to the proto rather than + // re-reading the generators and recomputing this in a few places. + const int num_vars = proto->variables().size(); + const std::vector orbits = GetOrbits(num_vars, generators); + LogOrbitInformation(orbits, logger); + for (const std::unique_ptr& perm : generators) { SparsePermutationProto* perm_proto = symmetry->add_permutations(); const int num_cycle = perm->NumCycles(); @@ -897,7 +930,7 @@ std::vector BuildInequalityCoeffsForOrbitope( void UpdateHintAfterFixingBoolToBreakSymmetry( PresolveContext* context, int var, bool fixed_value, - const std::vector>& generators) { + absl::Span> generators) { if (!context->VarHasSolutionHint(var)) { return; } @@ -1021,18 +1054,7 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { } // Log orbit info. - if (context->logger()->LoggingIsEnabled()) { - std::vector sorted_sizes; - for (const int s : orbit_sizes) { - if (s != 0) sorted_sizes.push_back(s); - } - std::sort(sorted_sizes.begin(), sorted_sizes.end(), std::greater()); - const int num_orbits = sorted_sizes.size(); - if (num_orbits > 10) sorted_sizes.resize(10); - SOLVER_LOG(context->logger(), "[Symmetry] ", num_orbits, - " orbits with sizes: ", absl::StrJoin(sorted_sizes, ","), - (num_orbits > sorted_sizes.size() ? ",..." 
: "")); - } + LogOrbitInformation(orbits, context->logger()); // First heuristic based on propagation, see the function comment. if (max_orbit_size > 2) { diff --git a/ortools/sat/cuts.cc b/ortools/sat/cuts.cc index af4aec65de..ab3988287e 100644 --- a/ortools/sat/cuts.cc +++ b/ortools/sat/cuts.cc @@ -888,6 +888,9 @@ bool IntegerRoundingCutHelper::ComputeCut( } // Re try complementation on the transformed cut. + // TODO(user): This can be quadratic! we don't want to try too much of them. + // Or optimize the algo, we should be able to be more incremental here. + // see on g200x740.pb.gz for instance. for (CutTerm& entry : cut_.terms) { if (!entry.HasRelevantLpValue()) break; if (entry.coeff % best_divisor == 0) continue; diff --git a/ortools/sat/diffn_cuts.cc b/ortools/sat/diffn_cuts.cc index 8c4b77b2d1..dfd535e419 100644 --- a/ortools/sat/diffn_cuts.cc +++ b/ortools/sat/diffn_cuts.cc @@ -310,7 +310,7 @@ CutGenerator CreateNoOverlap2dEnergyCutGenerator( SchedulingConstraintHelper* x_helper, SchedulingConstraintHelper* y_helper, SchedulingDemandHelper* x_demands_helper, SchedulingDemandHelper* y_demands_helper, - const std::vector>& energies, Model* model) { + absl::Span> energies, Model* model) { CutGenerator result; result.only_run_at_level_zero = true; AddIntegerVariableFromIntervals(x_helper, model, &result.vars); @@ -319,7 +319,10 @@ CutGenerator CreateNoOverlap2dEnergyCutGenerator( result.generate_cuts = [x_helper, y_helper, x_demands_helper, y_demands_helper, model, - energies](LinearConstraintManager* manager) { + energies = + std::vector>( + energies.begin(), energies.end())]( + LinearConstraintManager* manager) { if (!x_helper->SynchronizeAndSetTimeDirection(true)) return false; if (!y_helper->SynchronizeAndSetTimeDirection(true)) return false; x_demands_helper->CacheAllEnergyValues(); diff --git a/ortools/sat/diffn_cuts.h b/ortools/sat/diffn_cuts.h index 8b15f8352d..86a8e40e6b 100644 --- a/ortools/sat/diffn_cuts.h +++ b/ortools/sat/diffn_cuts.h @@ -17,6 
+17,7 @@ #include #include +#include "absl/types/span.h" #include "ortools/sat/cuts.h" #include "ortools/sat/integer.h" #include "ortools/sat/intervals.h" @@ -49,7 +50,7 @@ CutGenerator CreateNoOverlap2dEnergyCutGenerator( SchedulingConstraintHelper* x_helper, SchedulingConstraintHelper* y_helper, SchedulingDemandHelper* x_demands_helper, SchedulingDemandHelper* y_demands_helper, - const std::vector>& energies, Model* model); + absl::Span> energies, Model* model); // Internal methods and data structures, useful for testing. diff --git a/ortools/sat/go/cpmodel/cp_solver_c.cc b/ortools/sat/go/cpmodel/cp_solver_c.cc index e8bba37b13..6c3dd6c558 100644 --- a/ortools/sat/go/cpmodel/cp_solver_c.cc +++ b/ortools/sat/go/cpmodel/cp_solver_c.cc @@ -14,6 +14,7 @@ #include "ortools/sat/go/cpmodel/cp_solver_c.h" #include +#include #include "absl/log/check.h" #include "ortools/base/memutil.h" diff --git a/ortools/sat/inclusion.h b/ortools/sat/inclusion.h index 4570b23486..5f5ac3012c 100644 --- a/ortools/sat/inclusion.h +++ b/ortools/sat/inclusion.h @@ -27,6 +27,7 @@ #include "absl/log/check.h" #include "ortools/base/logging.h" +#include "ortools/util/time_limit.h" namespace operations_research { namespace sat { @@ -53,7 +54,8 @@ namespace sat { template class InclusionDetector { public: - explicit InclusionDetector(const Storage& storage) : storage_(storage) {} + explicit InclusionDetector(const Storage& storage, TimeLimit* time_limit) + : storage_(storage), time_limit_(time_limit) {} // Resets the class to an empty state. void Reset() { @@ -97,7 +99,7 @@ class InclusionDetector { const std::function& process); // Function that should only be used from within "process()". - // Returns the bitset corresponsing to the elements of the current superset + // Returns the bitset corresponding to the elements of the current superset // passed to the process() function. 
std::vector IsInSuperset() const { return is_in_superset_; } @@ -128,6 +130,8 @@ class InclusionDetector { // Allows to access the elements of each candidates via storage_[index]; const Storage& storage_; + TimeLimit* time_limit_; + // List of candidates, this will be sorted. struct Candidate { int index; // Storage index. @@ -216,6 +220,10 @@ inline void InclusionDetector::DetectInclusions( DCHECK(signatures_.empty()); DCHECK(one_watcher_.empty()); + // We check each time our work_done_ has increased by more than this. + constexpr int64_t kCheckTimeLimitInterval = 1000; + int64_t next_time_limit_check = kCheckTimeLimitInterval; + // Main algo. work_done_ = 0; std::stable_sort(candidates_.begin(), candidates_.end()); @@ -250,6 +258,10 @@ inline void InclusionDetector::DetectInclusions( // Find any subset included in current superset. work_done_ += 2 * superset.size; if (work_done_ > work_limit_) return Stop(); + if (work_done_ > next_time_limit_check) { + if (time_limit_->LimitReached()) return Stop(); + next_time_limit_check = work_done_ + kCheckTimeLimitInterval; + } // We make a copy because process() might alter the content of the // storage when it returns "stop_with_current_superset_" and we need @@ -278,6 +290,10 @@ inline void InclusionDetector::DetectInclusions( bool is_included = true; work_done_ += subset.size; if (work_done_ > work_limit_) return Stop(); + if (work_done_ > next_time_limit_check) { + if (time_limit_->LimitReached()) return Stop(); + next_time_limit_check = work_done_ + kCheckTimeLimitInterval; + } for (const int subset_e : storage_[subset.index]) { if (!is_in_superset_[subset_e]) { is_included = false; @@ -291,6 +307,10 @@ inline void InclusionDetector::DetectInclusions( if (stop_) return; if (work_done_ > work_limit_) return Stop(); + if (work_done_ > next_time_limit_check) { + if (time_limit_->LimitReached()) return Stop(); + next_time_limit_check = work_done_ + kCheckTimeLimitInterval; + } if (stop_with_current_subset_) { // Remove 
from the watcher list. diff --git a/ortools/sat/inclusion_test.cc b/ortools/sat/inclusion_test.cc index 7f5276708f..94465b95f7 100644 --- a/ortools/sat/inclusion_test.cc +++ b/ortools/sat/inclusion_test.cc @@ -21,6 +21,7 @@ #include "gtest/gtest.h" #include "ortools/base/gmock.h" #include "ortools/sat/util.h" +#include "ortools/util/time_limit.h" namespace operations_research { namespace sat { @@ -28,7 +29,8 @@ namespace { TEST(InclusionDetectorTest, SymmetricExample) { CompactVectorVector storage; - InclusionDetector detector(storage); + TimeLimit time_limit; + InclusionDetector detector(storage, &time_limit); detector.AddPotentialSet(storage.Add({1, 2})); detector.AddPotentialSet(storage.Add({1, 3})); detector.AddPotentialSet(storage.Add({1, 2, 3})); @@ -47,7 +49,8 @@ TEST(InclusionDetectorTest, SymmetricExample) { // If sets are duplicates, we do not detect both inclusions, but just one. TEST(InclusionDetectorTest, DuplicateBehavior) { CompactVectorVector storage; - InclusionDetector detector(storage); + TimeLimit time_limit; + InclusionDetector detector(storage, &time_limit); detector.AddPotentialSet(storage.Add({1, 2})); detector.AddPotentialSet(storage.Add({1, 2})); detector.AddPotentialSet(storage.Add({1, 2})); @@ -65,7 +68,8 @@ TEST(InclusionDetectorTest, DuplicateBehavior) { TEST(InclusionDetectorTest, NonSymmetricExample) { CompactVectorVector storage; - InclusionDetector detector(storage); + TimeLimit time_limit; + InclusionDetector detector(storage, &time_limit); // Index 0, 1, 2 detector.AddPotentialSubset(storage.Add({1, 2})); @@ -119,7 +123,8 @@ TEST(InclusionDetectorTest, NonSymmetricExample) { TEST(InclusionDetectorTest, InclusionChain) { CompactVectorVector storage; - InclusionDetector detector(storage); + TimeLimit time_limit; + InclusionDetector detector(storage, &time_limit); detector.AddPotentialSet(storage.Add({1})); detector.AddPotentialSet(storage.Add({1, 2})); detector.AddPotentialSet(storage.Add({1, 2, 3})); @@ -147,7 +152,8 @@ 
TEST(InclusionDetectorTest, InclusionChain) { TEST(InclusionDetectorTest, RandomTest) { absl::BitGen random; CompactVectorVector storage; - InclusionDetector detector(storage); + TimeLimit time_limit; + InclusionDetector detector(storage, &time_limit); std::vector temp; for (int i = 0; i < 1000; ++i) { diff --git a/ortools/sat/integer_expr_test.cc b/ortools/sat/integer_expr_test.cc index 93c02494e5..ac26d54423 100644 --- a/ortools/sat/integer_expr_test.cc +++ b/ortools/sat/integer_expr_test.cc @@ -73,8 +73,8 @@ void AddWeightedSumGreaterOrEqualReif(Literal is_ge, // Weighted sum == constant reified. // TODO(user): Simplify if the constant is at the edge of the possible values. void AddFixedWeightedSumReif(Literal is_eq, - const std::vector& vars, - const std::vector& coefficients, + absl::Span vars, + absl::Span coefficients, int64_t value, Model* model) { // We creates two extra Boolean variables in this case. The alternative is // to code a custom propagator for the direction equality => reified. diff --git a/ortools/sat/linear_constraint.cc b/ortools/sat/linear_constraint.cc index 046f27f968..f4a645f1c5 100644 --- a/ortools/sat/linear_constraint.cc +++ b/ortools/sat/linear_constraint.cc @@ -155,6 +155,13 @@ LinearConstraint LinearConstraintBuilder::BuildConstraint(IntegerValue lb, return result; } +bool LinearConstraintBuilder::BuildIntoConstraintAndCheckOverflow( + IntegerValue lb, IntegerValue ub, LinearConstraint* ct) { + ct->lb = lb > kMinIntegerValue ? lb - offset_ : lb; + ct->ub = ub < kMaxIntegerValue ? 
ub - offset_ : ub; + return MergePositiveVariableTermsAndCheckForOverflow(&terms_, ct); +} + LinearExpression LinearConstraintBuilder::BuildExpression() { LinearExpression result; CleanTermsAndFillConstraint(&terms_, &result); diff --git a/ortools/sat/linear_constraint.h b/ortools/sat/linear_constraint.h index 10a7411b96..9a88f4aaf8 100644 --- a/ortools/sat/linear_constraint.h +++ b/ortools/sat/linear_constraint.h @@ -274,6 +274,11 @@ class LinearConstraintBuilder { LinearConstraint Build(); LinearConstraint BuildConstraint(IntegerValue lb, IntegerValue ub); + // Similar to BuildConstraint() but make sure we don't overflow while we merge + // terms referring to the same variables. + bool BuildIntoConstraintAndCheckOverflow(IntegerValue lb, IntegerValue ub, + LinearConstraint* ct); + // Returns the linear expression part of the constraint only, without the // bounds. LinearExpression BuildExpression(); @@ -398,6 +403,41 @@ inline void CleanTermsAndFillConstraint( output->resize(new_size); } +inline bool MergePositiveVariableTermsAndCheckForOverflow( + std::vector>* terms, + LinearConstraint* output) { + // Sort and add coeff of duplicate variables. Note that a variable and + // its negation will appear one after another in the natural order. 
+ int new_size = 0; + output->resize(terms->size()); + std::sort(terms->begin(), terms->end()); + IntegerVariable previous_var = kNoIntegerVariable; + int64_t current_coeff = 0; + for (const std::pair& entry : *terms) { + DCHECK(VariableIsPositive(entry.first)); + if (previous_var == entry.first) { + if (AddIntoOverflow(entry.second.value(), ¤t_coeff)) { + return false; + } + } else { + if (current_coeff != 0) { + output->vars[new_size] = previous_var; + output->coeffs[new_size] = current_coeff; + ++new_size; + } + previous_var = entry.first; + current_coeff = entry.second.value(); + } + } + if (current_coeff != 0) { + output->vars[new_size] = previous_var; + output->coeffs[new_size] = current_coeff; + ++new_size; + } + output->resize(new_size); + return true; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/linear_constraint_manager.cc b/ortools/sat/linear_constraint_manager.cc index ec2d67e595..f9025e06dc 100644 --- a/ortools/sat/linear_constraint_manager.cc +++ b/ortools/sat/linear_constraint_manager.cc @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -29,6 +30,7 @@ #include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" +#include "absl/types/span.h" #include "ortools/base/hash.h" #include "ortools/base/logging.h" #include "ortools/base/strong_vector.h" @@ -47,6 +49,167 @@ namespace operations_research { namespace sat { +LinearConstraintSymmetrizer::~LinearConstraintSymmetrizer() { + if (!VLOG_IS_ON(1)) return; + std::vector> stats; + stats.push_back({"symmetrizer/overflows", num_overflows_}); + shared_stats_->AddStats(stats); +} + +void LinearConstraintSymmetrizer::AddSymmetryOrbit( + IntegerVariable sum_var, absl::Span orbit) { + CHECK_GT(orbit.size(), 1); + + // Store the orbit info. 
+ const int orbit_index = orbits_.size(); + has_symmetry_ = true; + orbit_sum_vars_.push_back(sum_var); + orbits_.Add(orbit); + + // And fill var_to_orbit_index_. + const int new_size = integer_trail_->NumIntegerVariables().value() / 2; + if (var_to_orbit_index_.size() < new_size) { + var_to_orbit_index_.resize(new_size, -1); + } + DCHECK(VariableIsPositive(sum_var)); + DCHECK_EQ(var_to_orbit_index_[GetPositiveOnlyIndex(sum_var)], -1); + var_to_orbit_index_[GetPositiveOnlyIndex(sum_var)] = orbit_index; + for (const IntegerVariable var : orbit) { + DCHECK(VariableIsPositive(var)); + DCHECK_EQ(var_to_orbit_index_[GetPositiveOnlyIndex(var)], -1); + var_to_orbit_index_[GetPositiveOnlyIndex(var)] = orbit_index; + } +} + +// What we do here is basically equivalent to adding all the possible +// permutations (under the problem symmetry group) of the constraint together. +// When we do that all variables in the same orbit will have the same +// coefficient (TODO(user): think how to prove this properly and especially +// that the scaling is in 1/orbit_size) and will each appear once. We then +// substitute each sum by the sum over the orbit, and divide coefficient by +// their gcd. +// +// Any solution of the original LP can be transformed to a solution of the +// folded LP with the same objective. So the folded LP will give us a tight and +// valid objective lower bound but with a lot less variables! This is an +// adaptation of "LP folding" to use in a MIP context. Introducing the orbit sum +// allow to propagates and add cuts as these sum are still integer for us. +// +// The only issue is regarding scaling of the constraints. Basically each +// orbit sum variable will appear with a factor 1/orbit_size in the original +// constraint. +// +// We will remap & scale the constraint. +// If not possible, we will drop it for now. 
+bool LinearConstraintSymmetrizer::FoldLinearConstraint(LinearConstraint* ct) { + if (!has_symmetry_) return true; + + // We assume the constraint had basic preprocessing with tight lb/ub for + // instance. First pass is to compute the scaling factor. + int64_t scaling_factor = 1; + for (int i = 0; i < ct->num_terms; ++i) { + const IntegerVariable var = ct->vars[i]; + CHECK(VariableIsPositive(var)); + + const int orbit_index = var_to_orbit_index_[GetPositiveOnlyIndex(var)]; + if (orbit_index == -1 || orbit_sum_vars_[orbit_index] == var) { + // If we have an orbit of size one, or the variable is its own + // representative (orbit sum), skip. + continue; + } + + // Update the scaling factor. + const int orbit_size = orbits_[orbit_index].size(); + if (AtMinOrMaxInt64(CapProd(orbit_size, scaling_factor))) { + ++num_overflows_; + VLOG(2) << "SYMMETRY skip constraint due to overflow"; + return false; + } + scaling_factor = std::lcm(scaling_factor, orbit_size); + } + + if (scaling_factor == 1) { + // No symmetric variables. + return true; + } + + // We need to multiply each term by scaling_factor / orbit_size. + // + // TODO(user): Now that we know the actual coefficient we could scale less. + // Maybe the coefficient of an orbit_var is already divisible by orbit_size. 
+ builder_.Clear(); + for (int i = 0; i < ct->num_terms; ++i) { + const IntegerVariable var = ct->vars[i]; + const IntegerValue coeff = ct->coeffs[i]; + + const int orbit_index = var_to_orbit_index_[GetPositiveOnlyIndex(var)]; + if (orbit_index == -1 || orbit_sum_vars_[orbit_index] == var) { + const int64_t scaled_coeff = CapProd(coeff.value(), scaling_factor); + if (AtMinOrMaxInt64(scaled_coeff)) { + ++num_overflows_; + VLOG(2) << "SYMMETRY skip constraint due to overflow"; + return false; + } + builder_.AddTerm(var, scaled_coeff); + } else { + const int64_t orbit_size = orbits_[orbit_index].size(); + const int64_t factor = scaling_factor / orbit_size; + const int64_t scaled_coeff = CapProd(coeff.value(), factor); + if (AtMinOrMaxInt64(scaled_coeff)) { + ++num_overflows_; + VLOG(2) << "SYMMETRY skip constraint due to overflow"; + return false; + } + builder_.AddTerm(orbit_sum_vars_[orbit_index], scaled_coeff); + } + } + + if (AtMinOrMaxInt64(CapProd(ct->lb.value(), scaling_factor)) || + AtMinOrMaxInt64(CapProd(ct->ub.value(), scaling_factor))) { + ++num_overflows_; + VLOG(2) << "SYMMETRY skip constraint due to lb/ub overflow"; + return false; + } + if (!builder_.BuildIntoConstraintAndCheckOverflow( + ct->lb * scaling_factor, ct->ub * scaling_factor, ct)) { + ++num_overflows_; + VLOG(2) << "SYMMETRY skip constraint due to overflow"; + return false; + } + + // Dividing by gcd can help. + DivideByGCD(ct); + if (PossibleOverflow(*integer_trail_, *ct)) { + ++num_overflows_; + VLOG(2) << "SYMMETRY skip constraint due to overflow factor = " + << scaling_factor; + return false; + } + + // TODO(user): In some cases, this constraint will propagate/fix directly + // the orbit sum variables, we might want to propagate this in the cp world? + // This migth also remove bad scaling. 
+ return true; +} + +int LinearConstraintSymmetrizer::OrbitIndex(IntegerVariable var) const { + if (!has_symmetry_) return -1; + return var_to_orbit_index_[GetPositiveOnlyIndex(var)]; +} + +bool LinearConstraintSymmetrizer::IsOrbitSumVar(IntegerVariable var) const { + if (!has_symmetry_) return false; + const int orbit_index = var_to_orbit_index_[GetPositiveOnlyIndex(var)]; + return orbit_index >= 0 && orbit_sum_vars_[orbit_index] == var; +} + +bool LinearConstraintSymmetrizer::AppearInFoldedProblem( + IntegerVariable var) const { + if (!has_symmetry_) return true; + const int orbit_index = var_to_orbit_index_[GetPositiveOnlyIndex(var)]; + return orbit_index == -1 || orbit_sum_vars_[orbit_index] == var; +} + namespace { const LinearConstraintManager::ConstraintIndex kInvalidConstraintIndex(-1); @@ -146,9 +309,15 @@ LinearConstraintManager::ConstraintIndex LinearConstraintManager::Add( SimplifyConstraint(&ct); DivideByGCD(&ct); MakeAllVariablesPositive(&ct); - CHECK(std::is_sorted(ct.VarsAsSpan().begin(), ct.VarsAsSpan().end())); DCHECK(DebugCheckConstraint(ct)); + // If configured, store instead the folded version of this constraint. + // TODO(user): Shall we simplify again? + if (symmetrizer_->HasSymmetry() && !symmetrizer_->FoldLinearConstraint(&ct)) { + return kInvalidConstraintIndex; + } + CHECK(std::is_sorted(ct.VarsAsSpan().begin(), ct.VarsAsSpan().end())); + // If an identical constraint exists, only updates its bound. 
const size_t key = ComputeHashOfTerms(ct); if (equiv_constraints_.contains(key)) { diff --git a/ortools/sat/linear_constraint_manager.h b/ortools/sat/linear_constraint_manager.h index 2085fa63d4..a58d09dc59 100644 --- a/ortools/sat/linear_constraint_manager.h +++ b/ortools/sat/linear_constraint_manager.h @@ -29,6 +29,7 @@ #include "ortools/sat/linear_constraint.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/synchronization.h" #include "ortools/sat/util.h" #include "ortools/util/logging.h" #include "ortools/util/strong_integers.h" @@ -54,6 +55,82 @@ struct ModelReducedCosts ModelReducedCosts() = default; }; +// Knowing the symmetry of the IP problem should allow us to +// solve the LP faster via "folding" techniques. +// +// You can read this for the LP part: "Dimension Reduction via Colour +// Refinement", Martin Grohe, Kristian Kersting, Martin Mladenov, Erkal +// Selman, https://arxiv.org/abs/1307.5697 +// +// In the presence of symmetry, by considering all symmetric version of a +// constraint and summing them, we can derive a new constraint using the sum +// of the variable on each orbit instead of the individual variables. +// +// For the integration in a MIP solver, I couldn't find any reference. The way +// I did it here is to introduce for each orbit a variable representing the +// sum of the orbit variable. This allows to represent the folded LP in terms +// of these variables (that are connected to the rest of the solver) and just +// reuse the full machinery. +class LinearConstraintSymmetrizer { + public: + explicit LinearConstraintSymmetrizer(Model* model) + : shared_stats_(model->GetOrCreate()), + integer_trail_(model->GetOrCreate()) {} + ~LinearConstraintSymmetrizer(); + + // This must be called with all orbits before we call FoldLinearConstraint(). + // Note that sum_var MUST not be in any of the orbits. All orbits must also be + // disjoint. 
+ // + // Precondition: All IntegerVariable must be positive. + void AddSymmetryOrbit(IntegerVariable sum_var, + absl::Span orbit); + + // If there are no symmetry, we shouldn't bother calling the functions below. + // Note that they will still work, but be no-op. + bool HasSymmetry() const { return has_symmetry_; } + + // Accessors by orbit index in [0, num_orbits). + int NumOrbits() const { return orbits_.size(); } + IntegerVariable OrbitSumVar(int i) const { return orbit_sum_vars_[i]; } + absl::Span Orbit(int i) const { return orbits_[i]; } + + // Returns the orbit number in [0, num_orbits) if var belong to a non-trivial + // orbit or if it is a "orbit_sum_var". Returns -1 otherwise. + int OrbitIndex(IntegerVariable var) const; + + // Returns true iff var is one of the sum_var passed to AddSymmetryOrbit(). + bool IsOrbitSumVar(IntegerVariable var) const; + + // This will be only true for variable not appearing in any orbit and for + // the orbit sum variables. + bool AppearInFoldedProblem(IntegerVariable var) const; + + // Given a constraint on the "original" model variables, try to construct a + // symmetric version of it using the orbit sum variables. This might fail if + // we encounter integer overflow. Returns true on success. On failure, the + // original constraints will not be usable. + // + // Preconditions: All IntegerVariable must be positive. And the constraint + // lb/ub must be tight and not +/- int64_t max. + bool FoldLinearConstraint(LinearConstraint* ct); + + private: + SharedStatistics* shared_stats_; + IntegerTrail* integer_trail_; + + bool has_symmetry_ = false; + int64_t num_overflows_ = 0; + LinearConstraintBuilder builder_; + + // We index our vector by positive variable only. 
+ util_intops::StrongVector var_to_orbit_index_; + + // Orbit info index by number in [0, num_orbits); + std::vector orbit_sum_vars_; + CompactVectorVector orbits_; +}; + // This class holds a list of globally valid linear constraints and has some // logic to decide which one should be part of the LP relaxation. We want more // for a better relaxation, but for efficiency we do not want to have too much @@ -106,7 +183,7 @@ class LinearConstraintManager { expanded_lp_solution_(*model->GetOrCreate()), expanded_reduced_costs_(*model->GetOrCreate()), model_(model), - logger_(model->GetOrCreate()) {} + symmetrizer_(model->GetOrCreate()) {} ~LinearConstraintManager(); // Add a new constraint to the manager. Note that we canonicalize constraints @@ -279,7 +356,7 @@ class LinearConstraintManager { ModelLpValues& expanded_lp_solution_; ModelReducedCosts& expanded_reduced_costs_; Model* model_; - SolverLogger* logger_; + LinearConstraintSymmetrizer* symmetrizer_; // We want to decay the active counts of all constraints at each call and // increase the active counts of active/violated constraints. 
However this can diff --git a/ortools/sat/linear_constraint_manager_test.cc b/ortools/sat/linear_constraint_manager_test.cc index e842599ca5..3e1c182d1e 100644 --- a/ortools/sat/linear_constraint_manager_test.cc +++ b/ortools/sat/linear_constraint_manager_test.cc @@ -39,6 +39,64 @@ using ::testing::StartsWith; using ::testing::UnorderedElementsAre; using ConstraintIndex = LinearConstraintManager::ConstraintIndex; +TEST(LinearConstraintSymmetrizerTest, BasicFolding) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable a = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable b = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable c = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable sum_xy = model.Add(NewIntegerVariable(-20, 20)); + const IntegerVariable sum_abc = model.Add(NewIntegerVariable(-30, 30)); + auto* symmetrizer = model.GetOrCreate(); + symmetrizer->AddSymmetryOrbit(sum_xy, {x, y}); + symmetrizer->AddSymmetryOrbit(sum_abc, {a, b, c}); + + LinearConstraintBuilder builder(0, 10); + builder.AddTerm(x, IntegerValue(2)); + builder.AddTerm(y, IntegerValue(1)); + builder.AddTerm(z, IntegerValue(3)); + builder.AddTerm(a, IntegerValue(2)); + builder.AddTerm(c, IntegerValue(5)); + LinearConstraint ct = builder.Build(); + symmetrizer->FoldLinearConstraint(&ct); + + // We will scale by 6 (one orbit of size 2 and one of size 3). 
+ builder.Clear(); + builder.AddTerm(z, IntegerValue(6 * 3)); + builder.AddTerm(sum_xy, IntegerValue(6 / 2 * (2 + 1))); + builder.AddTerm(sum_abc, IntegerValue(6 / 3 * (2 + 5))); + LinearConstraint expected = builder.BuildConstraint(0, 6 * 10); + EXPECT_EQ(ct.DebugString(), expected.DebugString()); +} + +TEST(LinearConstraintSymmetrizerTest, FoldingWithSumVariableOriginallyPresent) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable sum_xy = model.Add(NewIntegerVariable(-20, 20)); + auto* symmetrizer = model.GetOrCreate(); + symmetrizer->AddSymmetryOrbit(sum_xy, {x, y}); + + LinearConstraintBuilder builder(0, 10); + builder.AddTerm(x, IntegerValue(2)); + builder.AddTerm(y, IntegerValue(1)); + builder.AddTerm(z, IntegerValue(3)); + builder.AddTerm(sum_xy, IntegerValue(7)); + LinearConstraint ct = builder.Build(); + symmetrizer->FoldLinearConstraint(&ct); + + // We will scale by 2 the original sum_xy, and by 1 the one coming from the + // orbit with coeff (2 + 1) from x and y. 
+ builder.Clear(); + builder.AddTerm(z, IntegerValue(2 * 3)); + builder.AddTerm(sum_xy, IntegerValue(2 * 7 + 2 / 2 * (2 + 1))); + LinearConstraint expected = builder.BuildConstraint(0, 2 * 10); + EXPECT_EQ(ct.DebugString(), expected.DebugString()); +} + TEST(LinearConstraintManagerTest, DuplicateDetection) { Model model; LinearConstraintManager manager(&model); diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index 52fb78652c..c2382745cb 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -27,6 +27,7 @@ #include #include +#include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/numeric/int128.h" @@ -280,6 +281,7 @@ LinearProgrammingConstraint::LinearProgrammingConstraint( shared_stats_(model->GetOrCreate()), shared_response_manager_(model->GetOrCreate()), random_(model->GetOrCreate()), + symmetrizer_(model->GetOrCreate()), rlt_cut_helper_(model), implied_bounds_processor_({}, integer_trail_, model->GetOrCreate()), @@ -311,18 +313,22 @@ LinearProgrammingConstraint::LinearProgrammingConstraint( // Initialize the IntegerVariable -> ColIndex mapping. CHECK(std::is_sorted(vars.begin(), vars.end())); - integer_variables_.assign(vars.begin(), vars.end()); + // TODO(user): We shouldn't need to add variable from the orbit here in the + // presence of symmetry. However they can still appear in cut, so it is a + // bit tricky and require some refactoring to be tried. 
ColIndex col{0}; + integer_variables_.assign(vars.begin(), vars.end()); for (const IntegerVariable positive_variable : vars) { CHECK(VariableIsPositive(positive_variable)); implied_bounds_processor_.AddLpVariable(positive_variable); (*dispatcher_)[positive_variable] = this; mirror_lp_variable_[positive_variable] = col; - ++col; } - lp_solution_.assign(vars.size(), std::numeric_limits::infinity()); - lp_reduced_cost_.assign(vars.size(), 0.0); + + lp_solution_.assign(integer_variables_.size(), + std::numeric_limits::infinity()); + lp_reduced_cost_.assign(integer_variables_.size(), 0.0); if (!vars.empty()) { const int max_index = NegationOf(vars.back()).value(); @@ -340,6 +346,63 @@ void LinearProgrammingConstraint::AddLinearConstraint(LinearConstraint ct) { constraint_manager_.Add(std::move(ct)); } +void LinearProgrammingConstraint::RegisterWith(Model* model) { + DCHECK(!lp_constraint_is_registered_); + lp_constraint_is_registered_ = true; + model->GetOrCreate()->push_back(this); + + // Copy objective data to the constraint_manager_. + // + // Note(user): the sort is not really needed but should lead to better cache + // locality. + std::sort(integer_objective_.begin(), integer_objective_.end()); + objective_infinity_norm_ = 0; + for (const auto [col, coeff] : integer_objective_) { + constraint_manager_.SetObjectiveCoefficient(integer_variables_[col.value()], + coeff); + objective_infinity_norm_ = + std::max(objective_infinity_norm_, IntTypeAbs(coeff)); + } + + // Set the LP to its initial content. + // + // Note that we always add LP constraint lazily if we have A LOT of them. + // This is because currently on large problem with millions of constraints, + // our LP is usually not fast enough anyway. 
+ if (!parameters_.add_lp_constraints_lazily() && + constraint_manager_.num_constraints() < 1e6) { + constraint_manager_.AddAllConstraintsToLp(); + } + if (!CreateLpFromConstraintManager()) { + model->GetOrCreate()->NotifyThatModelIsUnsat(); + return; + } + + watcher_id_ = watcher_->Register(this); + const int num_vars = integer_variables_.size(); + orbit_indices_.clear(); + for (int i = 0; i < num_vars; i++) { + const IntegerVariable pos_var = integer_variables_[i]; + if (symmetrizer_->AppearInFoldedProblem(pos_var)) { + watcher_->WatchIntegerVariable(pos_var, watcher_id_, i); + } + + if (symmetrizer_->IsOrbitSumVar(pos_var)) { + orbit_indices_.push_back(symmetrizer_->OrbitIndex(pos_var)); + } + } + if (objective_is_defined_) { + watcher_->WatchUpperBound(objective_cp_, watcher_id_); + } + watcher_->SetPropagatorPriority(watcher_id_, 2); + watcher_->AlwaysCallAtLevelZero(watcher_id_); + + // Registering it with the trail make sure this class is always in sync when + // it is used in the decision heuristics. + integer_trail_->RegisterReversibleClass(this); + watcher_->RegisterReversibleInt(watcher_id_, &rev_optimal_constraints_size_); +} + glop::ColIndex LinearProgrammingConstraint::GetMirrorVariable( IntegerVariable positive_variable) { DCHECK(VariableIsPositive(positive_variable)); @@ -352,12 +415,7 @@ void LinearProgrammingConstraint::SetObjectiveCoefficient(IntegerVariable ivar, objective_is_defined_ = true; IntegerVariable pos_var = VariableIsPositive(ivar) ? ivar : NegationOf(ivar); if (ivar != pos_var) coeff = -coeff; - - constraint_manager_.SetObjectiveCoefficient(pos_var, coeff); - const glop::ColIndex col = GetMirrorVariable(pos_var); - integer_objective_.push_back({col, coeff}); - objective_infinity_norm_ = - std::max(objective_infinity_norm_, IntTypeAbs(coeff)); + integer_objective_.push_back({GetMirrorVariable(pos_var), coeff}); } // TODO(user): As the search progress, some variables might get fixed. 
Exploit @@ -670,46 +728,6 @@ void LinearProgrammingConstraint::FillReducedCostReasonIn( integer_trail_->RemoveLevelZeroBounds(integer_reason); } -void LinearProgrammingConstraint::RegisterWith(Model* model) { - DCHECK(!lp_constraint_is_registered_); - lp_constraint_is_registered_ = true; - model->GetOrCreate()->push_back(this); - - // Note fdid, this is not really needed by should lead to better cache - // locality. - std::sort(integer_objective_.begin(), integer_objective_.end()); - - // Set the LP to its initial content. - // - // Note that we always add LP constraint lazily if we have A LOT of them. - // This is because currently on large problem with millions of constraints, - // our LP is usually not fast enough anyway. - if (!parameters_.add_lp_constraints_lazily() && - constraint_manager_.num_constraints() < 1e6) { - constraint_manager_.AddAllConstraintsToLp(); - } - if (!CreateLpFromConstraintManager()) { - model->GetOrCreate()->NotifyThatModelIsUnsat(); - return; - } - - watcher_id_ = watcher_->Register(this); - const int num_vars = integer_variables_.size(); - for (int i = 0; i < num_vars; i++) { - watcher_->WatchIntegerVariable(integer_variables_[i], watcher_id_, i); - } - if (objective_is_defined_) { - watcher_->WatchUpperBound(objective_cp_, watcher_id_); - } - watcher_->SetPropagatorPriority(watcher_id_, 2); - watcher_->AlwaysCallAtLevelZero(watcher_id_); - - // Registering it with the trail make sure this class is always in sync when - // it is used in the decision heuristics. - integer_trail_->RegisterReversibleClass(this); - watcher_->RegisterReversibleInt(watcher_id_, &rev_optimal_constraints_size_); -} - void LinearProgrammingConstraint::SetLevel(int level) { // Get rid of all optimal constraint each time we go back to level zero. 
if (level == 0) rev_optimal_constraints_size_ = 0; @@ -820,11 +838,6 @@ double LinearProgrammingConstraint::GetSolutionValue( return lp_solution_[mirror_lp_variable_.at(variable).value()]; } -double LinearProgrammingConstraint::GetSolutionReducedCost( - IntegerVariable variable) const { - return lp_reduced_cost_[mirror_lp_variable_.at(variable).value()]; -} - void LinearProgrammingConstraint::UpdateBoundsOfLpVariables() { const int num_vars = integer_variables_.size(); Fractional* lb_with_slack = simplex_.MutableLowerBounds()->data(); @@ -853,10 +866,19 @@ bool LinearProgrammingConstraint::SolveLp() { const auto status = simplex_.MinimizeFromTransposedMatrixWithSlack( obj_with_slack_, unscaling_factor, offset_before_unscaling, time_limit_); + // Lets resolve from scratch if we encounter this status. + if (simplex_.GetProblemStatus() == glop::ProblemStatus::ABNORMAL) { + VLOG(2) << "The LP solver returned abnormal, resolving from scratch"; + simplex_.ClearStateForNextSolve(); + const auto status = simplex_.MinimizeFromTransposedMatrixWithSlack( + obj_with_slack_, unscaling_factor, offset_before_unscaling, + time_limit_); + } + state_ = simplex_.GetState(); total_num_simplex_iterations_ += simplex_.GetNumberOfIterations(); if (!status.ok()) { - VLOG(1) << "The LP solver encountered an error: " << status.error_message(); + VLOG(2) << "The LP solver encountered an error: " << status.error_message(); simplex_.ClearStateForNextSolve(); return false; } @@ -895,15 +917,67 @@ bool LinearProgrammingConstraint::SolveLp() { const auto reduced_costs = simplex_.GetReducedCosts().const_view(); for (int i = 0; i < num_vars; i++) { const glop::ColIndex col(i); + const IntegerVariable var = integer_variables_[i]; + const glop::Fractional value = GetVariableValueAtCpScale(col); lp_solution_[i] = value; - expanded_lp_solution_[integer_variables_[i]] = value; - expanded_lp_solution_[NegationOf(integer_variables_[i])] = -value; + expanded_lp_solution_[var] = value; + 
expanded_lp_solution_[NegationOf(var)] = -value; const glop::Fractional rc = scaler_.UnscaleReducedCost(col, reduced_costs[col]); - expanded_reduced_costs_[integer_variables_[i]] = rc; - expanded_reduced_costs_[NegationOf(integer_variables_[i])] = -rc; + lp_reduced_cost_[i] = rc; + expanded_reduced_costs_[var] = rc; + expanded_reduced_costs_[NegationOf(var)] = -rc; + } + + // Lets fix the result in case of symmetry since the variable in symmetry + // are actually not part of the LP, they will just be at their bounds. + for (const int orbit_index : orbit_indices_) { + const IntegerVariable sum_var = symmetrizer_->OrbitSumVar(orbit_index); + const absl::Span orbit = + symmetrizer_->Orbit(orbit_index); + + // We assign sum / orbit_size to each variables. + // This is still an LP optimal, but not necessarily a good heuristic. + // + // TODO(user): using sum / orbit_size is good for the cut generation that + // might still use these variables, any violated cuts on the original + // problem where all variables in the orbit have the same value will + // result in a violated cut for the folded problem. However it is probably + // not so good for the heuristics that uses the LP values. In particular + // it might result in LP value not even within the bounds of the + // individual variable since as we branch, we don't have an identical + // domain for all variables in an orbit. Maybe we can generate two + // solutions vectors, one for the cuts and one for the heuristics, or we + // can add custom code to the cuts so that they don't depend on this. + const double new_value = + expanded_lp_solution_[sum_var] / static_cast(orbit.size()); + + // For the reduced costs, they are the same. There should be no + // complication there. 
+ const double new_rc = expanded_reduced_costs_[sum_var]; + + for (const IntegerVariable var : orbit) { + const glop::ColIndex col = GetMirrorVariable(var); + lp_solution_[col.value()] = new_value; + expanded_lp_solution_[var] = new_value; + expanded_lp_solution_[NegationOf(var)] = -new_value; + + lp_reduced_cost_[col.value()] = new_rc; + expanded_reduced_costs_[var] = new_rc; + expanded_reduced_costs_[NegationOf(var)] = -new_rc; + } + } + + // Compute integrality. + lp_solution_is_integer_ = true; + for (int i = 0; i < num_vars; i++) { + if (std::abs(lp_solution_[i] - std::round(lp_solution_[i])) > + kCpEpsilon) { + lp_solution_is_integer_ = false; + break; + } } if (lp_solution_level_ == 0) { @@ -994,22 +1068,10 @@ bool LinearProgrammingConstraint::AnalyzeLp() { } // Copy more info about the current solution. - if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) { + if (compute_reduced_cost_averages_ && + simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) { CHECK(lp_solution_is_set_); - lp_solution_is_integer_ = true; - const int num_vars = integer_variables_.size(); - for (int i = 0; i < num_vars; i++) { - lp_reduced_cost_[i] = scaler_.UnscaleReducedCost( - glop::ColIndex(i), simplex_.GetReducedCost(glop::ColIndex(i))); - if (std::abs(lp_solution_[i] - std::round(lp_solution_[i])) > - kCpEpsilon) { - lp_solution_is_integer_ = false; - } - } - - if (compute_reduced_cost_averages_) { - UpdateAverageReducedCosts(); - } + UpdateAverageReducedCosts(); } // On some problem, LP solves and cut rounds can be slow, so we report @@ -1429,18 +1491,19 @@ bool LinearProgrammingConstraint::PostprocessAndAddCut( // it triggers. We should add heuristics to abort earlier if a cut is not // promising. Or only test a few positions and not all rows. void LinearProgrammingConstraint::AddCGCuts() { - // Note that the index is permuted and do not correspond to a row. 
+ std::vector> sorted_columns; const RowIndex num_rows(integer_lp_.size()); + glop::DenseColumn::ConstView norms = simplex_.GetDualSquaredNorms(); for (RowIndex index(0); index < num_rows; ++index) { - if (time_limit_->LimitReached()) break; - const ColIndex basis_col = simplex_.GetBasis(index); // We used to skip slack and also not to do "classical" gomory and instead // call IgnoreTrivialConstraintMultipliers() heuristic. It is usually faster - // but on some problem like neos*creuse, this do not find good cut though. + // but on some problem like neos*creuse or neos-888544, this do not find + // good cut though. // - // TODO(user): Tune this. + // TODO(user): Tune this. It seems better but we need to handle nicely the + // extra amount of cuts this produces. if (basis_col >= integer_variables_.size()) continue; // Get he variable value at cp-scale. Similar to GetVariableValueAtCpScale() @@ -1455,10 +1518,22 @@ void LinearProgrammingConstraint::AddCGCuts() { // TODO(user): We could just look at the diff with std::floor() in the hope // that when we are just under an integer, the exact computation below will // also be just under it. - if (std::abs(lp_value - std::round(lp_value)) < 0.01) continue; + const double fractionality = std::abs(lp_value - std::round(lp_value)); + if (fractionality < 0.01) continue; - // We multiply by row_factors_ directly, which might be slighly more precise - // than dividing by 1/factor like UnscaleLeftSolveValue() does. + const double score = fractionality * (1.0 - fractionality) / norms[index]; + sorted_columns.push_back({index, score}); + } + absl::c_sort(sorted_columns, [](const std::pair& a, + const std::pair& b) { + return a.second > b.second; + }); + + int num_added = 0; + for (const auto [index, _] : sorted_columns) { + if (time_limit_->LimitReached()) return; + // We multiply by row_factors_ directly, which might be slightly more + // precise than dividing by 1/factor like UnscaleLeftSolveValue() does. 
// // TODO(user): Avoid code duplication between the sparse/dense path. tmp_lp_multipliers_.clear(); @@ -1497,10 +1572,9 @@ void LinearProgrammingConstraint::AddCGCuts() { // Remove constraints that shouldn't be helpful. // // In practice, because we can complement the slack, it might still be - // useful to have some constraint with a trivial upper bound. That said, - // this does looks weird, maybe we miss something in our one-constraint - // cut generation if it is useful to add such a term. Investigate on - // neos-555884. + // useful to have some constraint with a trivial upper bound. Also + // removing this seem to generate a lot more cuts, so we need to be more + // efficient in dealing with them. if (true) { IgnoreTrivialConstraintMultipliers(&tmp_cg_multipliers_); if (tmp_cg_multipliers_.size() <= 1) continue; @@ -1508,9 +1582,14 @@ void LinearProgrammingConstraint::AddCGCuts() { tmp_integer_multipliers_ = ScaleMultipliers( tmp_cg_multipliers_, /*take_objective_into_account=*/false, &scaling); if (scaling != 0) { - AddCutFromConstraints("CG", tmp_integer_multipliers_); + if (AddCutFromConstraints("CG", tmp_integer_multipliers_)) { + ++num_added; + } } } + + // Stop if we already added more than 10 cuts this round. + if (num_added > 10) break; } } diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index a3f34096e9..715de7ba38 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -166,7 +166,6 @@ class LinearProgrammingConstraint : public PropagatorInterface, // at the current decision level. We "erase" it when we backtrack over it. 
bool HasSolution() const { return lp_solution_is_set_; } double GetSolutionValue(IntegerVariable variable) const; - double GetSolutionReducedCost(IntegerVariable variable) const; bool SolutionIsInteger() const { return lp_solution_is_integer_; } // Returns a valid lp lower bound for the current branch, and indicates if @@ -500,10 +499,14 @@ class LinearProgrammingConstraint : public PropagatorInterface, // they can be used as vector indices. // // TODO(user): This should be util_intops::StrongVector Except if we have too many LinearProgrammingConstraint. + // IntegerVariable>. std::vector integer_variables_; absl::flat_hash_map mirror_lp_variable_; + // This is only used if we use symmetry folding. + // Refer to relevant orbit in the LinearConstraintSymmetrizer. + std::vector orbit_indices_; + // We need to remember what to optimize if an objective is given, because // then we will switch the objective between feasibility and optimization. bool objective_is_defined_ = false; @@ -526,6 +529,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, SharedStatistics* shared_stats_; SharedResponseManager* shared_response_manager_; ModelRandomGenerator* random_; + LinearConstraintSymmetrizer* symmetrizer_; int watcher_id_; diff --git a/ortools/sat/presolve_context.cc b/ortools/sat/presolve_context.cc index 5a819566a3..d844d2f08a 100644 --- a/ortools/sat/presolve_context.cc +++ b/ortools/sat/presolve_context.cc @@ -743,8 +743,11 @@ void PresolveContext::UpdateNewConstraintsVariableUsage() { } bool PresolveContext::HasUnusedAffineVariable() const { + if (is_unsat_) return false; // We do not care in this case. 
+ if (keep_all_feasible_solutions) return false; for (int var = 0; var < working_model->variables_size(); ++var) { if (VariableIsNotUsedAnymore(var)) continue; + if (IsFixed(var)) continue; const auto& constraints = VarToConstraints(var); if (constraints.size() == 1 && constraints.contains(kAffineRelationConstraint) && @@ -1509,9 +1512,40 @@ bool PresolveContext::InsertHalfVarValueEncoding(int literal, int var, // Creates the linking sets on demand. // Insert the enforcement literal in the half encoding map. auto& direct_set = imply_eq ? eq_half_encoding_ : neq_half_encoding_; - if (!direct_set.insert({literal, var, value}).second) { + auto insert_result = direct_set.insert({{literal, var}, value}); + if (!insert_result.second) { + if (insert_result.first->second != value && imply_eq) { + UpdateRuleStats("variables: detect half reified incompatible value"); + return SetLiteralToFalse(literal); + } return false; // Already there. } + if (imply_eq) { + // We are adding b => x=v. Check if we already have ~b => x=u. + auto negated_encoding = direct_set.find({NegatedRef(literal), var}); + if (negated_encoding != direct_set.end()) { + if (negated_encoding->second == value) { + UpdateRuleStats( + "variables: both boolean and its negation imply same equality"); + if (!IntersectDomainWith(var, Domain(value))) { + return false; + } + } else { + const int64_t other_value = negated_encoding->second; + // b => var == value + // !b => var == other_value + // var = (value - other_value) * b + other_value + UpdateRuleStats( + "variables: both boolean and its negation fix the same variable"); + if (RefIsPositive(literal)) { + StoreAffineRelation(var, literal, value - other_value, other_value); + } else { + StoreAffineRelation(var, NegatedRef(literal), other_value - value, + value); + } + } + } + } VLOG(2) << "Collect lit(" << literal << ") implies var(" << var << (imply_eq ? 
") == " : ") != ") << value; UpdateRuleStats("variables: detect half reified value encoding"); @@ -1519,7 +1553,8 @@ bool PresolveContext::InsertHalfVarValueEncoding(int literal, int var, // Note(user): We don't expect a lot of literals in these sets, so doing // a scan should be okay. auto& other_set = imply_eq ? neq_half_encoding_ : eq_half_encoding_; - if (other_set.contains({NegatedRef(literal), var, value})) { + auto it = other_set.find({NegatedRef(literal), var}); + if (it != other_set.end() && it->second == value) { UpdateRuleStats("variables: detect fully reified value encoding"); const int imply_eq_literal = imply_eq ? literal : NegatedRef(literal); if (!InsertVarValueEncodingInternal(imply_eq_literal, var, value, @@ -1549,8 +1584,12 @@ bool PresolveContext::InsertVarValueEncoding(int literal, int var, /*add_constraints=*/true)) { return false; } - eq_half_encoding_.insert({literal, var, value}); - neq_half_encoding_.insert({NegatedRef(literal), var, value}); + if (!StoreLiteralImpliesVarEqValue(literal, var, value)) { + return false; + } + if (!StoreLiteralImpliesVarNEqValue(NegatedRef(literal), var, value)) { + return false; + } if (hint_is_loaded_) { const int bool_var = PositiveRef(literal); @@ -1797,6 +1836,10 @@ bool PresolveContext::CanonicalizeOneObjectiveVariable(int var) { RemoveVariableFromObjective(var); + // After we removed the variable from the objective it might have become a + // unused affine. Add it to the list of variables to check so we reprocess it. + modified_domains.Set(var); + // Do the substitution. 
AddToObjectiveOffset(coeff * r.offset); const int64_t new_coeff = objective_map_[r.representative] += coeff * r.coeff; diff --git a/ortools/sat/presolve_context.h b/ortools/sat/presolve_context.h index 7b11b6b66e..318cc895e0 100644 --- a/ortools/sat/presolve_context.h +++ b/ortools/sat/presolve_context.h @@ -741,8 +741,8 @@ class PresolveContext { // (literal, var, value), i.e.: literal => var ==/!= value // The state is accumulated (adding x => var == value then !x => var != value) // will deduce that x equivalent to var == value. - absl::flat_hash_set> eq_half_encoding_; - absl::flat_hash_set> neq_half_encoding_; + absl::flat_hash_map, int64_t> eq_half_encoding_; + absl::flat_hash_map, int64_t> neq_half_encoding_; // This regroups all the affine relations between variables. Note that the // constraints used to detect such relations will be removed from the model at diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index b7e73d92ac..6e03c28e2d 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -23,7 +23,7 @@ option java_multiple_files = true; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 300 +// NEXT TAG: 302 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -1327,6 +1327,13 @@ message SatParameters { // symmetry as possible in presolve. optional int32 symmetry_level = 183 [default = 2]; + // When we have symmetry, it is possible to "fold" all variables from the same + // orbit into a single variable, while having the same power of LP relaxation. + // This can help significantly on symmetric problem. However there is + // currently a bit of overhead as the rest of the solver need to do some + // translation between the folded LP and the rest of the problem. 
+ optional bool use_symmetry_in_lp = 301 [default = false]; + // The new linear propagation code treat all constraints at once and use // an adaptation of Bellman-Ford-Tarjan to propagate constraint in a smarter // order and potentially detect propagation cycle earlier. diff --git a/ortools/sat/scheduling_cuts_test.cc b/ortools/sat/scheduling_cuts_test.cc index 23a8b92bac..da3741ef55 100644 --- a/ortools/sat/scheduling_cuts_test.cc +++ b/ortools/sat/scheduling_cuts_test.cc @@ -500,7 +500,7 @@ TEST(ComputeMinSumOfEndMinsTest, Infeasible) { kMinIntegerValue, kMinIntegerValue)); } -int64_t ExactMakespan(const std::vector& sizes, std::vector& demands, +int64_t ExactMakespan(absl::Span sizes, std::vector& demands, int capacity) { const int64_t kHorizon = 1000; CpModelBuilder builder; diff --git a/ortools/sat/symmetry_util.cc b/ortools/sat/symmetry_util.cc index c1d96e0a38..3189d75b2e 100644 --- a/ortools/sat/symmetry_util.cc +++ b/ortools/sat/symmetry_util.cc @@ -156,12 +156,15 @@ std::vector GetOrbits( MergingPartition union_find; union_find.Reset(n); for (const std::unique_ptr& perm : generators) { + DCHECK(perm != nullptr); const int num_cycles = perm->NumCycles(); for (int i = 0; i < num_cycles; ++i) { // Note that there is currently no random access api like cycle[j]. 
int first; bool is_first = true; for (const int x : perm->Cycle(i)) { + DCHECK_GE(x, 0); + DCHECK_LT(x, n); if (is_first) { first = x; is_first = false; @@ -232,5 +235,21 @@ std::vector TracePoint( return result; } +std::unique_ptr CreateSparsePermutationFromProto( + int n, const SparsePermutationProto& proto) { + auto perm = std::make_unique(n); + int support_index = 0; + const int num_cycle = proto.cycle_sizes().size(); + for (int i = 0; i < num_cycle; ++i) { + const int size = proto.cycle_sizes(i); + for (int j = 0; j < size; ++j) { + DCHECK_LT(proto.support(support_index), n); + perm->AddToCurrentCycle(proto.support(support_index++)); + } + perm->CloseCurrentCycle(); + } + return perm; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/symmetry_util.h b/ortools/sat/symmetry_util.h index 5e5e813d6e..aeed85a60c 100644 --- a/ortools/sat/symmetry_util.h +++ b/ortools/sat/symmetry_util.h @@ -19,6 +19,7 @@ #include "absl/types/span.h" #include "ortools/algorithms/sparse_permutation.h" +#include "ortools/sat/cp_model.pb.h" namespace operations_research { namespace sat { @@ -75,6 +76,10 @@ std::vector TracePoint( int point, absl::Span schrier_vector, absl::Span> generators); +// Creates a SparsePermutation on [0, n) from its proto representation. +std::unique_ptr CreateSparsePermutationFromProto( + int n, const SparsePermutationProto& proto); + // Given the generators for a permutation group of [0, n-1], update it to // a set of generators of the group stabilizing the given element. 
// diff --git a/ortools/sat/symmetry_util_test.cc b/ortools/sat/symmetry_util_test.cc index 9b3a5b19f5..cdd57f6975 100644 --- a/ortools/sat/symmetry_util_test.cc +++ b/ortools/sat/symmetry_util_test.cc @@ -22,11 +22,13 @@ #include "gtest/gtest.h" #include "ortools/algorithms/sparse_permutation.h" #include "ortools/base/gmock.h" +#include "ortools/base/parse_test_proto.h" namespace operations_research { namespace sat { namespace { +using ::google::protobuf::contrib::parse_proto::ParseTestProto; using ::testing::ElementsAre; using ::testing::UnorderedElementsAre; @@ -155,6 +157,16 @@ TEST(GetSchreierVectorTest, ProjectivePlaneOrderTwo) { EXPECT_THAT(schrier_vector, ElementsAre(-1, -1, 0, 0, 1, 2, 2)); } +TEST(CreateSparsePermutationFromProtoTest, BasicReading) { + const SparsePermutationProto input = ParseTestProto(R"pb( + support: [ 1, 0, 3, 2, 7, 8, 9 ] + cycle_sizes: [ 2, 2, 3 ] + )pb"); + std::unique_ptr sp = + CreateSparsePermutationFromProto(10, input); + EXPECT_EQ(sp->DebugString(), "(0 1) (2 3) (7 8 9)"); +} + } // namespace } // namespace sat } // namespace operations_research