diff --git a/examples/cpp/cryptarithm.cc b/examples/cpp/cryptarithm.cc index 5f600bde69..a64cd10e8a 100644 --- a/examples/cpp/cryptarithm.cc +++ b/examples/cpp/cryptarithm.cc @@ -22,7 +22,6 @@ #include "base/commandlineflags.h" #include "base/commandlineflags.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "constraint_solver/constraint_solver.h" namespace operations_research { diff --git a/examples/cpp/cvrptw.cc b/examples/cpp/cvrptw.cc index e3a76aaac6..2cd9f06adf 100644 --- a/examples/cpp/cvrptw.cc +++ b/examples/cpp/cvrptw.cc @@ -28,7 +28,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "constraint_solver/routing.h" #include "base/random.h" diff --git a/examples/cpp/golomb.cc b/examples/cpp/golomb.cc index 34defdb19d..17cb8ef6bf 100644 --- a/examples/cpp/golomb.cc +++ b/examples/cpp/golomb.cc @@ -29,7 +29,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "constraint_solver/constraint_solver.h" diff --git a/examples/cpp/network_routing.cc b/examples/cpp/network_routing.cc index 70e3fcfd9f..a4a6da5cac 100644 --- a/examples/cpp/network_routing.cc +++ b/examples/cpp/network_routing.cc @@ -36,7 +36,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/concise_iterator.h" #include "base/map_util.h" diff --git a/examples/cpp/nqueens.cc b/examples/cpp/nqueens.cc index efac256c62..ed4ab43199 100644 --- a/examples/cpp/nqueens.cc +++ b/examples/cpp/nqueens.cc @@ -23,7 +23,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/map_util.h" #include 
"constraint_solver/constraint_solveri.h" diff --git a/examples/cpp/parse_dimacs_assignment.h b/examples/cpp/parse_dimacs_assignment.h index 7ed41e6091..ad72ed324d 100644 --- a/examples/cpp/parse_dimacs_assignment.h +++ b/examples/cpp/parse_dimacs_assignment.h @@ -27,7 +27,6 @@ #include "base/callback.h" #include "base/commandlineflags.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "graph/ebert_graph.h" #include "graph/linear_assignment.h" diff --git a/examples/cpp/pdptw.cc b/examples/cpp/pdptw.cc index 8a019213ee..853750aa57 100644 --- a/examples/cpp/pdptw.cc +++ b/examples/cpp/pdptw.cc @@ -155,7 +155,7 @@ bool SafeParseInt64Array(const std::string& str, std::vector* parsed_int) for (int i = 0; i < items.size(); ++i) { const char* item = items[i].c_str(); char* endptr = NULL; - (*parsed_int)[i] = strto64(item, &endptr, 10); // NOLINT + (*parsed_int)[i] = strto64(item, &endptr, 10); // The whole item should have been consumed. if (*endptr != '\0') return false; } diff --git a/examples/cpp/sat_runner.cc b/examples/cpp/sat_runner.cc index bef9bdaea3..5bb6ffcf6a 100644 --- a/examples/cpp/sat_runner.cc +++ b/examples/cpp/sat_runner.cc @@ -29,6 +29,7 @@ #include "sat/boolean_problem.h" #include "sat/sat_solver.h" #include "util/time_limit.h" +#include "algorithms/sparse_permutation.h" DEFINE_string( input, "", @@ -64,6 +65,10 @@ DEFINE_bool(search_optimal, false, "If true, search for the optimal solution. " "The algorithm is currently really basic."); +DEFINE_bool(use_symmetry, false, + "If true, find and exploit the eventual symmetries " + "of the problem."); + DEFINE_bool(refine_core, false, "If true, turn on the unsat_proof parameters and if the problem is " @@ -126,6 +131,14 @@ int Run() { LOG(FATAL) << "Issue when setting the objective bounds."; } + // Symmetries! 
+ if (FLAGS_use_symmetry) { + LOG(INFO) << "Finding symmetries of the problem."; + std::vector> generators; + FindLinearBooleanProblemSymmetries(problem, &generators); + solver.AddSymmetries(&generators); + } + // Heuristics to drive the SAT search. UseObjectiveForSatAssignmentPreference(problem, &solver); diff --git a/examples/cpp/sports_scheduling.cc b/examples/cpp/sports_scheduling.cc index f1ded2404a..911864d2d0 100644 --- a/examples/cpp/sports_scheduling.cc +++ b/examples/cpp/sports_scheduling.cc @@ -59,7 +59,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "constraint_solver/constraint_solver.h" #include "constraint_solver/constraint_solveri.h" diff --git a/examples/cpp/tsp.cc b/examples/cpp/tsp.cc index 1e143fadfc..0d85f8cf71 100644 --- a/examples/cpp/tsp.cc +++ b/examples/cpp/tsp.cc @@ -30,7 +30,6 @@ #include "base/commandlineflags.h" #include "base/commandlineflags.h" #include "base/integral_types.h" -#include "base/scoped_ptr.h" #include "base/join.h" #include "constraint_solver/routing.h" #include "base/random.h" diff --git a/makefiles/Makefile.cpp.mk b/makefiles/Makefile.cpp.mk index 9845d68286..6818ef1869 100644 --- a/makefiles/Makefile.cpp.mk +++ b/makefiles/Makefile.cpp.mk @@ -52,11 +52,20 @@ DYNAMIC_GRAPH_DEPS = $(DYNAMIC_GRAPH_LIBS) $(DYNAMIC_BASE_LIBS) DYNAMIC_ROUTING_DEPS = $(DYNAMIC_ROUTING_LIBS) $(DYNAMIC_CP_LIBS) $(DYNAMIC_LP_LIBS) $(DYNAMIC_GRAPH_LIBS) $(DYNAMIC_BASE_LIBS) -DYNAMIC_FLATZINC_DEPS = $(DYNAMIC_FLATZINC_LIBS) $(DYNAMIC_CP_DEPS) +DYNAMIC_FLATZINC_DEPS = \ + $(DYNAMIC_FLATZINC_LIBS) \ + $(DYNAMIC_CP_DEPS) -DYNAMIC_FLATZINC2_DEPS = $(DYNAMIC_FLATZINC2_LIBS) $(DYNAMIC_CP_DEPS) +DYNAMIC_FLATZINC2_DEPS = \ + $(DYNAMIC_FLATZINC2_LIBS) \ + $(DYNAMIC_CP_DEPS) -DYNAMIC_DIMACS_DEPS = $(DYNAMIC_DIMACS_LIBS) $(DYNAMIC_LP_LIBS) $(DYNAMIC_GRAPH_LIBS) $(DYNAMIC_ALGORITHMS_LIBS) $(DYNAMIC_BASE_LIBS) +DYNAMIC_DIMACS_DEPS = \ + $(DYNAMIC_DIMACS_LIBS) \ + 
$(DYNAMIC_LP_LIBS) \ + $(DYNAMIC_GRAPH_LIBS) \ + $(DYNAMIC_ALGORITHMS_LIBS) \ + $(DYNAMIC_BASE_LIBS) DYNAMIC_FAP_DEPS = \ $(DYNAMIC_FAP_LIBS) \ @@ -66,7 +75,7 @@ DYNAMIC_FAP_DEPS = \ DYNAMIC_SAT_DEPS = \ $(DYNAMIC_SAT_LIBS) \ - $(DYNAMIC_BASE_LIBS) + $(DYNAMIC_ALGORITHMS_DEPS) DYNAMIC_ORTOOLS_DEPS = \ $(DYNAMIC_ORTOOLS_LIBS) @@ -120,7 +129,7 @@ DYNAMIC_FAP_LNK = \ DYNAMIC_SAT_LNK = \ $(DYNAMIC_PRE_LIB)sat$(DYNAMIC_POST_LIB) \ - $(DYNAMIC_BASE_LNK) + $(DYNAMIC_ALGORITHMS_LNK) DYNAMIC_ALL_LNK = \ $(DYNAMIC_PRE_LIB)algorithms$(DYNAMIC_POST_LIB) \ @@ -718,7 +727,10 @@ endif ALGORITHMS_LIB_OBJS=\ $(OBJ_DIR)/algorithms/hungarian.$O \ - $(OBJ_DIR)/algorithms/knapsack_solver.$O + $(OBJ_DIR)/algorithms/knapsack_solver.$O \ + $(OBJ_DIR)/algorithms/dynamic_partition.$O \ + $(OBJ_DIR)/algorithms/sparse_permutation.$O \ + $(OBJ_DIR)/algorithms/find_graph_symmetries.$O $(OBJ_DIR)/algorithms/hungarian.$O:$(SRC_DIR)/algorithms/hungarian.cc $(CCC) $(CFLAGS) -c $(SRC_DIR)/algorithms/hungarian.cc $(OBJ_OUT)$(OBJ_DIR)$Salgorithms$Shungarian.$O @@ -726,6 +738,15 @@ $(OBJ_DIR)/algorithms/hungarian.$O:$(SRC_DIR)/algorithms/hungarian.cc $(OBJ_DIR)/algorithms/knapsack_solver.$O:$(SRC_DIR)/algorithms/knapsack_solver.cc $(CCC) $(CFLAGS) -c $(SRC_DIR)/algorithms/knapsack_solver.cc $(OBJ_OUT)$(OBJ_DIR)$Salgorithms$Sknapsack_solver.$O +$(OBJ_DIR)/algorithms/dynamic_partition.$O:$(SRC_DIR)/algorithms/dynamic_partition.cc + $(CCC) $(CFLAGS) -c $(SRC_DIR)/algorithms/dynamic_partition.cc $(OBJ_OUT)$(OBJ_DIR)$Salgorithms$Sdynamic_partition.$O + +$(OBJ_DIR)/algorithms/sparse_permutation.$O:$(SRC_DIR)/algorithms/sparse_permutation.cc + $(CCC) $(CFLAGS) -c $(SRC_DIR)/algorithms/sparse_permutation.cc $(OBJ_OUT)$(OBJ_DIR)$Salgorithms$Ssparse_permutation.$O + +$(OBJ_DIR)/algorithms/find_graph_symmetries.$O:$(SRC_DIR)/algorithms/find_graph_symmetries.cc + $(CCC) $(CFLAGS) -c $(SRC_DIR)/algorithms/find_graph_symmetries.cc $(OBJ_OUT)$(OBJ_DIR)$Salgorithms$Sfind_graph_symmetries.$O + 
$(LIB_DIR)/$(LIBPREFIX)algorithms.$(DYNAMIC_LIB_SUFFIX): $(ALGORITHMS_LIB_OBJS) $(DYNAMIC_LINK_CMD) $(DYNAMIC_LINK_PREFIX)$(LIB_DIR)$S$(LIBPREFIX)algorithms.$(DYNAMIC_LIB_SUFFIX) $(ALGORITHMS_LIB_OBJS) @@ -1196,6 +1217,7 @@ SAT_LIB_OBJS = \ $(OBJ_DIR)/sat/pb_constraint.$O\ $(OBJ_DIR)/sat/sat_parameters.pb.$O\ $(OBJ_DIR)/sat/sat_solver.$O\ + $(OBJ_DIR)/sat/symmetry.$O\ $(OBJ_DIR)/sat/unsat_proof.$O satlibs: $(DYNAMIC_SAT_DEPS) $(STATIC_SAT_DEPS) @@ -1223,6 +1245,9 @@ $(OBJ_DIR)/sat/clause.$O: $(SRC_DIR)/sat/clause.cc $(SRC_DIR)/sat/sat_base.h $(S $(OBJ_DIR)/sat/unsat_proof.$O: $(SRC_DIR)/sat/unsat_proof.cc $(SRC_DIR)/sat/sat_base.h $(SRC_DIR)/sat/unsat_proof.h $(CCC) $(CFLAGS) -c $(SRC_DIR)/sat/unsat_proof.cc $(OBJ_OUT)$(OBJ_DIR)$Ssat$Sunsat_proof.$O +$(OBJ_DIR)/sat/symmetry.$O: $(SRC_DIR)/sat/symmetry.cc $(SRC_DIR)/sat/sat_base.h $(SRC_DIR)/sat/symmetry.h $(SRC_DIR)/sat/clause.h $(SRC_DIR)/sat/unsat_proof.h $(GEN_DIR)/sat/sat_parameters.pb.h + $(CCC) $(CFLAGS) -c $(SRC_DIR)/sat/symmetry.cc $(OBJ_OUT)$(OBJ_DIR)$Ssat$Ssymmetry.$O + $(GEN_DIR)/sat/sat_parameters.pb.cc: $(SRC_DIR)/sat/sat_parameters.proto $(PROTOBUF_DIR)/bin/protoc --proto_path=$(INC_DIR) --cpp_out=$(GEN_DIR) $(SRC_DIR)/sat/sat_parameters.proto diff --git a/makefiles/Makefile.port b/makefiles/Makefile.port index 09e116bd14..3d97321025 100644 --- a/makefiles/Makefile.port +++ b/makefiles/Makefile.port @@ -64,8 +64,9 @@ ifeq ("$(SYSTEM)","unix") CANDIDATE_JDK_HEADERS = \ /System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Home/include \ /Developer/SDKs/MacOSX10.7.sdk/System/Library/Frameworks/JavaVM.framework/Versions/A/Headers \ - /System/Library/Frameworks/JavaVM.framework/Versions/A/Headers\ - /Library/Java/JavaVirtualMachines/jdk1.7.0_45.jdk/Contents/Home/include + /System/Library/Frameworks/JavaVM.framework/Versions/A/Headers \ + /Library/Java/JavaVirtualMachines/jdk1.7.0_45.jdk/Contents/Home/include \ + /Library/Java/JavaVirtualMachines/jdk1.7.0_51.jdk/Contents/Home/include SELECTED_JDK_DEF 
= MAC_JDK_HEADERS = $(wildcard $(CANDIDATE_JDK_HEADERS)) endif @@ -84,11 +85,11 @@ ifeq ("$(SYSTEM)","win") ifeq ("$(VisualStudioVersion)", "11.0") VISUAL_STUDIO=2012 VS_RELEASE=v110 - VS_COMTOOLS=110 + VS_COMTOOLS=110 else VISUAL_STUDIO=2010 VS_RELEASE=v100 - VS_COMTOOLS=100 + VS_COMTOOLS=100 endif endif ifeq ("$(Platform)", "X64") diff --git a/src/algorithms/dynamic_partition.cc b/src/algorithms/dynamic_partition.cc new file mode 100644 index 0000000000..f8e4c6389c --- /dev/null +++ b/src/algorithms/dynamic_partition.cc @@ -0,0 +1,238 @@ +// Copyright 2010-2013 Google +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#include "algorithms/dynamic_partition.h" + +#include + +#include "base/stringprintf.h" +#include "base/join.h" + +namespace operations_research { + +DynamicPartition::DynamicPartition(int n) { + element_.assign(n, -1); + index_of_.assign(n, -1); + for (int i = 0; i < n; ++i) { + element_[i] = i; + index_of_[i] = i; + } + part_of_.assign(n, 0); + part_.push_back({/*start_index=*/0, /*end_index=*/n, /*parent_part=*/0}); +} + +DynamicPartition::DynamicPartition(const std::vector& initial_part_of_element) { + if (initial_part_of_element.empty()) return; + part_of_ = initial_part_of_element; + const int num_parts = 1 + *std::max_element(part_of_.begin(), part_of_.end()); + DCHECK_EQ(0, *std::min_element(part_of_.begin(), part_of_.end())); + part_.resize(num_parts); + + // Compute the actual start indices of each part, knowing that we'll sort + // them as they were given implicitly in "initial_part_of_element". + // The code looks a bit weird to do it in-place, with no additional memory. + for (int p = 0; p < num_parts; ++p) { + part_[p].end_index = 0; // Temporarily utilized as size_of_part. + part_[p].parent_part = p; + } + for (const int p : part_of_) ++part_[p].end_index; // size_of_part + int sum_part_sizes = 0; + for (int p = 0; p < num_parts; ++p) { + part_[p].start_index = sum_part_sizes; + sum_part_sizes += part_[p].end_index; // size of part. + } + + // Now that we have the correct start indices, we set the end indices to the + // start indices, and incrementally add all elements to their part, adjusting + // the end indices as we go. + for (Part& part : part_) part.end_index = part.start_index; + const int n = part_of_.size(); + element_.assign(n, -1); + index_of_.assign(n, -1); + for (int element = 0; element < n; ++element) { + Part* const part = &part_[part_of_[element]]; + element_[part->end_index] = element; + index_of_[element] = part->end_index; + ++part->end_index; + } + + // Verify that we did it right. 
+ // TODO(user): either remove this or factor it out if it can be used + // elsewhere. + DCHECK_EQ(0, part_[0].start_index); + DCHECK_EQ(NumElements(), part_[NumParts() - 1].end_index); + for (int p = 1; p < NumParts(); ++p) { + DCHECK_EQ(part_[p - 1].end_index, part_[p].start_index); + } +} + +void DynamicPartition::Refine(const std::vector& distinguished_subset) { + // tmp_counter_of_part_[i] will contain the number of + // elements in distinguished_subset that were part of part #i. + tmp_counter_of_part_.resize(NumParts(), 0); + // We remember the Parts that were actually affected. + tmp_affected_parts_.clear(); + for (const int element : distinguished_subset) { + DCHECK_GE(element, 0); + DCHECK_LT(element, NumElements()); + const int part = part_of_[element]; + const int num_distinguished_elements_in_part = ++tmp_counter_of_part_[part]; + // Is this the first time that we touch this element's part? + if (num_distinguished_elements_in_part == 1) { + tmp_affected_parts_.push_back(part); + } + // Move the element to the end of its current Part. + const int old_index = index_of_[element]; + const int new_index = + part_[part].end_index - num_distinguished_elements_in_part; + DCHECK_GE(new_index, old_index) << "Duplicate element given to Refine(): " + << element; + // Perform the swap, keeping index_of_ up to date. + index_of_[element] = new_index; + index_of_[element_[new_index]] = old_index; + std::swap(element_[old_index], element_[new_index]); + } + + // Sort affected parts. This is important to behave as advertised in the .h. + // TODO(user, fdid): automatically switch to an O(N) sort when it's faster + // than this one, which is O(K log K) with K = tmp_affected_parts_.size(). + std::sort(tmp_affected_parts_.begin(), tmp_affected_parts_.end()); + + // Iterate on each affected part and split it, or keep it intact if all + // of its elements were distinguished. 
+ for (const int part : tmp_affected_parts_) { + const int end_index = part_[part].end_index; + const int split_index = end_index - tmp_counter_of_part_[part]; + tmp_counter_of_part_[part] = 0; // Clean up after us. + DCHECK_GE(split_index, part_[part].start_index); + DCHECK_LT(split_index, end_index); + + // Do nothing if all elements were distinguished. + if (split_index == part_[part].start_index) continue; + + // Perform the split. + part_[part].end_index = split_index; + const int new_part = NumParts(); + part_.push_back({/*start_index*/ split_index, /*end_index*/ end_index, + /*parent_part*/ part}); + for (const int element : ElementsInPart(new_part)) { + part_of_[element] = new_part; + } + } +} + +void DynamicPartition::UndoRefineUntilNumPartsEqual(int original_num_parts) { + DCHECK_GE(NumParts(), original_num_parts); + DCHECK_GE(original_num_parts, 1); + while (NumParts() > original_num_parts) { + const int part_index = NumParts() - 1; + const int parent_part_index = part_[part_index].parent_part; + DCHECK_LT(parent_part_index, part_index) << "UndoRefineUntilNumPartsEqual()" + " called with " + "'original_num_parts' too low"; + for (const int element : ElementsInPart(part_index)) { + part_of_[element] = parent_part_index; + } + DCHECK_EQ(part_[part_index].start_index, + part_[parent_part_index].end_index); + part_[parent_part_index].end_index = part_[part_index].end_index; + part_.pop_back(); + } +} + +void DynamicPartition::SortElementsInPart(int p) { + const Part& part = part_[p]; + std::sort(element_.begin() + part.start_index, element_.begin() + part.end_index); + // Fix index_of_. 
+ for (int index = part.start_index; index < part.end_index; ++index) { + index_of_[element_[index]] = index; + } +} + +std::string DynamicPartition::DebugString(DebugStringSorting sorting) const { + return ""; +} + +void MergingPartition::Reset(int num_nodes) { + DCHECK_GE(num_nodes, 0); + part_size_.assign(num_nodes, 1); + parent_.assign(num_nodes, -1); + for (int i = 0; i < num_nodes; ++i) parent_[i] = i; + tmp_part_bit_.assign(num_nodes, false); +} + +void MergingPartition::MergePartsOf(int node1, int node2) { + DCHECK_GE(node1, 0); + DCHECK_GE(node2, 0); + DCHECK_LT(node1, NumNodes()); + DCHECK_LT(node2, NumNodes()); + int root1 = GetRoot(node1); + int root2 = GetRoot(node2); + if (root1 == root2) return; + int s1 = part_size_[root1]; + int s2 = part_size_[root2]; + // Attach the smaller part to the larger one. Break ties by root index. + if (s1 < s2 || (s1 == s2 && root1 < root2)) { + std::swap(root1, root2); + std::swap(s1, s2); + } + + // Update the part size. Don't change part_size_[root2]: it won't be used + // again by further merges. + part_size_[root1] += part_size_[root2]; + SetParentAlongPathToRoot(node1, root1); + SetParentAlongPathToRoot(node2, root1); +} + +int MergingPartition::GetRootAndCompressPath(int node) { + DCHECK_GE(node, 0); + DCHECK_LT(node, NumNodes()); + const int root = GetRoot(node); + SetParentAlongPathToRoot(node, root); + return root; +} + +void MergingPartition::KeepOnlyOneNodePerPart(std::vector* nodes) { + int num_nodes_kept = 0; + for (const int node : *nodes) { + const int representative = GetRootAndCompressPath(node); + if (!tmp_part_bit_[representative]) { + tmp_part_bit_[representative] = true; + (*nodes)[num_nodes_kept++] = node; + } + } + nodes->resize(num_nodes_kept); + + // Clean up the tmp_part_bit_ vector. Since we've already compressed the + // paths (if backtracking was enabled), no need to do it again. 
+ for (const int node : *nodes) tmp_part_bit_[GetRoot(node)] = false; +} + +void MergingPartition::FillEquivalenceClasses( + std::vector* node_equivalence_classes) { + node_equivalence_classes->assign(NumNodes(), -1); + int num_roots = 0; + for (int node = 0; node < NumNodes(); ++node) { + const int root = GetRootAndCompressPath(node); + if ((*node_equivalence_classes)[root] < 0) { + (*node_equivalence_classes)[root] = num_roots; + ++num_roots; + } + (*node_equivalence_classes)[node] = (*node_equivalence_classes)[root]; + } +} + +std::string MergingPartition::DebugString() { + return ""; +} + +} // namespace operations_research diff --git a/src/algorithms/dynamic_partition.h b/src/algorithms/dynamic_partition.h new file mode 100644 index 0000000000..46b5ac2246 --- /dev/null +++ b/src/algorithms/dynamic_partition.h @@ -0,0 +1,276 @@ +// Copyright 2010-2013 Google +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// TODO(user, fdid): refine this toplevel comment when this file settles. +// +// Two dynamic partition classes: one that incrementally splits a partition +// into more and more parts; one that incrementally merges a partition into less +// and less parts. +// +// GLOSSARY: +// The partition classes maintain a partition of N integers 0..N-1 +// (aka "elements") into disjoint equivalence classes (aka "parts"). 
+// +// SAFETY: +// Like std::vector crashes when used improperly, these classes are not "safe": +// most of their methods may crash if called with invalid arguments. The client +// code is responsible for using this class properly. A few DCHECKs() will help +// catch bugs, though. + +#ifndef OR_TOOLS_ALGORITHMS_DYNAMIC_PARTITION_H_ +#define OR_TOOLS_ALGORITHMS_DYNAMIC_PARTITION_H_ + +#include +#include +#include "base/logging.h" + +namespace operations_research { + +// Partition class that supports incremental splitting, with backtracking. +// See http://en.wikipedia.org/wiki/Partition_refinement . +// More precisely, the supported edit operations are: +// - Refine the partition so that a subset S (typically, |S| <<< N) +// of elements are all considered non-equivalent to any element in ¬S +// Typically, this should be done in O(|S|). +// - Undo the above operations (backtracking). +// +// TODO(user): rename this to BacktrackableSplittingPartition. +class DynamicPartition { + public: + // Creates a DynamicPartition on n elements, numbered 0..n-1. Start with + // the trivial partition (only one subset containing all elements). + explicit DynamicPartition(int n); + + // Ditto, but specify the initial part of each element. Part indices must + // form a dense integer set starting at 0; eg. [2, 1, 0, 1, 1, 3, 0] is valid. + explicit DynamicPartition(const std::vector& initial_part_of_element); + + // Accessors. + int NumElements() const { return element_.size(); } + const int NumParts() const { return part_.size(); } + + // To iterate over the elements in part #i: + // for (int element : partition.ElementsInPart(i)) { ... } + // + // ORDERING OF ELEMENTS INSIDE PARTS: the order of elements within a given + // part is volatile, and may change with Refine() or UndoRefine*() operations, + // even if the part itself doesn't change. 
+ struct IterablePart; + IterablePart ElementsInPart(int i) const; + + int PartOf(int element) const; + int SizeOfPart(int part) const; + int ParentOfPart(int part) const; + + // Refines the partition such that elements that are in distinguished_subset + // never share the same part as elements that aren't in that subset. + // This might be a no-op: in that case, NumParts() won't change, but the + // order of elements inside each part may change. + // + // ORDERING OF PARTS: + // For each i such that Part #i has a non-trivial intersection with + // "distinguished_subset" (neither empty, nor the full Part); Part #i is + // stripped out of all elements that are in "distinguished_subset", and + // those elements are sent to a newly created part, whose parent_part = i. + // The parts newly created by a single Refine() operations are sorted + // by parent_part. + // Example: a Refine() on a partition with 6 parts causes parts #1, #3 and + // #4 to be split: the partition will now contain 3 new parts: part #6 (with + // parent_part = 1), part #7 (with parent_part = 3) and part #8 (with + // parent_part = 4). + // + // TODO(user): the graph symmetry finder could probably benefit a lot from + // keeping track of one additional bit of information for each part that + // remains unchanged by a Refine() operation: was that part entirely *in* + // the distinguished subset or entirely *out*? + void Refine(const std::vector& distinguished_subset); + + // Undo one or several Refine() operations, until the number of parts + // becomes equal to "original_num_parts". + // Prerequisite: NumParts() >= original_num_parts. + void UndoRefineUntilNumPartsEqual(int original_num_parts); + + void SortElementsInPart(int part); + + // Dump the partition to a std::string. There might be different conventions for + // sorting the parts and the elements inside them. + enum DebugStringSorting { + // Elements are sorted within parts, and parts are then sorted + // lexicographically. 
+ SORT_LEXICOGRAPHICALLY, + // Elements are sorted within parts, and parts are kept in order. + SORT_BY_PART, + }; + std::string DebugString(DebugStringSorting sorting) const; + + private: + // A DynamicPartition instance maintains a list of all of its elements, + // 'sorted' by partitions: elements of the same subset are contiguous + // in that list. + std::vector element_; + + // The reverse of elements_[]: element_[index_of_[i]] = i. + std::vector index_of_; + + // part_of_[i] is the index of the part that contains element i. + std::vector part_of_; + + struct Part { + // This part holds elements[start_index .. end_index-1]. + // INVARIANT: end_index > start_index. + int start_index; // Inclusive + int end_index; // Exclusive + + // The Part that this part was split out of. See the comment at Refine(). + // INVARIANT: part[i].parent_part <= i, and the equality holds iff part[i] + // has no parent. + int parent_part; // Index into the part[] array. + }; + std::vector part_; // The disjoint parts. + + // Used temporarily and exclusively by Refine(). This prevents Refine() + // from being thread-safe. + // INVARIANT: tmp_counter_of_part_ contains only 0s before and after Refine(). + std::vector tmp_counter_of_part_; + std::vector tmp_affected_parts_; +}; + +struct DynamicPartition::IterablePart { + std::vector::const_iterator begin() const { return begin_; } + std::vector::const_iterator end() const { return end_; } + std::vector::const_iterator begin_; + std::vector::const_iterator end_; +}; + +// Partition class that supports incremental merging, using the union-find +// algorithm (see http://en.wikipedia.org/wiki/Disjoint-set_data_structure). +class MergingPartition { + public: + // At first, all nodes are in their own singleton part. + MergingPartition() { Reset(0); } + void Reset(int num_nodes); + + int NumNodes() const { return parent_.size(); } + + // Complexity: amortized O(Ackermann⁻¹(N)) -- which is essentially O(1) -- + // where N is the number of nodes. 
+ void MergePartsOf(int node1, int node2); // The 'union' of the union-find. + + // Specialized reader API: prunes "nodes" to only keep at most one node per + // part: any node which is in the same part as an earlier node will be pruned. + void KeepOnlyOneNodePerPart(std::vector* nodes); + + // Output the whole partition as node equivalence classes: if there are K + // parts and N nodes, node_equivalence_classes[i] will contain the part index + // (a number in 0..K-1) of node #i. Parts will be sorted by their first node + // (i.e. node 0 will always be in part 0; then the next node that isn't in + // part 0 will be in part 1, and so on). + void FillEquivalenceClasses(std::vector* node_equivalence_classes); + + // Dump all components, with nodes sorted within each part and parts + // sorted lexicographically. Eg. "0 1 3 4 | 2 5 | 6 7 8". + std::string DebugString(); + + // Advanced usage: sets 'node' to be in its original singleton. All nodes + // who may point to 'node' as a parent will remain in an inconsistent state. + // This can be used to reinitialize a MergingPartition that has been sparsely + // modified in O(|modifications|). + // CRASHES IF USED INCORRECTLY. + void ResetNode(int node); + + // public for testing. + int NumNodesInSamePartAs(int node) { + return part_size_[GetRootAndCompressPath(node)]; + } + + private: + // Union-find routines: + // Find the root of the union-find tree with leaf 'node', i.e. its + // representative node. + int GetRoot(int node) const; + // Along the upwards path from 'node' to its root, set the parent of all + // nodes (including the root) to 'parent'. + void SetParentAlongPathToRoot(int node, int parent); + // Combine the two above operations (so-called 'path compression'). + int GetRootAndCompressPath(int node); + + std::vector parent_; + std::vector part_size_; + + // Used transiently by KeepOnlyOneNodePerPart(). + std::vector tmp_part_bit_; +}; + +// *** Implementation of inline methods of the above classes. 
*** + +inline DynamicPartition::IterablePart DynamicPartition::ElementsInPart(int i) + const { + DCHECK_GE(i, 0); + DCHECK_LT(i, NumParts()); + return {element_.begin() + part_[i].start_index, + element_.begin() + part_[i].end_index}; +} + +inline int DynamicPartition::PartOf(int element) const { + DCHECK_GE(element, 0); + DCHECK_LT(element, part_of_.size()); + return part_of_[element]; +} + +inline int DynamicPartition::SizeOfPart(int part) const { + DCHECK_GE(part, 0); + DCHECK_LT(part, part_.size()); + const Part& p = part_[part]; + return p.end_index - p.start_index; +} + +inline int DynamicPartition::ParentOfPart(int part) const { + DCHECK_GE(part, 0); + DCHECK_LT(part, part_.size()); + return part_[part].parent_part; +} + +inline int MergingPartition::GetRoot(int node) const { + DCHECK_GE(node, 0); + DCHECK_LT(node, NumNodes()); + int child = node; + while (true) { + const int parent = parent_[child]; + if (parent == child) return child; + child = parent; + } +} + +inline void MergingPartition::SetParentAlongPathToRoot(int node, int parent) { + DCHECK_GE(node, 0); + DCHECK_LT(node, NumNodes()); + DCHECK_GE(parent, 0); + DCHECK_LT(parent, NumNodes()); + int child = node; + while (true) { + const int old_parent = parent_[child]; + parent_[child] = parent; + if (old_parent == child) return; + child = old_parent; + } +} + +inline void MergingPartition::ResetNode(int node) { + DCHECK_GE(node, 0); + DCHECK_LT(node, NumNodes()); + parent_[node] = node; + part_size_[node] = 1; +} + +} // namespace operations_research + +#endif // OR_TOOLS_ALGORITHMS_DYNAMIC_PARTITION_H_ diff --git a/src/algorithms/find_graph_symmetries.cc b/src/algorithms/find_graph_symmetries.cc new file mode 100644 index 0000000000..f8a39d703b --- /dev/null +++ b/src/algorithms/find_graph_symmetries.cc @@ -0,0 +1,570 @@ +// Copyright 2010-2013 Google +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#include "algorithms/find_graph_symmetries.h" + +#include +#include "base/join.h" +#include "algorithms/dynamic_partition.h" +#include "algorithms/sparse_permutation.h" +#include "util/iterators.h" + +namespace operations_research { + +GraphSymmetryFinder::GraphSymmetryFinder(const Graph& graph) + : graph_(graph), + tmp_node_mapping_(NumNodes(), -1), + tmp_node_mask_(NumNodes(), false), + tmp_in_degree_(NumNodes(), 0), + tmp_nodes_with_indegree_(NumNodes() + 1) { + for (int i = 0; i < NumNodes(); ++i) tmp_node_mapping_[i] = i; + tmp_partition_.Reset(NumNodes()); +} + +namespace { +// Whether the "arcs1" adjacency list maps to "arcs2" under the +// dense node mapping "node_mapping". +// This method uses a transient bitmask on all the graph nodes, which +// should be entirely false before the call (and will be restored as such +// after it). +// +// TODO(user): Make this method support multi-arcs, and see if that's +// sufficient to make the whole algorithm support multi-arcs. 
+template +bool AdjacencyListMapsToAdjacencyList( + const Graph& graph, const AdjList& arcs1, const AdjList& arcs2, + const std::vector& node_mapping, std::vector* tmp_node_mask) { + int num_arcs_delta = 0; + bool match = true; + for (const int arc : arcs2) { + ++num_arcs_delta; + (*tmp_node_mask)[graph.Head(arc)] = true; + } + for (const int arc : arcs1) { + --num_arcs_delta; + const int mapped_head = node_mapping[graph.Head(arc)]; + if (!(*tmp_node_mask)[mapped_head]) { + match = false; + break; + } + (*tmp_node_mask)[mapped_head] = false; + } + if (num_arcs_delta != 0) match = false; + if (!match) { + // We need to clean up tmp_node_mask. + for (const int arc : arcs2) (*tmp_node_mask)[graph.Head(arc)] = false; + } + return match; +} +} // namespace + +bool GraphSymmetryFinder::IsGraphAutomorphism(const SparsePermutation& perm) + const { + bool is_automorphism = true; + // Build the node mapping for "perm". + for (int c = 0; c < perm.NumCycles(); ++c) { + // TODO(user): use the global element->image iterator when it exists. + int prev = *(perm.Cycle(c).end() - 1); + for (const int e : perm.Cycle(c)) { + tmp_node_mapping_[prev] = e; + prev = e; + } + } + + // Compare the outgoing and incoming adjacency lists. + for (int c = 0; is_automorphism && c < perm.NumCycles(); ++c) { + // TODO(user): use the global element->image iterator when it exists. + int prev = *(perm.Cycle(c).end() - 1); + for (const int e : perm.Cycle(c)) { + is_automorphism &= AdjacencyListMapsToAdjacencyList( + graph_, graph_.OutgoingArcs(prev), graph_.OutgoingArcs(e), + tmp_node_mapping_, &tmp_node_mask_); + is_automorphism &= AdjacencyListMapsToAdjacencyList( + graph_, graph_.IncomingArcs(prev), graph_.IncomingArcs(e), + tmp_node_mapping_, &tmp_node_mask_); + prev = e; + } + } + + // Clean up tmp_node_mapping_. 
+ for (const int e : perm.Support()) tmp_node_mapping_[e] = e; + return is_automorphism; +} + +void GraphSymmetryFinder::RecursivelyRefinePartitionByAdjacency( + int first_unrefined_part_index, DynamicPartition* partition) { + // Assuming that the partition was refined based on the adjacency on + // parts [0 .. first_unrefined_part_index) already, we simply need to + // refine parts first_unrefined_part_index ... NumParts()-1, the latter bound + // being a moving target: + // When a part #p < first_unrefined_part_index gets modified, it's always + // split in two: itself, and a new part #p'. Since #p was already refined + // on, we only need to further refine on *one* of its two split parts. + // And this will be done because p' > first_unrefined_part_index. + // + // Thus, the following loop really does the full recursive refinement as + // advertised. + for (int part_index = first_unrefined_part_index; + part_index < partition->NumParts(); // Moving target! + ++part_index) { + // Count the aggregated in-degree of all nodes, only looking at arcs that + // come from the current part. + for (const int src : partition->ElementsInPart(part_index)) { + for (const int arc : graph_.OutgoingArcs(src)) { + const int dst = graph_.Head(arc); + const int in_degree = ++tmp_in_degree_[dst]; + if (in_degree == 1) tmp_nodes_with_nonzero_indegree_.push_back(dst); + } + } + // Group the nodes by (nonzero) in-degree. Remember the maximum in-degree. + int max_in_degree = 0; + for (const int node : tmp_nodes_with_nonzero_indegree_) { + const int in_degree = tmp_in_degree_[node]; + tmp_in_degree_[node] = 0; // To clean up after us. + max_in_degree = std::max(max_in_degree, in_degree); + tmp_nodes_with_indegree_[in_degree].push_back(node); + } + tmp_nodes_with_nonzero_indegree_.clear(); // To clean up after us. + // For each in-degree, refine the partition by the set of nodes with + // that in-degree. 
+ for (int in_degree = 1; in_degree <= max_in_degree; ++in_degree) { + partition->Refine(tmp_nodes_with_indegree_[in_degree]); + tmp_nodes_with_indegree_[in_degree].clear(); // To clean up after us. + } + } +} + +void GraphSymmetryFinder::DistinguishNodeInPartition( + int node, DynamicPartition* partition) { + partition->Refine({node}); + RecursivelyRefinePartitionByAdjacency(partition->PartOf(node), partition); +} + +namespace { +// This method's API is hard to understand out of context. See its call site. +// This creates the permutation that sends each singleton of 'base' to the +// corresponding singleton of 'image'. +void ExtractPermutationFromPartitionSingletons( + const DynamicPartition& base, const DynamicPartition& image, + SparsePermutation* permutation) { + const int num_parts = base.NumParts(); + std::vector part_was_seen(num_parts, false); + for (int p = 0; p < num_parts; ++p) { + if (part_was_seen[p]) continue; + if (base.SizeOfPart(p) != 1) continue; + DCHECK_EQ(1, image.SizeOfPart(p)); + const int start = *base.ElementsInPart(p).begin(); + int next = *image.ElementsInPart(p).begin(); + if (next == start) continue; // Matching singletons. + // We found non-matching singletons. Iterate over the orbit that they + // are part of. + permutation->AddToCurrentCycle(start); + part_was_seen[p] = true; + for (int e = next; e != start;) { + permutation->AddToCurrentCycle(e); + const int part = base.PartOf(e); + part_was_seen[part] = true; + e = *image.ElementsInPart(part).begin(); + } + permutation->CloseCurrentCycle(); + } +} + +void MergeNodeEquivalenceClassesAccordingToPermutation( + const SparsePermutation& perm, MergingPartition* node_equivalence_classes) { + for (int c = 0; c < perm.NumCycles(); ++c) { + // TODO(user): use the global element->image iterator when it exists. 
+ int prev = -1; + for (const int e : perm.Cycle(c)) { + if (prev >= 0) node_equivalence_classes->MergePartsOf(prev, e); + prev = e; + } + } +} +} // namespace + +void GraphSymmetryFinder::FindSymmetries( + std::vector* node_equivalence_classes_io, + std::vector>* generators, + std::vector* factorized_automorphism_group_size) { + if (node_equivalence_classes_io->size() != NumNodes()) { + LOG(DFATAL) << "GraphSymmetryFinder::FindSymmetries() was given an invalid" + << " 'node_equivalence_classes_io' argument."; + return; + } + DynamicPartition base_partition(*node_equivalence_classes_io); + + // First, break all inherent asymmetries in the graph. + RecursivelyRefinePartitionByAdjacency(/*first_unrefined_part_index=*/ 0, + &base_partition); + VLOG(4) << "Base partition: " + << base_partition.DebugString(DynamicPartition::SORT_BY_PART); + + // To find all permutations of the Graph that satisfy the current partition, + // we pick an element v that is not in a singleton part, and we + // split the search in two phases: + // 1) Find (the generators of) all permutations that keep v invariant. + // 2) For each w in PartOf(v) such that w != v: + // find *one* permutation that maps v to w, if it exists. + // if it does exist, add this to the generators. + // + // The part 1) is recursive. + // + // Since we can't really use true recursion because it will be too deep for + // the stack, we implement it iteratively. To do that, we unroll 1): + // the "invariant dive" is a single pass that successively refines the node + // partition with elements from non-singleton parts (the 'invariant node'), + // until all parts are singletons. + // We remember which nodes we picked as invariants, and also the successive + // partition sizes as we refine it, to allow us to backtrack. + // Then we'll perform 2) in the reverse order, backtracking the stack from 1) + // while using another dedicated stack for the search (see below).
+ struct InvariantDiveState { + int invariant_node; + int num_parts_before_refinement; + }; + std::vector invariant_dive_stack; + for (int invariant_node = 0; invariant_node < NumNodes(); ++invariant_node) { + if (base_partition.SizeOfPart(base_partition.PartOf(invariant_node)) != 1) { + invariant_dive_stack.push_back( + {invariant_node, base_partition.NumParts()}); + DistinguishNodeInPartition(invariant_node, &base_partition); + VLOG(4) << "Invariant dive: invariant node = " << invariant_node + << "; partition after: " + << base_partition.DebugString(DynamicPartition::SORT_BY_PART); + } + } + // Now we've dived to the bottom: we're left with the identity permutation, + // which we don't need as a generator. We move on to phase 2). + + generators->clear(); + MergingPartition node_equivalence_classes; + node_equivalence_classes.Reset(NumNodes()); + factorized_automorphism_group_size->clear(); + std::vector> permutations_displacing_node(NumNodes()); + + while (!invariant_dive_stack.empty()) { + // Backtrack the last step of 1) (the invariant dive). + const int root_node = invariant_dive_stack.back().invariant_node; + const int base_num_parts = + invariant_dive_stack.back().num_parts_before_refinement; + invariant_dive_stack.pop_back(); + base_partition.UndoRefineUntilNumPartsEqual(base_num_parts); + VLOG(4) << "Backtracking invariant dive: root node = " << root_node + << "; partition: " + << base_partition.DebugString(DynamicPartition::SORT_BY_PART); + + // Now we'll try to map "root_node" to all image nodes that seem compatible + // and that aren't "root_node" itself. + // + // Doing so, we're able to detect potential bad (or good) matches by + // refining the 'base' partition with "root_node"; and refining the + // 'image' partition (which represents the partition of images nodes, + // i.e. 
the nodes after applying the currently implicit permutation) + // with that candidate image node: if the two partitions don't match, then + // the candidate image isn't compatible. + // If the partitions do match, we might either find the underlying + // permutation directly, or we might need to further try and map other + // nodes to their image: this is a recursive search with backtracking. + // + // TODO(user): This copy may be slow. Consider moving it upwards; and + // update both the image and the base partition at the same time. + DynamicPartition image_partition = base_partition; + + // The potential images of root_node are the nodes in its part. They can be + // pruned by the already computed equivalence classes. + std::vector potential_root_image_nodes; + for (const int e : + image_partition.ElementsInPart(image_partition.PartOf(root_node))) { + if (e != root_node) potential_root_image_nodes.push_back(e); + } + node_equivalence_classes.KeepOnlyOneNodePerPart( + &potential_root_image_nodes); + + // Try to map "root_node" to all of its potential images. For each image, + // we only care about finding a single compatible permutation, if it exists. + while (!potential_root_image_nodes.empty()) { + const int root_image_node = potential_root_image_nodes.back(); + + std::unique_ptr permutation = + FindOneSuitablePermutation(root_node, root_image_node, + &base_partition, &image_partition, + *generators, permutations_displacing_node); + + if (permutation.get() != nullptr) { + // We found a permutation. We store it in the list of generators, and + // further prune out the remaining 'root' image candidates, taking into + // account the permutation we just found. + MergeNodeEquivalenceClassesAccordingToPermutation( + *permutation, &node_equivalence_classes); + // HACK(user): to make sure that we keep root_image_node as the + // representant of its part, we temporarily move it to the front + // of the vector. 
+ std::swap(potential_root_image_nodes[0], + potential_root_image_nodes.back()); + node_equivalence_classes.KeepOnlyOneNodePerPart( + &potential_root_image_nodes); + std::swap(potential_root_image_nodes[0], + potential_root_image_nodes.back()); + + // Register it onto the permutations_displacing_node vector. + const int permutation_index = static_cast(generators->size()); + for (const int node : permutation->Support()) { + permutations_displacing_node[node].push_back(permutation_index); + } + + // Move the permutation to the generator list (this also transfers + // ownership). + generators->push_back(std::move(permutation)); + } + + potential_root_image_nodes.pop_back(); + } + + // We keep track of the size of the orbit of 'root_node' under the + // current subgroup: this is one of the factors of the total group size. + // TODO(user): better, more complete explanation. + factorized_automorphism_group_size->push_back( + node_equivalence_classes.NumNodesInSamePartAs(root_node)); + } + node_equivalence_classes.FillEquivalenceClasses(node_equivalence_classes_io); +} + +std::unique_ptr +GraphSymmetryFinder::FindOneSuitablePermutation( + int root_node, int root_image_node, DynamicPartition* base_partition, + DynamicPartition* image_partition, + const std::vector>& generators_found_so_far, + const std::vector>& permutations_displacing_node) { + // TODO(user): break down this method into smaller pieces, if there is a good + // way. 
+ std::unique_ptr permutation; + + const int base_num_parts = base_partition->NumParts(); + DCHECK_EQ(base_partition->DebugString(DynamicPartition::SORT_BY_PART), + image_partition->DebugString(DynamicPartition::SORT_BY_PART)); + DistinguishNodeInPartition(root_node, base_partition); + + DCHECK(search_states_.empty()); + search_states_.push_back( + {/*base_node=*/root_node, + /*potential_image_nodes=*/{root_image_node}, + /*num_parts_before_trying_to_map_base_node=*/base_num_parts, + /*potential_image_nodes_were_pruned=*/true}); + while (!search_states_.empty()) { + SearchState* const ss = &search_states_.back(); + // At this stage, we're supposed to have: + // - A base_partition that has already been refined on ss->base_node + // - An image partition that hasn't been refined. + const int unrefined_num_parts = image_partition->NumParts(); + if (ss->potential_image_nodes.empty()) { + VLOG(4) << " Backtracking one level up: base_node " << ss->base_node + << " has no suitable image left to consider."; + base_partition->UndoRefineUntilNumPartsEqual(unrefined_num_parts); + image_partition->UndoRefineUntilNumPartsEqual( + ss->num_parts_before_trying_to_map_base_node); + search_states_.pop_back(); + continue; + } + + // Pop the currently chosen image node. + const int image_node = ss->potential_image_nodes.back(); + ss->potential_image_nodes.pop_back(); + + // Try to see if the base permutation and the image permutation would + // agree. + // TODO(user, fdid): Optimize this: For example, check for + // incompatibilities between the two partition as we (recursively) + // refine them, to catch mismatches earlier. + VLOG(4) << " Distinguishing image node " << image_node; + DistinguishNodeInPartition(image_node, image_partition); + VLOG(4) << " Image partition (refined) : " + << image_partition->DebugString(DynamicPartition::SORT_BY_PART); + + // Verify the compatibility of the base and image partitions. 
Since they + // were compatible before the Refinement, it suffices to search among + // the newly created parts. + bool compatible = base_partition->NumParts() == image_partition->NumParts(); + for (int p = unrefined_num_parts; + compatible && p < base_partition->NumParts(); ++p) { + compatible &= + base_partition->ParentOfPart(p) == image_partition->ParentOfPart(p) && + base_partition->SizeOfPart(p) == image_partition->SizeOfPart(p); + } + int part_to_map = -1; + if (!compatible) { + VLOG(4) << " Incompatible partitions!"; + } else { + VLOG(4) << " Partitions look compatible."; + // Look for non-trivially matching parts, i.e. non-singleton parts that + // don't have the same elements. + // TODO(user, fdid): optimize this. Some ideas: + // - Remember singletons to skip them quickly. + // - Maybe there's a way to avoid having to re-sort the elements in each + // part, or to do some of that sorting in the DynamicPartition code. + int non_singleton_part = -1; + for (int p = 0; p < base_partition->NumParts(); ++p) { + if (base_partition->SizeOfPart(p) == 1) continue; // Singletons. + if (non_singleton_part < 0) non_singleton_part = p; + base_partition->SortElementsInPart(p); + image_partition->SortElementsInPart(p); + const DynamicPartition::IterablePart base_part = + base_partition->ElementsInPart(p); + const DynamicPartition::IterablePart image_part = + image_partition->ElementsInPart(p); + if (!std::equal(base_part.begin(), base_part.end(), + image_part.begin())) { + part_to_map = p; + break; + } + } + + if (part_to_map == -1) { + // All the parts are matching! We have our permutation candidate: + // it sends singletons of base_permutation onto the corresponding + // ones of image_permutation, and is the identity on all other + // elements. + permutation.reset(new SparsePermutation(NumNodes())); + ExtractPermutationFromPartitionSingletons( + *base_partition, *image_partition, permutation.get()); + VLOG(4) << " Full partition match! 
Permutation candidate: " + << permutation->DebugString(); + + if (IsGraphAutomorphism(*permutation)) { + VLOG(4) << " Valid automorphism found!"; + break; + } + VLOG(4) << " That permutation is NOT a valid automorphism."; + permutation.reset(nullptr); + if (non_singleton_part == -1) { + VLOG(4) << " The permutation was fully specified; backtracking"; + } else { + VLOG(4) << " Deepening the search..."; + part_to_map = non_singleton_part; + } + } + } + if (part_to_map == -1) { + VLOG(4) << " Apply the backtracking: choosing a different image node."; + image_partition->UndoRefineUntilNumPartsEqual(unrefined_num_parts); + // TODO(user): apply a smarter test to decide whether to do the pruning + // or not: we can accurately estimate the cost of pruning (iterate through + // all generators found so far) and its estimated benefit (the cost of + // the search below the state that we're currently in, times the expected + // number of pruned nodes). Sometimes it may be better to skip the + // pruning. + if (!ss->potential_image_nodes_were_pruned && + ss->potential_image_nodes.size() > 0) { + // Prune the remaining potential image nodes: there is no permutation + // that maps base_node -> image_node that is compatible with the current + // partitions, so there can't be a permutation that maps base_node -> X + // either, for all X in the orbit of 'image_node' under valid + // permutations compatible with the current partitions. + FindPermutationsCompatibleWithPartition( + permutations_displacing_node[image_node], generators_found_so_far, + image_partition, &tmp_compatible_permutations_); + for (const SparsePermutation* perm : tmp_compatible_permutations_) { + // TODO(user): ignore cycles that are outside of image_part. + MergeNodeEquivalenceClassesAccordingToPermutation(*perm, + &tmp_partition_); + } + // HACK(user): temporarily re-inject image_node in the front of + // 'potential_image_nodes' so that it's the only one we keep in its + // class. 
+ ss->potential_image_nodes.push_back(image_node); + std::swap(ss->potential_image_nodes.back(), + ss->potential_image_nodes[0]); + tmp_partition_.KeepOnlyOneNodePerPart(&ss->potential_image_nodes); + std::swap(ss->potential_image_nodes.back(), + ss->potential_image_nodes[0]); + ss->potential_image_nodes.pop_back(); + ss->potential_image_nodes_were_pruned = true; + + // Reset "tmp_partition_" sparsely. + for (const SparsePermutation* perm : tmp_compatible_permutations_) { + for (const int node : perm->Support()) tmp_partition_.ResetNode(node); + } + } + // Actually backtrack now. + continue; + } + + // There are some non-trivially matching parts, so we have to keep + // searching deeper, by looking into that part. + const DynamicPartition::IterablePart base_part = + base_partition->ElementsInPart(part_to_map); + const DynamicPartition::IterablePart image_part = + image_partition->ElementsInPart(part_to_map); + // TODO(user, fdid): try some heuristics to optimize the choice of the + // base node. For example, select a base node that maps to itself. + const int next_base_node = *base_part.begin(); + search_states_.push_back( + {/*base_node*/ next_base_node, + /*potential_image_nodes*/ {image_part.begin(), image_part.end()}, + /*num_parts_before_trying_to_map_base_node*/ unrefined_num_parts}); + DistinguishNodeInPartition(next_base_node, base_partition); + VLOG(4) << " Distinguishing new base node " << next_base_node; + } + + // Whether we found a permutation or not, we must reset the partitions to + // their original state before returning, and clear the search. + base_partition->UndoRefineUntilNumPartsEqual(base_num_parts); + image_partition->UndoRefineUntilNumPartsEqual(base_num_parts); + search_states_.clear(); + + return permutation; // Will return nullptr if uninitialized. 
+} + +void GraphSymmetryFinder::FindPermutationsCompatibleWithPartition( + const std::vector& permutation_indices, + const std::vector>& permutation_vector, + DynamicPartition* partition, + std::vector* compatible_permutations) { + compatible_permutations->clear(); + // TODO(user): investigate further optimizations: maybe it's possible + // to incrementally maintain the set of permutations that is compatible + // with the current partition, instead of recomputing it here? + for (const int p : permutation_indices) { + const SparsePermutation& permutation = *permutation_vector[p]; + // First, a quick compatibility check: the permutation's cycles must be + // smaller or equal to the size of the part that they are included in. + bool compatible = true; + for (int c = 0; c < permutation.NumCycles(); ++c) { + const SparsePermutation::Iterator cycle = permutation.Cycle(c); + if (cycle.size() > + partition->SizeOfPart(partition->PartOf(*cycle.begin()))) { + compatible = false; + break; + } + } + if (!compatible) continue; + // Now the full compatibility check: each cycle of the permutation must + // be fully included in an image part. + for (int c = 0; c < permutation.NumCycles(); ++c) { + int part = -1; + for (const int node : permutation.Cycle(c)) { + if (partition->PartOf(node) != part) { + if (part >= 0) { + compatible = false; + break; + } + part = partition->PartOf(node); // Initialization of 'part'. + } + } + } + if (!compatible) continue; + // The permutation is fully compatible! + compatible_permutations->push_back(&permutation); + } +} + +} // namespace operations_research diff --git a/src/algorithms/find_graph_symmetries.h b/src/algorithms/find_graph_symmetries.h new file mode 100644 index 0000000000..e6e578b486 --- /dev/null +++ b/src/algorithms/find_graph_symmetries.h @@ -0,0 +1,156 @@ +// Copyright 2010-2013 Google +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// TODO(user, fdid): Refine this toplevel comment when this file settles. + +#ifndef OR_TOOLS_ALGORITHMS_FIND_GRAPH_SYMMETRIES_H_ +#define OR_TOOLS_ALGORITHMS_FIND_GRAPH_SYMMETRIES_H_ + +#include "base/unique_ptr.h" +#include + +#include "algorithms/dynamic_partition.h" +#include "graph/graph.h" + +namespace operations_research { + +class DynamicPartition; +class SparsePermutation; + +class GraphSymmetryFinder { + public: + typedef ReverseArcMixedGraph<> Graph; + // "graph" must not have multi-arcs. + // TODO(user): support multi-arcs. + explicit GraphSymmetryFinder(const Graph& graph); + + // Whether the given permutation is an automorphism of the graph given at + // construction. This costs O(sum(degree(x))) (the sum is over all nodes x + // that are displaced by the permutation). + bool IsGraphAutomorphism(const SparsePermutation& perm) const; + + // Find a set of generators of the automorphism subgroup of the graph that + // respects the given node equivalence classes. The generators are themselves + // permutations of the nodes: see http://en.wikipedia.org/wiki/Automorphism; + // and these permutations may only map a node onto a node of its equivalence + // class: two nodes i and j are in the same equivalence class iff + // node_equivalence_classes_io[i] == node_equivalence_classes_io[j]; + // + // This set of generators is not necessarily the smallest possible (nor in + // the number of generators, nor in the size of these generators), but it is + // minimal in that no generator can be removed while keeping the generated + // group intact. 
+ // TODO(user): properly verify the minimality in unit tests. + // + // Note that if "generators" is empty, then the graph has no symmetry: the + // only automorphism is the identity. + // + // The equivalence classes are actually an input/output: they are refined + // according to all asymmetries found. In the end, n1 and n2 will be + // considered equivalent (i.e. node_equivalence_classes_io[n1] == + // node_equivalence_classes_io[n2]) if and only if there exists a + // permutation of nodes that: + // - keeps the graph invariant + // - maps n1 onto n2 + // - maps each node to a node of its original equivalence class. + // + // This method also outputs the size of the automorphism group, expressed as + // a factorized product of integers (note that the size itself may be as + // large as N!). + // + // This method is largely based on the following article, published in 2008: + // "Faster Symmetry Discovery using Sparsity of Symmetries" by Darga, Sakallah + // and Markov. http://web.eecs.umich.edu/~imarkov/pubs/conf/dac08-sym.pdf. + void FindSymmetries(std::vector* node_equivalence_classes_io, + std::vector>* generators, + std::vector* factorized_automorphism_group_size); + + // **** Methods below are public FOR TESTING ONLY. **** + + // Fully refine the partition of nodes, using the graph as symmetry breaker. + // This means applying the following steps on each part P of the partition: + // - Compute the aggregated in-degree of all nodes of the graph, only looking + // at arcs originating from nodes in P. + // - For each in-degree d=1...max_in_degree, refine the partition by the set + // of nodes with in-degree d. + // And recursively applying it on all new or modified parts. + // + // In our use cases, we may call this in a scenario where the partition was + // already partially refined on all parts #0...#K, then you should set + // "first_unrefined_part_index" to K+1. 
+ void RecursivelyRefinePartitionByAdjacency( + int first_unrefined_part_index, DynamicPartition* partition); + + // Special wrapper of the above method: assuming that partition is already + // fully refined, further refine it by {node}, and propagate by adjacency. + void DistinguishNodeInPartition(int node, DynamicPartition* partition); + + private: + const Graph& graph_; + + inline int NumNodes() const { return graph_.num_nodes(); } + + // Internal search code used in FindSymmetries(), split out for readability: + // find one permutation (if it exists) that maps root_node to root_image_node + // and such that the image of "base_partition" by that permutation is equal to + // the "image_partition". If no such permutation exists, returns nullptr. + // + // "generators_found_so_far" and "permutations_displacing_node" are used for + // pruning in the search. The former is just the "generators" vector of + // FindGraphSymmetries(), with the permutations found so far; and the latter + // is an inverted index from each node to all permutations (that we found) + // that displace it. + std::unique_ptr FindOneSuitablePermutation( + int root_node, int root_image_node, DynamicPartition* base_partition, + DynamicPartition* image_partition, + const std::vector>& generators_found_so_far, + const std::vector>& permutations_displacing_node); + + // Among a list of permutations (the subset of 'permutation_vector' whose + // indices are "permutation_indices"), find those that are compatible with + // the given partition, i.e. whose orbits are all included in a partition's + // part (distinct orbits may be included in distinct parts). + // This is used uniquely within FindOneSuitablePermutation(). + void FindPermutationsCompatibleWithPartition( + const std::vector& permutation_indices, + const std::vector>& permutation_vector, + DynamicPartition* partition, + std::vector* compatible_permutations); + + // Temporary vector used by IsGraphAutomorphism(). 
When not in use, the + // mapping should be tmp_node_mapping_[i] = i for all i. + mutable std::vector tmp_node_mapping_; + mutable std::vector tmp_node_mask_; + + // Temporary vectors transiently used by RefinePartitionByAdjacencyFrom(), + // and cleaned up after each call. + std::vector tmp_in_degree_; + std::vector tmp_nodes_with_nonzero_indegree_; + std::vector> tmp_nodes_with_indegree_; + + // Temporary data used within FindOneSuitablePermutation(). + MergingPartition tmp_partition_; + std::vector tmp_compatible_permutations_; + + // Data structure used by FindOneSuitablePermutation(). See the .cc + struct SearchState { + int base_node; + std::vector potential_image_nodes; + int num_parts_before_trying_to_map_base_node; + bool potential_image_nodes_were_pruned; + }; + std::vector search_states_; +}; + +} // namespace operations_research + +#endif // OR_TOOLS_ALGORITHMS_FIND_GRAPH_SYMMETRIES_H_ diff --git a/src/algorithms/knapsack_solver.h b/src/algorithms/knapsack_solver.h index f92c62f1fb..9fbf615922 100644 --- a/src/algorithms/knapsack_solver.h +++ b/src/algorithms/knapsack_solver.h @@ -65,7 +65,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" namespace operations_research { diff --git a/src/algorithms/sparse_permutation.cc b/src/algorithms/sparse_permutation.cc new file mode 100644 index 0000000000..b6ae7863e3 --- /dev/null +++ b/src/algorithms/sparse_permutation.cc @@ -0,0 +1,54 @@ +// Copyright 2010-2013 Google +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +#include "algorithms/sparse_permutation.h" + +#include +#include "base/logging.h" +#include "base/join.h" + +namespace operations_research { + +void SparsePermutation::RemoveCycles(const std::vector& cycle_indices) { + // TODO(user): make this a class member to avoid allocation if the complexity + // becomes an issue. In this case, also optimize the loop below by not copying + // the first cycles. + std::vector should_be_deleted(NumCycles(), false); + for (int i : cycle_indices) { + DCHECK_GE(i, 0); + DCHECK_LT(i, NumCycles()); + DCHECK(!should_be_deleted[i]) + << "Duplicate index given to RemoveCycles(): " << i; + should_be_deleted[i] = true; + } + int new_cycles_size = 0; // new index in cycles_ + int new_cycle_ends_size = 0; // new index in cycle_ends_ + int start = 0; + for (int i = 0; i < NumCycles(); ++i) { + const int end = cycle_ends_[i]; + if (!should_be_deleted[i]) { + for (int j = start; j < end; ++j) { + cycles_[new_cycles_size++] = cycles_[j]; + } + cycle_ends_[new_cycle_ends_size++] = new_cycles_size; + } + start = end; + } + cycles_.resize(new_cycles_size); + cycle_ends_.resize(new_cycle_ends_size); +} + +std::string SparsePermutation::DebugString() const { + return ""; +} + +} // namespace operations_research diff --git a/src/algorithms/sparse_permutation.h b/src/algorithms/sparse_permutation.h new file mode 100644 index 0000000000..661ac8ca2e --- /dev/null +++ b/src/algorithms/sparse_permutation.h @@ -0,0 +1,125 @@ +// Copyright 2010-2013 Google +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#ifndef OR_TOOLS_ALGORITHMS_SPARSE_PERMUTATION_H_ +#define OR_TOOLS_ALGORITHMS_SPARSE_PERMUTATION_H_ + +#include +#include + +#include "base/logging.h" + +namespace operations_research { + +// A compact representation for permutations of {0..N-1} that displace few +// elements: it needs O(K) memory for a permutation that displace K elements. +class SparsePermutation { + public: + explicit SparsePermutation(int size) : size_(size) {} // Identity. + + // TODO(user, fdid): complete the reader API. + int Size() const { return size_; } + int NumCycles() const { return cycle_ends_.size(); } + + // Returns the "support" of this permutation, that is the set of elements + // displaced by it. + const std::vector& Support() const { return cycles_; } + + // The permutation has NumCycles() cycles numbered 0 .. NumCycles()-1. + // To iterate over cycle #i of the permutation, do this: + // for (const int e : permutation.Cycle(i)) { ... + struct Iterator; + Iterator Cycle(int i) const; + + // This is useful to iterate over the pair {element, image} of a permutation: + // + // for (int c = 0; c < perm.NumCycles(); ++c) { + // int element = LastElementInCycle(c); + // for (int image : perm.Cycle(c)) { + // // The pair is (element, image). + // ... + // element = image; + // } + // } + // + // TODO(user): Provide a full iterator for this? note that we have more + // information with the loop above. Not sure it is needed though. 
+ int LastElementInCycle(int i) const; + + // To add a cycle to the permutation, repeatedly call AddToCurrentCycle() + // with the cycle's orbit, then call CloseCurrentCycle(); + // This shouldn't be called on trivial cycles (of length 1). + void AddToCurrentCycle(int x); + void CloseCurrentCycle(); + + // Removes the cycles with given indices from the permutation. This + // works in O(K) for a permutation displacing K elements. + void RemoveCycles(const std::vector& cycle_indices); + + // Output all non-identity cycles of the permutation, sorted + // lexicographically (each cycle is described starting by its smallest + // element; and all cycles are sorted lexicographically against each other). + // This isn't efficient; use for debugging only. + // Example: "(1 4 3) (5 9) (6 8 7)". + std::string DebugString() const; + + private: + const int size_; + std::vector cycles_; + std::vector cycle_ends_; +}; + +inline void SparsePermutation::AddToCurrentCycle(int x) { + DCHECK_GE(x, 0); + DCHECK_LT(x, size_); + cycles_.push_back(x); +} + +inline void SparsePermutation::CloseCurrentCycle() { + if (cycle_ends_.empty()) { + DCHECK_GE(cycles_.size(), 2); + } else { + DCHECK_GE(cycles_.size(), cycle_ends_.back() + 2); + } + cycle_ends_.push_back(cycles_.size()); +} + +struct SparsePermutation::Iterator { + // These typedefs allow this iterator to be used within testing::ElementsAre. + typedef int value_type; + typedef std::vector::const_iterator const_iterator; + + std::vector::const_iterator begin() const { return begin_; } + std::vector::const_iterator end() const { return end_; } + const std::vector::const_iterator begin_; + const std::vector::const_iterator end_; + + int size() const { return end_ - begin_; } +}; + +inline SparsePermutation::Iterator SparsePermutation::Cycle(int i) const { + DCHECK_GE(i, 0); + DCHECK_LT(i, NumCycles()); + return {cycles_.begin() + (i == 0 ? 
0 : cycle_ends_[i - 1]), + cycles_.begin() + cycle_ends_[i]}; +} + +inline int SparsePermutation::LastElementInCycle(int i) const { + DCHECK_GE(i, 0); + DCHECK_LT(i, cycle_ends_.size()); + DCHECK_GT(cycle_ends_[i], i == 0 ? 0 : cycle_ends_[i - 1]); + return cycles_[cycle_ends_[i] - 1]; +} + +} // namespace operations_research + +#endif // OR_TOOLS_ALGORITHMS_SPARSE_PERMUTATION_H_ diff --git a/src/base/random.cc b/src/base/random.cc index 45adfd703c..641b560785 100644 --- a/src/base/random.cc +++ b/src/base/random.cc @@ -71,13 +71,13 @@ static inline uint32 Word32At(const char* ptr) { int32 ACMRandom::HostnamePidTimeSeed() { char name[PATH_MAX + 20]; // need 12 bytes for 3 'empty' uint32's - assert(sizeof(name) - PATH_MAX > sizeof(uint32) * 3); // NOLINT + assert(sizeof(name) - PATH_MAX > sizeof(uint32) * 3); if (gethostname(name, PATH_MAX) != 0) { strcpy(name, "default-hostname"); // NOLINT } const int namelen = strlen(name); - for (int i = 0; i < sizeof(uint32) * 3; ++i) { // NOLINT + for (int i = 0; i < sizeof(uint32) * 3; ++i) { name[namelen + i] = '\0'; // so we mix 0's once we get to end-of-std::string } #if defined(__GNUC__) @@ -92,10 +92,10 @@ int32 ACMRandom::HostnamePidTimeSeed() { return 0; #endif uint32 c = 0; - for (int i = 0; i < namelen; i += sizeof(uint32) * 3) { // NOLINT + for (int i = 0; i < namelen; i += sizeof(uint32) * 3) { a += Word32At(name + i); - b += Word32At(name + i + sizeof(uint32)); // NOLINT - c += Word32At(name + i + sizeof(uint32) + sizeof(uint32)); // NOLINT + b += Word32At(name + i + sizeof(uint32)); + c += Word32At(name + i + sizeof(uint32) + sizeof(uint32)); mix(a, b, c); } c += namelen; // one final mix diff --git a/src/constraint_solver/alldiff_cst.cc b/src/constraint_solver/alldiff_cst.cc index ffbcee9158..687e7d011c 100644 --- a/src/constraint_solver/alldiff_cst.cc +++ b/src/constraint_solver/alldiff_cst.cc @@ -20,7 +20,6 @@ #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include 
"constraint_solver/constraint_solver.h" #include "constraint_solver/constraint_solveri.h" #include "util/string_array.h" diff --git a/src/constraint_solver/assignment.cc b/src/constraint_solver/assignment.cc index 7087b8082e..0b75207179 100644 --- a/src/constraint_solver/assignment.cc +++ b/src/constraint_solver/assignment.cc @@ -522,7 +522,7 @@ template void RealLoad(const AssignmentProto& assignment_proto, Container* const container, int (AssignmentProto::*GetSize)() const, - const Proto& (AssignmentProto::*GetElem)(int) const) { // NOLINT + const Proto& (AssignmentProto::*GetElem)(int) const) { bool fast_load = (container->Size() == (assignment_proto.*GetSize)()); for (int i = 0; fast_load && i < (assignment_proto.*GetSize)(); ++i) { Element* const element = container->MutableElement(i); diff --git a/src/constraint_solver/collect_variables.cc b/src/constraint_solver/collect_variables.cc index 2093378dea..8c000493fb 100644 --- a/src/constraint_solver/collect_variables.cc +++ b/src/constraint_solver/collect_variables.cc @@ -19,7 +19,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/stl_util.h" #include "constraint_solver/constraint_solver.h" #include "constraint_solver/constraint_solveri.h" diff --git a/src/constraint_solver/constraint_solver.cc b/src/constraint_solver/constraint_solver.cc index 8c566b73df..bd2211a1ea 100644 --- a/src/constraint_solver/constraint_solver.cc +++ b/src/constraint_solver/constraint_solver.cc @@ -26,7 +26,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/file.h" #include "base/recordio.h" @@ -2520,13 +2519,13 @@ bool Solver::HasName(const PropagationBaseObject* const object) const { // ------------------ Useful Operators ------------------ -std::ostream& operator<<(std::ostream& out, const Solver* const s) { // NOLINT +std::ostream& 
operator<<(std::ostream& out, const Solver* const s) { out << s->DebugString(); return out; } std::ostream& operator<<(std::ostream& out, - const BaseObject* const o) { // NOLINT + const BaseObject* const o) { out << o->DebugString(); return out; } diff --git a/src/constraint_solver/constraint_solver.h b/src/constraint_solver/constraint_solver.h index 411f26d5a1..8e93575722 100644 --- a/src/constraint_solver/constraint_solver.h +++ b/src/constraint_solver/constraint_solver.h @@ -1651,6 +1651,13 @@ class Solver { const std::vector& active, const std::vector& cumuls, const std::vector& transits); + // Delayed version of the same constraint: propagation on the nexts variables + // is delayed until all constraints have propagated. + // TODO(user): Merge with other path-cumuls constraints. + Constraint* MakeDelayedPathCumul(const std::vector& nexts, + const std::vector& active, + const std::vector& cumuls, + const std::vector& transits); // Creates a constraint which accumulates values along a path such that: // cumuls[next[i]] = cumuls[i] + transit_evaluator(i, next[i]). 
// Active variables indicate if the corresponding next variable is active; diff --git a/src/constraint_solver/constraint_solveri.h b/src/constraint_solver/constraint_solveri.h index d26a0b29d3..824759433e 100644 --- a/src/constraint_solver/constraint_solveri.h +++ b/src/constraint_solver/constraint_solveri.h @@ -62,7 +62,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/sysinfo.h" #include "base/timer.h" #include "base/join.h" diff --git a/src/constraint_solver/count_cst.cc b/src/constraint_solver/count_cst.cc index 73bccad6cf..403bb4c60d 100644 --- a/src/constraint_solver/count_cst.cc +++ b/src/constraint_solver/count_cst.cc @@ -19,7 +19,6 @@ #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/concise_iterator.h" #include "constraint_solver/constraint_solver.h" diff --git a/src/constraint_solver/default_search.cc b/src/constraint_solver/default_search.cc index 8e37ff1c50..b75e81b08b 100644 --- a/src/constraint_solver/default_search.cc +++ b/src/constraint_solver/default_search.cc @@ -23,7 +23,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/stl_util.h" #include "constraint_solver/constraint_solver.h" diff --git a/src/constraint_solver/deviation.cc b/src/constraint_solver/deviation.cc index ab838cc395..8b43b37d44 100644 --- a/src/constraint_solver/deviation.cc +++ b/src/constraint_solver/deviation.cc @@ -19,7 +19,6 @@ #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/mathutil.h" #include "constraint_solver/constraint_solver.h" @@ -95,7 +94,8 @@ class Deviation : public Constraint { RepairGreedySum(BuildGreedySum(true)); int64 minimal_deviation = 0; for (int i = 0; i < size_; ++i) { - minimal_deviation += 
abs(scaled_vars_assigned_value_[i] - total_sum_); + minimal_deviation += + std::abs(scaled_vars_assigned_value_[i] - total_sum_); } return minimal_deviation; } diff --git a/src/constraint_solver/diffn.cc b/src/constraint_solver/diffn.cc index 16376aa943..5722fbbfde 100644 --- a/src/constraint_solver/diffn.cc +++ b/src/constraint_solver/diffn.cc @@ -16,7 +16,6 @@ #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/concise_iterator.h" #include "base/int_type.h" diff --git a/src/constraint_solver/element.cc b/src/constraint_solver/element.cc index bb434616c1..90fe5ab3b2 100644 --- a/src/constraint_solver/element.cc +++ b/src/constraint_solver/element.cc @@ -19,7 +19,6 @@ #include "base/callback.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "constraint_solver/constraint_solver.h" #include "constraint_solver/constraint_solveri.h" diff --git a/src/constraint_solver/expr_array.cc b/src/constraint_solver/expr_array.cc index d83ca237bb..6fbe8d02c8 100644 --- a/src/constraint_solver/expr_array.cc +++ b/src/constraint_solver/expr_array.cc @@ -20,7 +20,6 @@ #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/mathutil.h" #include "constraint_solver/constraint_solver.h" diff --git a/src/constraint_solver/expr_cst.cc b/src/constraint_solver/expr_cst.cc index 7c1eae5ece..4896832921 100644 --- a/src/constraint_solver/expr_cst.cc +++ b/src/constraint_solver/expr_cst.cc @@ -21,7 +21,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "constraint_solver/constraint_solver.h" #include "constraint_solver/constraint_solveri.h" diff --git a/src/constraint_solver/expressions.cc b/src/constraint_solver/expressions.cc index 
d4b3b55842..44b39b09d3 100644 --- a/src/constraint_solver/expressions.cc +++ b/src/constraint_solver/expressions.cc @@ -22,7 +22,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/map_util.h" #include "constraint_solver/constraint_solver.h" @@ -6649,7 +6648,7 @@ void IntVar::SetValues(const std::vector& values) { // STLSortAndRemoveDuplicates from base/stl_util.h to the // existing open_source/base/stl_util.h and using it here. // TODO(user): We could filter out values not in the var. - std::vector& tmp = solver()->tmp_vector_; // NOLINT + std::vector& tmp = solver()->tmp_vector_; tmp.clear(); tmp.insert(tmp.end(), values.begin(), values.end()); std::sort(tmp.begin(), tmp.end()); @@ -6726,7 +6725,7 @@ bool Solver::IsADifference(IntExpr* expr, IntExpr** const left, } // This is a dynamic cast to check the type of expr. // It returns nullptr is expr is not a subclass of SubIntExpr. 
- SubIntExpr* const sub_expr = dynamic_cast(expr); // NOLINT + SubIntExpr* const sub_expr = dynamic_cast(expr); if (sub_expr != nullptr) { *left = sub_expr->left(); *right = sub_expr->right(); diff --git a/src/constraint_solver/gcc.cc b/src/constraint_solver/gcc.cc index a3cdc2ed01..49ad33fcbb 100644 --- a/src/constraint_solver/gcc.cc +++ b/src/constraint_solver/gcc.cc @@ -23,7 +23,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/int_type_indexed_vector.h" #include "base/int_type.h" #include "base/map_util.h" diff --git a/src/constraint_solver/graph_constraints.cc b/src/constraint_solver/graph_constraints.cc index a09f6375ae..d84213ca73 100644 --- a/src/constraint_solver/graph_constraints.cc +++ b/src/constraint_solver/graph_constraints.cc @@ -850,6 +850,287 @@ bool PathCumul::AcceptLink(int i, int j) const { CapSub(cumul_j->Min(), cumul_i->Max()) <= transit_i->Max(); } +namespace { +template class StampedVector { + public: + StampedVector() : stamp_(0) {} + const std::vector& Values(Solver* solver) { + CheckStamp(solver); + return values_; + } + void PushBack(Solver* solver, const T& value) { + CheckStamp(solver); + values_.push_back(value); + } + void Clear(Solver* solver) { + values_.clear(); + stamp_ = solver->fail_stamp(); + } + + private: + void CheckStamp(Solver* solver) { + if (solver->fail_stamp() > stamp_) { + Clear(solver); + } + } + + std::vector values_; + uint64 stamp_; +}; +} // namespace + +class DelayedPathCumul : public Constraint { + public: + DelayedPathCumul(Solver* const solver, + const std::vector& nexts, + const std::vector& active, + const std::vector& cumuls, + const std::vector& transits) + : Constraint(solver), + nexts_(nexts), + active_(active), + cumuls_(cumuls), + transits_(transits), + cumul_transit_demons_(cumuls.size(), nullptr), + path_demon_(nullptr), + touched_(), + chain_starts_(cumuls.size(), -1), + chain_ends_(cumuls.size(), -1), + 
is_chain_start_(cumuls.size(), false), + prevs_(cumuls.size(), -1), + supports_(nexts.size()), + was_bound_(nexts.size(), false), + has_cumul_demon_(cumuls.size(), false) { + for (int64 i = 0; i < cumuls_.size(); ++i) { + cumul_transit_demons_[i] = MakeDelayedConstraintDemon1( + solver, this, &DelayedPathCumul::CumulRange, "CumulRange", i); + chain_starts_[i] = i; + chain_ends_[i] = i; + } + path_demon_ = solver->RegisterDemon(MakeDelayedConstraintDemon0( + solver, this, &DelayedPathCumul::PropagatePaths, "PropagatePaths")); + for (int i = 0; i < nexts_.size(); ++i) { + supports_[i] = -1; + } + } + virtual ~DelayedPathCumul() {} + virtual void Post() { + for (int i = 0; i < nexts_.size(); ++i) { + if (!nexts_[i]->Bound()) { + Demon* const demon = + MakeConstraintDemon1(solver(), this, &DelayedPathCumul::NextBound, + "NextBound", i); + nexts_[i]->WhenBound(demon); + } + } + for (int i = 0; i < active_.size(); ++i) { + if (!active_[i]->Bound()) { + Demon* const demon = + MakeConstraintDemon1(solver(), this, &DelayedPathCumul::ActiveBound, + "ActiveBound", i); + active_[i]->WhenBound(demon); + } + } + } + virtual void InitialPropagate() { + touched_.Clear(solver()); + for (int i = 0; i < nexts_.size(); ++i) { + if (nexts_[i]->Bound()) { + NextBound(i); + } + } + for (int i = 0; i < active_.size(); ++i) { + if (active_[i]->Bound()) { + ActiveBound(i); + } + } + } + // TODO(user): Merge NextBound and ActiveBound to re-use the same demon + // for next and active variables. + void NextBound(int index) { + if (active_[index]->Min() > 0) { + const int next = nexts_[index]->Min(); + PropagateLink(index, next); + touched_.PushBack(solver(), index); + EnqueueDelayedDemon(path_demon_); + } + } + void ActiveBound(int index) { + if (nexts_[index]->Bound()) { + NextBound(index); + } + } + void PropagatePaths() { + // Detecting new chains. 
+ const std::vector& touched_values = touched_.Values(solver()); + for (const int touched : touched_values) { + chain_starts_[touched] = touched; + chain_ends_[touched] = touched; + is_chain_start_[touched] = false; + const int next = nexts_[touched]->Min(); + chain_starts_[next] = next; + chain_ends_[next] = next; + is_chain_start_[next] = false; + } + for (const int touched : touched_values) { + if (touched >= nexts_.size()) continue; + IntVar* const next_var = nexts_[touched]; + if (!was_bound_[touched] && next_var->Bound() && + active_[touched]->Min() > 0) { + const int64 next = next_var->Min(); + was_bound_.SetValue(solver(), touched, true); + chain_starts_[chain_ends_[next]] = chain_starts_[touched]; + chain_ends_[chain_starts_[touched]] = chain_ends_[next]; + is_chain_start_[next] = false; + is_chain_start_[chain_starts_[touched]] = true; + } + } + // Propagating new chains. + for (const int touched : touched_values) { + // Is touched the start of a chain ? + if (is_chain_start_[touched]) { + // Propagate min cumuls from chain_starts[touch] to chain_ends_[touch]. + int64 current = touched; + int64 next = nexts_[current]->Min(); + while (current != chain_ends_[touched]) { + prevs_.SetValue(solver(), next, current); + PropagateLink(current, next); + current = next; + if (current != chain_ends_[touched]) { + next = nexts_[current]->Min(); + } + } + // Propagate max cumuls from chain_ends_[i] to chain_starts_[i]. + int64 prev = prevs_[current]; + while (current != touched) { + PropagateLink(prev, current); + current = prev; + if (current != touched) { + prev = prevs_[current]; + } + } + // Now that the chain has been propagated in both directions, adding + // demons for the corresponding cumul and transit variables for + // future changes in their range. 
+ current = touched; + while (current != chain_ends_[touched]) { + if (!has_cumul_demon_[current]) { + Demon* const demon = cumul_transit_demons_[current]; + cumuls_[current]->WhenRange(demon); + transits_[current]->WhenRange(demon); + has_cumul_demon_.SetValue(solver(), current, true); + } + current = nexts_[current]->Min(); + } + if (!has_cumul_demon_[current]) { + Demon* const demon = cumul_transit_demons_[current]; + cumuls_[current]->WhenRange(demon); + if (current < transits_.size()) { + transits_[current]->WhenRange(demon); + UpdateSupport(current); + } + has_cumul_demon_.SetValue(solver(), current, true); + } + } + } + touched_.Clear(solver()); + } + + void Accept(ModelVisitor* const visitor) const { + visitor->BeginVisitConstraint(ModelVisitor::kPathCumul, this); + visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kNextsArgument, + nexts_); + visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kActiveArgument, + active_); + visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kCumulsArgument, + cumuls_); + visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kTransitsArgument, + transits_); + visitor->EndVisitConstraint(ModelVisitor::kPathCumul, this); + } + + std::string DebugString() const { + std::string out = "DelayedPathCumul("; + for (int i = 0; i < nexts_.size(); ++i) { + out += nexts_[i]->DebugString() + " " + cumuls_[i]->DebugString(); + } + out += ")"; + return out; + } + + private: + void CumulRange(int64 index) { + if (index < nexts_.size()) { + if (nexts_[index]->Bound()) { + if (active_[index]->Min() > 0) { + PropagateLink(index, nexts_[index]->Min()); + } + } else { + UpdateSupport(index); + } + } + if (prevs_[index] >= 0) { + PropagateLink(prevs_[index], index); + } else { + for (int i = 0; i < nexts_.size(); ++i) { + if (index == supports_[i]) { + UpdateSupport(i); + } + } + } + } + void UpdateSupport(int index) { + int support = supports_[index]; + if (support < 0 || !AcceptLink(index, support)) { + IntVar* const next 
= nexts_[index]; + for (int i = next->Min(); i <= next->Max(); ++i) { + if (i != support && AcceptLink(index, i)) { + supports_[index] = i; + return; + } + } + active_[index]->SetMax(0); + } + } + void PropagateLink(int64 index, int64 next) { + IntVar* const cumul_var = cumuls_[index]; + IntVar* const next_cumul_var = cumuls_[next]; + IntVar* const transit = transits_[index]; + const int64 transit_min = transit->Min(); + const int64 transit_max = transit->Max(); + next_cumul_var->SetMin(CapAdd(cumul_var->Min(), transit_min)); + next_cumul_var->SetMax(CapAdd(cumul_var->Max(), transit_max)); + const int64 next_cumul_min = next_cumul_var->Min(); + const int64 next_cumul_max = next_cumul_var->Max(); + cumul_var->SetMin(CapSub(next_cumul_min, transit_max)); + cumul_var->SetMax(CapSub(next_cumul_max, transit_min)); + transit->SetMin(CapSub(next_cumul_min, cumul_var->Max())); + transit->SetMax(CapSub(next_cumul_max, cumul_var->Min())); + } + bool AcceptLink(int index, int next) const { + IntVar* const cumul_var = cumuls_[index]; + IntVar* const next_cumul_var = cumuls_[next]; + IntVar* const transit = transits_[index]; + return transit->Min() <= CapSub(next_cumul_var->Max(), cumul_var->Min()) && + CapSub(next_cumul_var->Min(), cumul_var->Max()) <= transit->Max(); + } + + const std::vector nexts_; + const std::vector active_; + const std::vector cumuls_; + const std::vector transits_; + std::vector cumul_transit_demons_; + Demon* path_demon_; + StampedVector touched_; + std::vector chain_starts_; + std::vector chain_ends_; + std::vector is_chain_start_; + RevArray prevs_; + std::vector supports_; + RevArray was_bound_; + RevArray has_cumul_demon_; +}; + // cumuls[next[i]] = cumuls[i] + transit_evaluator(i, next[i]) class ResultCallback2PathCumul : public BasePathCumul { @@ -1044,4 +1325,13 @@ Constraint* Solver::MakePathCumul(const std::vector& nexts, return RevAlloc(new ResultCallback2SlackPathCumul(this, nexts, active, cumuls, slacks, transit_evaluator)); } + 
+Constraint* Solver::MakeDelayedPathCumul(const std::vector& nexts, + const std::vector& active, + const std::vector& cumuls, + const std::vector& transits) { + CHECK_EQ(nexts.size(), active.size()); + CHECK_EQ(transits.size(), nexts.size()); + return RevAlloc(new DelayedPathCumul(this, nexts, active, cumuls, transits)); +} } // namespace operations_research diff --git a/src/constraint_solver/hybrid.cc b/src/constraint_solver/hybrid.cc index 1190ad7786..5b90eeb868 100644 --- a/src/constraint_solver/hybrid.cc +++ b/src/constraint_solver/hybrid.cc @@ -18,7 +18,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/stl_util.h" #include "constraint_solver/constraint_solver.h" #include "constraint_solver/constraint_solveri.h" diff --git a/src/constraint_solver/io.cc b/src/constraint_solver/io.cc index 8456558979..1eaa8d4a75 100644 --- a/src/constraint_solver/io.cc +++ b/src/constraint_solver/io.cc @@ -22,7 +22,6 @@ #include "base/callback.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/concise_iterator.h" #include "base/map_util.h" #include "base/stl_util.h" diff --git a/src/constraint_solver/local_search.cc b/src/constraint_solver/local_search.cc index c96efd7c8b..34357c0f24 100644 --- a/src/constraint_solver/local_search.cc +++ b/src/constraint_solver/local_search.cc @@ -26,7 +26,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/bitmap.h" #include "base/concise_iterator.h" #include "base/map_util.h" diff --git a/src/constraint_solver/model_cache.cc b/src/constraint_solver/model_cache.cc index 51b9c2deaa..6de0868d40 100644 --- a/src/constraint_solver/model_cache.cc +++ b/src/constraint_solver/model_cache.cc @@ -17,7 +17,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include 
"base/scoped_ptr.h" #include "base/stl_util.h" #include "constraint_solver/constraint_solver.h" #include "constraint_solver/constraint_solveri.h" diff --git a/src/constraint_solver/mtsearch.cc b/src/constraint_solver/mtsearch.cc index 6781796be9..640f14ecda 100644 --- a/src/constraint_solver/mtsearch.cc +++ b/src/constraint_solver/mtsearch.cc @@ -20,7 +20,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/mutex.h" -#include "base/scoped_ptr.h" #include "base/synchronization.h" #include "base/threadpool.h" #include "constraint_solver/assignment.pb.h" diff --git a/src/constraint_solver/pack.cc b/src/constraint_solver/pack.cc index ddf0fcbb0a..5d6bac0b19 100644 --- a/src/constraint_solver/pack.cc +++ b/src/constraint_solver/pack.cc @@ -22,7 +22,6 @@ #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/concise_iterator.h" #include "constraint_solver/constraint_solver.h" diff --git a/src/constraint_solver/resource.cc b/src/constraint_solver/resource.cc index f2f4257929..a466091ad1 100644 --- a/src/constraint_solver/resource.cc +++ b/src/constraint_solver/resource.cc @@ -29,7 +29,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/join.h" #include "base/stl_util.h" diff --git a/src/constraint_solver/routing.cc b/src/constraint_solver/routing.cc index cedf5fbf3d..89f0958db2 100644 --- a/src/constraint_solver/routing.cc +++ b/src/constraint_solver/routing.cc @@ -25,7 +25,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/bitmap.h" #include "base/concise_iterator.h" #include "base/map_util.h" @@ -942,7 +941,7 @@ bool RoutingModel::AddDimensionWithCapacityInternal( } dimension->Initialize(vehicle_capacity, capacity, cached_evaluators, slack_max); - 
solver_->AddConstraint(solver_->MakePathCumul( + solver_->AddConstraint(solver_->MakeDelayedPathCumul( nexts_, active_, dimension->cumuls(), dimension->transits())); if (fix_start_cumul_to_zero) { for (int i = 0; i < vehicles_; ++i) { @@ -1379,7 +1378,8 @@ void RoutingModel::CloseModel() { } std::vector zero_transit(size, solver_->MakeIntConst(Zero())); solver_->AddConstraint( - solver_->MakePathCumul(nexts_, active_, vehicle_vars_, zero_transit)); + solver_->MakeDelayedPathCumul( + nexts_, active_, vehicle_vars_, zero_transit)); // Add constraints to bind vehicle_vars_[i] to -1 in case that node i is not // active. @@ -1404,7 +1404,8 @@ void RoutingModel::CloseModel() { // is_bound_to_end_ variables constraints. solver_->AddConstraint( - solver_->MakePathCumul(nexts_, active_, is_bound_to_end_, zero_transit)); + solver_->MakeDelayedPathCumul( + nexts_, active_, is_bound_to_end_, zero_transit)); for (const int64 end : ends_) { is_bound_to_end_[end]->SetValue(1); } @@ -3543,13 +3544,13 @@ RoutingModel::GetOrCreateLocalSearchFilters() { LocalSearchFilter* filter = solver_->MakeLocalSearchObjectiveFilter( nexts_, NewPermanentCallback(this, &RoutingModel::GetHomogeneousCost), - objective_callback, cost_, Solver::EQ, Solver::SUM); + objective_callback, cost_, Solver::LE, Solver::SUM); filters_.push_back(filter); } else { LocalSearchFilter* filter = solver_->MakeLocalSearchObjectiveFilter( nexts_, vehicle_vars_, NewPermanentCallback(this, &RoutingModel::GetArcCostForVehicle), - objective_callback, cost_, Solver::EQ, Solver::SUM); + objective_callback, cost_, Solver::LE, Solver::SUM); filters_.push_back(filter); } } diff --git a/src/constraint_solver/routing.h b/src/constraint_solver/routing.h index f835c0db19..eb75cbaa42 100644 --- a/src/constraint_solver/routing.h +++ b/src/constraint_solver/routing.h @@ -165,7 +165,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include 
"base/int_type_indexed_vector.h" #include "base/int_type.h" #include "base/hash.h" diff --git a/src/constraint_solver/routing_search.cc b/src/constraint_solver/routing_search.cc index f078983333..e0401f0f49 100644 --- a/src/constraint_solver/routing_search.cc +++ b/src/constraint_solver/routing_search.cc @@ -116,10 +116,11 @@ class NodeDisjunctionFilter : public RoutingLocalSearchFilter { return true; } else { IntVar* const cost_var = routing_model_.CostVar(); - return new_objective_value <= cost_var->Max() && - new_objective_value >= cost_var->Min(); + // Only compare to max as a cost lower bound is computed. + return new_objective_value <= cost_var->Max(); } } + virtual std::string DebugString() const { return "NodeDisjunctionFilter"; } private: virtual void OnSynchronize() { @@ -172,20 +173,25 @@ class BasePathFilter : public RoutingLocalSearchFilter { protected: static const int64 kUnassigned; - int64 GetNext(const Assignment::IntContainer& container, int64 node) const; + int64 GetNext(int64 node) const { + return (new_nexts_[node] == kUnassigned) + ? (IsVarSynced(node) ? 
Value(node) : kUnassigned) + : new_nexts_[node]; + } int NumPaths() const { return starts_.size(); } int64 Start(int i) const { return starts_[i]; } int GetPath(int64 node) const { return paths_[node]; } private: virtual void InitializeAcceptPath() {} - virtual bool AcceptPath(const Assignment::IntContainer& container, - int64 path_start) = 0; + virtual bool AcceptPath(int64 path_start) = 0; virtual bool FinalizeAcceptPath() { return true; } std::vector node_path_starts_; std::vector starts_; std::vector paths_; + std::vector new_nexts_; + std::vector delta_touched_; }; const int64 BasePathFilter::kUnassigned = -1; @@ -195,12 +201,18 @@ BasePathFilter::BasePathFilter(const std::vector& nexts, Callback1* objective_callback) : RoutingLocalSearchFilter(nexts, objective_callback), node_path_starts_(next_domain_size, kUnassigned), - paths_(nexts.size(), -1) {} + paths_(nexts.size(), -1), + new_nexts_(nexts.size(), kUnassigned) {} bool BasePathFilter::Accept(const Assignment* delta, const Assignment* deltadelta) { + for (const int touched : delta_touched_) { + new_nexts_[touched] = kUnassigned; + } + delta_touched_.clear(); const Assignment::IntContainer& container = delta->IntVarContainer(); const int delta_size = container.Size(); + delta_touched_.reserve(delta_size); // Determining touched paths. Number of touched paths should be very small // given the set of available operators (1 or 2 paths), so performing // a linear search to find an element is faster than using a set. 
@@ -210,6 +222,13 @@ bool BasePathFilter::Accept(const Assignment* delta, IntVar* const var = new_element.Var(); int64 index = kUnassigned; if (FindIndex(var, &index)) { + if (new_element.Bound()) { + new_nexts_[index] = new_element.Value(); + delta_touched_.push_back(index); + } else { + // LNS detected + return true; + } const int64 start = node_path_starts_[index]; if (start != kUnassigned && find(touched_paths.begin(), touched_paths.end(), start) == @@ -222,7 +241,7 @@ bool BasePathFilter::Accept(const Assignment* delta, InitializeAcceptPath(); bool accept = true; for (const int touched_path : touched_paths) { - if (!AcceptPath(container, touched_path)) { + if (!AcceptPath(touched_path)) { accept = false; break; } @@ -273,21 +292,6 @@ void BasePathFilter::OnSynchronize() { } } -int64 BasePathFilter::GetNext(const Assignment::IntContainer& container, - int64 node) const { - const IntVar* const next_var = Var(node); - int64 next = IsVarSynced(node) ? Value(node) : kUnassigned; - const IntVarElement* const element = container.ElementPtrOrNull(next_var); - if (element != nullptr) { - if (element->Bound()) { - next = element->Value(); - } else { - return kUnassigned; - } - } - return next; -} - // PathCumul filter. 
class PathCumulFilter : public BasePathFilter { @@ -297,6 +301,9 @@ class PathCumulFilter : public BasePathFilter { Callback1* objective_callback); virtual ~PathCumulFilter() {} virtual void OnSynchronize(); + virtual std::string DebugString() const { + return "PathCumulFilter(" + name_ + ")"; + } private: // This structure stores the "best" path cumul value for a solution, the path @@ -360,8 +367,7 @@ class PathCumulFilter : public BasePathFilter { virtual void InitializeAcceptPath() { cumul_cost_delta_ = total_current_cumul_cost_value_; } - virtual bool AcceptPath(const Assignment::IntContainer& container, - int64 path_start); + virtual bool AcceptPath(int64 path_start); virtual bool FinalizeAcceptPath(); bool FilterSpanCost() const { return global_span_cost_coefficient_ != 0; } @@ -408,6 +414,7 @@ class PathCumulFilter : public BasePathFilter { int64 delta_max_end_cumul_; // Note: small_ordered_set only support non-hash sets. small_ordered_set> delta_paths_; + const std::string name_; bool lns_detected_; }; @@ -430,6 +437,7 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, cost_var_(routing_model.CostVar()), capacity_evaluator_(dimension.capacity_evaluator()), delta_max_end_cumul_(kint64min), + name_(dimension.name()), lns_detected_(false) { for (const int64 coefficient : vehicle_span_cost_coefficients_) { if (coefficient != 0) { @@ -565,8 +573,7 @@ void PathCumulFilter::OnSynchronize() { } } -bool PathCumulFilter::AcceptPath(const Assignment::IntContainer& container, - int64 path_start) { +bool PathCumulFilter::AcceptPath(int64 path_start) { int64 node = path_start; int64 cumul = cumuls_[node]->Min(); cumul_cost_delta_ += GetCumulSoftCost(node, cumul); @@ -577,16 +584,27 @@ bool PathCumulFilter::AcceptPath(const Assignment::IntContainer& container, ? 
kint64max : capacity_evaluator_->Run(vehicle); Solver::IndexEvaluator2* const evaluator = evaluators_[vehicle]; - // Check that the path is feasible with regards to cumul bounds, scanning - // the paths from start to end (caching path node sequences and transits - // for further span cost filtering). + // Evaluating route length to reserve memory to store transit information. + int number_of_route_arcs = 0; while (node < Size()) { - const int64 next = GetNext(container, node); + const int64 next = GetNext(node); + // TODO(user): This shouldn't be needed anymore as the such deltas should + // have been filtered already. if (next == kUnassigned) { - // LNS detected, return true since path was ok up to now. + // LNS detected, return true since other paths were ok up to now. lns_detected_ = true; return true; } + ++number_of_route_arcs; + node = next; + } + delta_path_transits_.ReserveTransits(path, number_of_route_arcs); + // Check that the path is feasible with regards to cumul bounds, scanning + // the paths from start to end (caching path node sequences and transits + // for further span cost filtering). + node = path_start; + while (node < Size()) { + const int64 next = GetNext(node); const int64 transit = evaluator->Run(node, next); total_transit += transit; const int64 transit_slack = transit + slacks_[node]->Min(); @@ -685,8 +703,8 @@ bool PathCumulFilter::FinalizeAcceptPath() { injected_objective_value_ + cumul_cost_delta_ + global_span_cost_coefficient_ * (new_max_end - new_min_start); PropagateObjectiveValue(new_objective_value); - return new_objective_value <= cost_var_->Max() && - new_objective_value >= cost_var_->Min(); + // Only compare to max as a cost lower bound is computed. 
+ return new_objective_value <= cost_var_->Max(); } void PathCumulFilter::InitializeSupportedPathCumul( @@ -723,7 +741,7 @@ class NodePrecedenceFilter : public BasePathFilter { NodePrecedenceFilter(const std::vector& nexts, int next_domain_size, const RoutingModel::NodePairs& pairs); virtual ~NodePrecedenceFilter() {} - bool AcceptPath(const Assignment::IntContainer& container, int64 path_start); + bool AcceptPath(int64 path_start); private: std::vector pair_firsts_; @@ -742,8 +760,7 @@ NodePrecedenceFilter::NodePrecedenceFilter(const std::vector& nexts, } } -bool NodePrecedenceFilter::AcceptPath(const Assignment::IntContainer& container, - int64 path_start) { +bool NodePrecedenceFilter::AcceptPath(int64 path_start) { std::vector visited(Size(), false); int64 node = path_start; int64 path_length = 1; @@ -758,7 +775,7 @@ bool NodePrecedenceFilter::AcceptPath(const Assignment::IntContainer& container, return false; } visited[node] = true; - const int64 next = GetNext(container, node); + const int64 next = GetNext(node); if (next == kUnassigned) { // LNS detected, return true since path was ok up to now. 
return true; diff --git a/src/constraint_solver/sched_constraints.cc b/src/constraint_solver/sched_constraints.cc index 02bec0d11f..9bb7a7fb34 100644 --- a/src/constraint_solver/sched_constraints.cc +++ b/src/constraint_solver/sched_constraints.cc @@ -26,7 +26,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "constraint_solver/constraint_solver.h" #include "constraint_solver/constraint_solveri.h" diff --git a/src/constraint_solver/sched_search.cc b/src/constraint_solver/sched_search.cc index 42ab7f91e1..5da5eb6a96 100644 --- a/src/constraint_solver/sched_search.cc +++ b/src/constraint_solver/sched_search.cc @@ -17,7 +17,6 @@ #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "constraint_solver/constraint_solver.h" #include "constraint_solver/constraint_solveri.h" diff --git a/src/constraint_solver/search.cc b/src/constraint_solver/search.cc index 4055d2fc6c..b473f1c07e 100644 --- a/src/constraint_solver/search.cc +++ b/src/constraint_solver/search.cc @@ -25,7 +25,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/timer.h" #include "base/join.h" @@ -4342,7 +4341,7 @@ SearchMonitor* Solver::MakeConstantRestart(int frequency) { // This is called Symmetry Breaking During Search (Ian Gent, Barbara // Smith, ECAI 2000). 
// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.42.3788&rep=rep1&type=pdf -// // NOLINT +// class SymmetryManager : public SearchMonitor { public: SymmetryManager(Solver* const s, const std::vector& visitors) diff --git a/src/constraint_solver/table.cc b/src/constraint_solver/table.cc index 204c5d6846..0d9078c944 100644 --- a/src/constraint_solver/table.cc +++ b/src/constraint_solver/table.cc @@ -22,7 +22,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/concise_iterator.h" #include "base/map_util.h" diff --git a/src/constraint_solver/trace.cc b/src/constraint_solver/trace.cc index 1c38660151..b60cf2e4c5 100644 --- a/src/constraint_solver/trace.cc +++ b/src/constraint_solver/trace.cc @@ -21,7 +21,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/map_util.h" #include "constraint_solver/constraint_solver.h" diff --git a/src/constraint_solver/tree_monitor.cc b/src/constraint_solver/tree_monitor.cc index 1d32aeb3c7..e43e645156 100644 --- a/src/constraint_solver/tree_monitor.cc +++ b/src/constraint_solver/tree_monitor.cc @@ -22,7 +22,6 @@ #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/file.h" #include "base/concise_iterator.h" diff --git a/src/constraint_solver/visitor.cc b/src/constraint_solver/visitor.cc index 2a837c28cb..c0e260d30b 100644 --- a/src/constraint_solver/visitor.cc +++ b/src/constraint_solver/visitor.cc @@ -20,7 +20,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/concise_iterator.h" #include "base/map_util.h" #include "base/stl_util.h" diff --git a/src/flatzinc/fz.cc b/src/flatzinc/fz.cc index fcbc532847..afbc53e0b9 100644 --- 
a/src/flatzinc/fz.cc +++ b/src/flatzinc/fz.cc @@ -1,38 +1,3 @@ -/* -*- mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- */ -/* - * Main authors: - * Guido Tack - * Modified: - * Laurent Perron for OR-Tools (laurent.perron@gmail.com) - * - * Copyright: - * Guido Tack, 2007 - * - * This file is part of Gecode, the generic constraint - * development environment: - * http://www.gecode.org - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - #include // NOLINT #include // NOLINT #include @@ -41,7 +6,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/threadpool.h" #include "flatzinc/flatzinc.h" diff --git a/src/graph/bellman_ford.cc b/src/graph/bellman_ford.cc index db05f9bc54..409593d3a3 100644 --- a/src/graph/bellman_ford.cc +++ b/src/graph/bellman_ford.cc @@ -16,7 +16,6 @@ #include "base/callback.h" #include "base/integral_types.h" -#include "base/scoped_ptr.h" namespace operations_research { class BellmanFord { diff --git a/src/graph/cliques.cc b/src/graph/cliques.cc index 3b1db680df..baf11d734f 100644 --- a/src/graph/cliques.cc +++ b/src/graph/cliques.cc @@ -20,7 +20,6 @@ #include #include "base/callback.h" -#include "base/scoped_ptr.h" #include "base/hash.h" namespace operations_research { diff --git a/src/graph/cliques.h b/src/graph/cliques.h index 04e7997793..8cd0e38cd5 100644 --- a/src/graph/cliques.h +++ b/src/graph/cliques.h @@ -26,7 +26,6 @@ #include #include "base/callback.h" -#include "base/scoped_ptr.h" template class ResultCallback2; diff --git a/src/graph/dijkstra.cc b/src/graph/dijkstra.cc index a91d4ebcee..ce0ee37565 100644 --- a/src/graph/dijkstra.cc +++ b/src/graph/dijkstra.cc @@ -17,7 +17,6 @@ #include "base/callback.h" #include "base/integral_types.h" -#include "base/scoped_ptr.h" #include "base/adjustable_priority_queue.h" namespace operations_research { diff --git a/src/graph/ebert_graph.h b/src/graph/ebert_graph.h index cb52268725..2778ee038c 100644 --- a/src/graph/ebert_graph.h +++ b/src/graph/ebert_graph.h @@ -177,7 +177,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "util/permutation.h" #include "util/zvector.h" diff --git a/src/graph/graph.swig b/src/graph/graph.swig index 94b2da7a8f..7007b55cd8 100644 --- a/src/graph/graph.swig 
+++ b/src/graph/graph.swig @@ -20,7 +20,6 @@ #include "graph/assignment.h" #include "graph/max_flow.h" #include "graph/min_cost_flow.h" -using std::string; %} #if defined(SWIGPYTHON) diff --git a/src/graph/linear_assignment.h b/src/graph/linear_assignment.h index 0c3547f59d..f571045a60 100644 --- a/src/graph/linear_assignment.h +++ b/src/graph/linear_assignment.h @@ -183,7 +183,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "graph/ebert_graph.h" #include "util/permutation.h" diff --git a/src/graph/max_flow.h b/src/graph/max_flow.h index b8a6fbe995..46a54ab579 100644 --- a/src/graph/max_flow.h +++ b/src/graph/max_flow.h @@ -121,7 +121,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "graph/ebert_graph.h" #include "graph/graph.h" #include "util/stats.h" diff --git a/src/graph/shortestpaths.h b/src/graph/shortestpaths.h index ceefbe269c..dfb86c3a72 100644 --- a/src/graph/shortestpaths.h +++ b/src/graph/shortestpaths.h @@ -25,7 +25,6 @@ #include "base/callback.h" #include "base/integral_types.h" #include "base/macros.h" -#include "base/scoped_ptr.h" namespace operations_research { diff --git a/src/linear_solver/cbc_interface.cc b/src/linear_solver/cbc_interface.cc index 62860af93d..dba4fe7c0a 100644 --- a/src/linear_solver/cbc_interface.cc +++ b/src/linear_solver/cbc_interface.cc @@ -22,7 +22,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/timer.h" #include "base/hash.h" diff --git a/src/linear_solver/clp_interface.cc b/src/linear_solver/clp_interface.cc index 31299e0410..268de7d67a 100644 --- a/src/linear_solver/clp_interface.cc +++ b/src/linear_solver/clp_interface.cc @@ -21,7 +21,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" 
#include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/timer.h" #include "base/strutil.h" diff --git a/src/linear_solver/glpk_interface.cc b/src/linear_solver/glpk_interface.cc index 0e92543584..2c5abe2944 100644 --- a/src/linear_solver/glpk_interface.cc +++ b/src/linear_solver/glpk_interface.cc @@ -24,7 +24,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/timer.h" #include "base/concise_iterator.h" diff --git a/src/linear_solver/gurobi_interface.cc b/src/linear_solver/gurobi_interface.cc index c9a7ae08c5..68673c581c 100644 --- a/src/linear_solver/gurobi_interface.cc +++ b/src/linear_solver/gurobi_interface.cc @@ -22,7 +22,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/timer.h" #include "base/concise_iterator.h" diff --git a/src/linear_solver/linear_solver.cc b/src/linear_solver/linear_solver.cc index cac6f709a5..64d21ee67f 100644 --- a/src/linear_solver/linear_solver.cc +++ b/src/linear_solver/linear_solver.cc @@ -26,7 +26,6 @@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/timer.h" #include "base/file.h" diff --git a/src/linear_solver/linear_solver.h b/src/linear_solver/linear_solver.h index a6738c8f98..8a90556210 100644 --- a/src/linear_solver/linear_solver.h +++ b/src/linear_solver/linear_solver.h @@ -140,7 +140,6 @@ #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/timer.h" diff --git a/src/linear_solver/scip_interface.cc b/src/linear_solver/scip_interface.cc index 9ec499e11b..eb98935ebf 100644 --- a/src/linear_solver/scip_interface.cc +++ b/src/linear_solver/scip_interface.cc @@ -21,7 +21,6 
@@ #include "base/commandlineflags.h" #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/timer.h" #include "base/concise_iterator.h" @@ -403,7 +402,7 @@ void SCIPInterface::ExtractNewConstraints() { SCIP_CONS* scip_constraint = NULL; const bool is_lazy = ct->is_lazy(); // See - // http://scip.zib.de/doc/html/cons__linear_8h.shtml#aa7aed137a4130b35b168812414413481 + // http://scip.zib.de/doc/html/cons__linear_8h.php#aa7aed137a4130b35b168812414413481 // for an explanation of the parameters. ORTOOLS_SCIP_CALL(SCIPcreateConsLinear( scip_, &scip_constraint, ct->name().empty() ? "" : ct->name().c_str(), diff --git a/src/ortools/algorithms/__init__.py b/src/ortools/algorithms/__init__.py index adfe472141..c4e25e6d8d 100644 --- a/src/ortools/algorithms/__init__.py +++ b/src/ortools/algorithms/__init__.py @@ -1,3 +1,3 @@ import os as _os -__path__.append(_os.path.join(__path__[0], '../../gen/ortools/algorithms')) -__path__.append(_os.path.join(__path__[0], '../../../lib')) +__path__.append(_os.path.join(__path__[0], '..', '..', 'gen', 'ortools', 'algorithms')) +__path__.append(_os.path.join(__path__[0], '..', '..', '..', 'lib')) diff --git a/src/ortools/constraint_solver/__init__.py b/src/ortools/constraint_solver/__init__.py index ab8c5a1e15..362e77d8df 100644 --- a/src/ortools/constraint_solver/__init__.py +++ b/src/ortools/constraint_solver/__init__.py @@ -1,3 +1,3 @@ import os as _os -__path__.append(_os.path.join(__path__[0], '../../gen/ortools/constraint_solver')) -__path__.append(_os.path.join(__path__[0], '../../../lib')) +__path__.append(_os.path.join(__path__[0], '..', '..', 'gen', 'ortools', 'constraint_solver')) +__path__.append(_os.path.join(__path__[0], '..', '..', '..', 'lib')) diff --git a/src/ortools/graph/__init__.py b/src/ortools/graph/__init__.py index d3a17f655b..c0dd3b1db2 100644 --- a/src/ortools/graph/__init__.py +++ b/src/ortools/graph/__init__.py @@ -1,3 +1,3 @@ 
import os as _os -__path__.append(_os.path.join(__path__[0], '../../gen/ortools/graph')) -__path__.append(_os.path.join(__path__[0], '../../../lib')) +__path__.append(_os.path.join(__path__[0], '..', '..', 'gen', 'ortools', 'graph')) +__path__.append(_os.path.join(__path__[0], '..', '..', '..', 'lib')) diff --git a/src/ortools/linear_solver/__init__.py b/src/ortools/linear_solver/__init__.py index 8846f4883f..2d0db96333 100644 --- a/src/ortools/linear_solver/__init__.py +++ b/src/ortools/linear_solver/__init__.py @@ -1,3 +1,3 @@ import os as _os -__path__.append(_os.path.join(__path__[0], '../../gen/ortools/linear_solver')) -__path__.append(_os.path.join(__path__[0], '../../../lib')) +__path__.append(_os.path.join(__path__[0], '..', '..', 'gen', 'ortools', 'linear_solver')) +__path__.append(_os.path.join(__path__[0], '..', '..', '..', 'lib')) diff --git a/src/sat/boolean_problem.cc b/src/sat/boolean_problem.cc index 08ac1c9b27..13393ea933 100644 --- a/src/sat/boolean_problem.cc +++ b/src/sat/boolean_problem.cc @@ -12,7 +12,12 @@ // limitations under the License. #include "sat/boolean_problem.h" +#include "base/hash.h" + #include "base/join.h" +#include "base/map_util.h" +#include "base/hash.h" +#include "algorithms/find_graph_symmetries.h" namespace operations_research { namespace sat { @@ -243,5 +248,190 @@ void ExtractSubproblem(const LinearBooleanProblem& problem, } } +namespace { +// A simple class to generate equivalence class number for +// GenerateGraphForSymmetryDetection(). +class IdGenerator { + public: + IdGenerator() {} + + // If the pair (type, coefficient) was never seen before, then generate + // a new id, otherwise return the previously generated id. + int GetId(int type, int64 coefficient) { + const std::pair key(type, coefficient); + return LookupOrInsert(&id_map_, key, id_map_.size()); + } + + private: + hash_map, int> id_map_; +}; +} // namespace. 
+ +// Returns a graph whose automorphisms can be mapped back to the symmetries of +// the given LinearBooleanProblem. +// +// Any permutation of the graph that respects the initial_equivalence_classes +// output can be mapped to a symmetry of the given problem simply by taking its +// restriction on the first 2 * num_variables nodes and interpreting its index +// as a literal index. In a sense, a node with a low enough index #i is in +// one-to-one correspondance with a literals #i (using the index representation +// of literal). +// +// The format of the initial_equivalence_classes is the same as the one +// described in GraphSymmetryFinder::FindSymmetries(). The classes must be dense +// in [0, num_classes) and any symmetry will only map nodes with the same class +// between each other. +template +Graph* GenerateGraphForSymmetryDetection( + const LinearBooleanProblem& problem, + std::vector* initial_equivalence_classes) { + // First, we convert the problem to its canonical representation. + const int num_variables = problem.num_variables(); + CanonicalBooleanLinearProblem canonical_problem; + std::vector cst; + for (const LinearBooleanConstraint& constraint : problem.constraints()) { + cst.clear(); + for (int i = 0; i < constraint.literals_size(); ++i) { + const Literal literal(constraint.literals(i)); + cst.push_back(LiteralWithCoeff(literal, constraint.coefficients(i))); + } + CHECK(canonical_problem.AddLinearConstraint( + constraint.has_lower_bound(), constraint.lower_bound(), + constraint.has_upper_bound(), constraint.upper_bound(), &cst)); + } + + // TODO(user): reserve the memory for the graph? not sure it is worthwhile + // since it would require some linear scan of the problem though. + Graph* graph = new Graph(); + initial_equivalence_classes->clear(); + + // We will construct a graph with 3 different types of node that must be + // in different equivalent classes. 
+ enum NodeType { LITERAL_NODE, CONSTRAINT_NODE, CONSTRAINT_COEFFICIENT_NODE }; + IdGenerator id_generator; + + // First, we need one node per literal with an edge between each literal + // and its negation. + for (int i = 0; i < num_variables; ++i) { + // We have two nodes for each variable. + // Note that the indices are in [0, 2 * num_variables) and in one to one + // correspondance with the index representation of a literal. + const Literal literal = Literal(VariableIndex(i), true); + graph->AddArc(literal.Index().value(), literal.NegatedIndex().value()); + graph->AddArc(literal.NegatedIndex().value(), literal.Index().value()); + } + + // We use 0 for their initial equivalence class, but that may be modified + // with the objective coefficient (see below). + initial_equivalence_classes->assign( + 2 * num_variables, id_generator.GetId(NodeType::LITERAL_NODE, 0)); + + // Literals with different objective coeffs shouldn't be in the same class. + if (problem.type() == LinearBooleanProblem::MINIMIZATION || + problem.type() == LinearBooleanProblem::MAXIMIZATION) { + // We need to canonicalize the objective to regroup literals corresponding + // to the same variables. + std::vector expr; + const LinearObjective& objective = problem.objective(); + for (int i = 0; i < objective.literals_size(); ++i) { + const Literal literal(objective.literals(i)); + expr.push_back(LiteralWithCoeff(literal, objective.coefficients(i))); + } + // Note that we don't care about the offset or optimization direction here, + // we just care about literals with the same canonical coefficient. + int64 shift; + int64 max_value; + ComputeBooleanLinearExpressionCanonicalForm(&expr, &shift, &max_value); + for (LiteralWithCoeff term : expr) { + (*initial_equivalence_classes)[term.literal.Index().value()] = + id_generator.GetId(NodeType::LITERAL_NODE, term.coefficient); + } + } + + // Then, for each constraint, we will have one or more nodes. 
+ for (int i = 0; i < canonical_problem.NumConstraints(); ++i) { + // First we have a node for the constraint with an equivalence class + // depending on the rhs. + // + // Note: Since we add nodes one by one, initial_equivalence_classes->size() + // gives the number of nodes at any point, which we use as next node index. + const int constraint_node_index = initial_equivalence_classes->size(); + initial_equivalence_classes->push_back(id_generator.GetId( + NodeType::CONSTRAINT_NODE, canonical_problem.Rhs(i))); + + // This node will also be connected to all literals of the constraint + // with a coefficient of 1. Literals with new coefficients will be grouped + // under a new node connected to the constraint_node_index. + // + // Note that this works because a canonical constraint is sorted by + // increasing coefficient value (all positive). + int current_node_index = constraint_node_index; + int64 previous_coefficient = 1; + for (LiteralWithCoeff term : canonical_problem.Constraint(i)) { + if (term.coefficient != previous_coefficient) { + current_node_index = initial_equivalence_classes->size(); + initial_equivalence_classes->push_back(id_generator.GetId( + NodeType::CONSTRAINT_COEFFICIENT_NODE, term.coefficient)); + previous_coefficient = term.coefficient; + + // Connect this node to the constraint node. Note that we don't + // technically need the arcs in both directions, but that may help a bit + // the algorithm to find symmetries. + graph->AddArc(constraint_node_index, current_node_index); + graph->AddArc(current_node_index, constraint_node_index); + } + + // Connect this node to the associated term.literal node. Note that we + // don't technically need the arcs in both directions, but that may help a + // bit the algorithm to find symmetries. 
+ graph->AddArc(current_node_index, term.literal.Index().value()); + graph->AddArc(term.literal.Index().value(), current_node_index); + } + } + graph->Build(); + DCHECK_EQ(graph->num_nodes(), initial_equivalence_classes->size()); + return graph; +} + +void FindLinearBooleanProblemSymmetries( + const LinearBooleanProblem& problem, + std::vector>* generators) { + std::vector equivalence_classes; + std::unique_ptr graph( + GenerateGraphForSymmetryDetection( + problem, &equivalence_classes)); + LOG(INFO) << "Graph has " << graph->num_nodes() << " nodes and " + << graph->num_arcs() / 2 << " edges."; + GraphSymmetryFinder symmetry_finder(*graph.get()); + std::vector factorized_automorphism_group_size; + symmetry_finder.FindSymmetries(&equivalence_classes, generators, + &factorized_automorphism_group_size); + + // Remove from the permutations the part not concerning the literals. + // Note that some permutation may becomes empty, which means that we had + // duplicates constraints. TODO(user): Remove them beforehand? 
+ double average_support_size = 0.0; + int num_generators = 0; + for (int i = 0; i < generators->size(); ++i) { + SparsePermutation* permutation = (*generators)[i].get(); + std::vector to_delete; + for (int j = 0; j < permutation->NumCycles(); ++j) { + if (*(permutation->Cycle(j).begin()) >= 2 * problem.num_variables()) { + to_delete.push_back(j); + } + } + permutation->RemoveCycles(to_delete); + if (!permutation->Support().empty()) { + average_support_size += permutation->Support().size(); + swap((*generators)[num_generators], (*generators)[i]); + ++num_generators; + } + } + generators->resize(num_generators); + average_support_size /= num_generators; + LOG(INFO) << "# of generators: " << num_generators; + LOG(INFO) << "Average support size: " << average_support_size; +} + } // namespace sat } // namespace operations_research diff --git a/src/sat/boolean_problem.h b/src/sat/boolean_problem.h index cbaccd5634..24532fc991 100644 --- a/src/sat/boolean_problem.h +++ b/src/sat/boolean_problem.h @@ -15,6 +15,7 @@ #include "sat/boolean_problem.pb.h" #include "sat/sat_solver.h" +#include "algorithms/sparse_permutation.h" namespace operations_research { namespace sat { @@ -58,6 +59,14 @@ void ExtractSubproblem(const LinearBooleanProblem& problem, const std::vector& constraint_indices, LinearBooleanProblem* subproblem); +// Returns a list of generators of the symmetry group of the given problem. Each +// generator is a permutation of the integer range [0, 2n) where n is the number +// of variables of the problem. They are permutations of the (index +// representation of the) problem literals. 
+void FindLinearBooleanProblemSymmetries( + const LinearBooleanProblem& problem, + std::vector>* generators); + } // namespace sat } // namespace operations_research diff --git a/src/sat/clause.cc b/src/sat/clause.cc index 2ad57b36dc..88306788db 100644 --- a/src/sat/clause.cc +++ b/src/sat/clause.cc @@ -13,7 +13,6 @@ #include "sat/clause.h" #include -#include #include "base/unique_ptr.h" #include #include @@ -87,48 +86,85 @@ bool LiteralWatchers::PropagateOnFalse(Literal false_literal, Trail* trail) { DCHECK(is_clean_); std::vector& watchers = watchers_on_false_[false_literal.Index()]; const VariablesAssignment& assignment = trail->Assignment(); - int new_index = 0; // Note(user): It sounds better to inspect the list in order, this is because // small clauses like binary or ternary clauses will often propagate and thus // stay at the beginning of the list. - const int initial_size = watchers.size(); - for (int i = 0; i < initial_size; ++i) { - ++num_inspected_clauses_; - + std::vector::iterator new_it = watchers.begin(); + for (std::vector::iterator it = watchers.begin(); it != watchers.end(); + ++it) { // Don't even look at the clause memory if the blocking literal is true. - if (assignment.IsLiteralTrue(watchers[i].blocking_literal)) { - watchers[new_index] = watchers[i]; - ++new_index; + if (assignment.IsLiteralTrue(it->blocking_literal)) { + *new_it++ = *it; continue; } - SatClause* clause = watchers[i].clause; - if (!clause->PropagateOnFalse(false_literal, trail)) { - // Conflict: All literals of this clause are false. - memmove(&watchers[new_index], &watchers[i], - (initial_size - i) * sizeof(Watcher)); - watchers.resize(new_index + initial_size - i); - return false; + // If the other watched literal is true, just change the blocking literal. + Literal* literals = it->clause->literals(); + const Literal other_watched_literal = + (literals[1] == false_literal) ? 
literals[0] : literals[1]; + if (other_watched_literal != it->blocking_literal && + assignment.IsLiteralTrue(other_watched_literal)) { + *new_it++ = Watcher(it->clause, other_watched_literal); + continue; } - // Update the watched literal if clause->FirstLiteral() changed. - // See the contract of PropagateOnFalse(). - if (clause->FirstLiteral() != false_literal) { - AttachOnFalse(clause->FirstLiteral(), clause->SecondLiteral(), clause); + // Look for another literal to watch. + { + int i = 2; + const int size = it->clause->Size(); + while (i < size && assignment.IsLiteralFalse(literals[i])) ++i; + if (i < size) { + // literal[i] is undefined or true, it's now the new literal to watch. + // Note that by convention, we always keep the two watched literals at + // the beginning of the clause. + literals[0] = other_watched_literal; + literals[1] = literals[i]; + literals[i] = false_literal; + AttachOnFalse(literals[1], other_watched_literal, it->clause); + continue; + } + } + + // At this point other_watched_literal is either false or undefined, all + // other literals are false. + if (assignment.IsLiteralFalse(other_watched_literal)) { + // Conflict: All literals of it->clause are false. + trail->SetFailingSatClause( + ClauseRef(it->clause->begin(), it->clause->end()), it->clause); + trail->SetFailingResolutionNode(it->clause->ResolutionNodePointer()); + num_inspected_clauses_ += it - watchers.begin() + 1; + watchers.erase(new_it, it); + return false; } else { - watchers[new_index] = Watcher(clause, clause->SecondLiteral()); - ++new_index; + // Propagation: other_watched_literal is undefined, set it to true and + // put it at position 0. Note that the position 0 is important because + // we will need later to recover the literal that was propagated from the + // clause using this convention. 
+ literals[0] = other_watched_literal; + literals[1] = false_literal; + trail->EnqueueWithSatClauseReason(other_watched_literal, it->clause); + *new_it++ = *it; } } - watchers.resize(new_index); + num_inspected_clauses_ += watchers.size(); + watchers.erase(new_it, watchers.end()); return true; } bool LiteralWatchers::AttachAndPropagate(SatClause* clause, Trail* trail) { SCOPED_TIME_STAT(&stats_); ++num_watched_clauses_; - UpdateStatistics(*clause, /*added=*/true); + // Updating the statistics for each learned clause take quite a lot of time + // (like 6% of the total running time). So for now, we just compute the + // statistics depending on the problem clauses. Note that this do not take + // into account the other type of constraint though. + // + // TODO(user): This is actually not used a lot with the default parameters. + // Find a better way for the "initial" polarity choice and tie breaking + // between literal choices. Note that it seems none of the modern SAT solver + // relies on this. 
+ if (!clause->IsLearned()) UpdateStatistics(*clause, /*added=*/true); clause->SortLiterals(statistics_, parameters_); return clause->AttachAndEnqueuePotentialUnitPropagation(trail, this); } @@ -136,7 +172,7 @@ bool LiteralWatchers::AttachAndPropagate(SatClause* clause, Trail* trail) { void LiteralWatchers::LazyDetach(SatClause* clause) { SCOPED_TIME_STAT(&stats_); --num_watched_clauses_; - UpdateStatistics(*clause, /*added=*/false); + if (!clause->IsLearned()) UpdateStatistics(*clause, /*added=*/false); clause->LazyDetach(); is_clean_ = false; needs_cleaning_[clause->FirstLiteral().Index()] = true; @@ -182,6 +218,7 @@ void BinaryImplicationGraph::AddBinaryClause(Literal a, Literal b) { SCOPED_TIME_STAT(&stats_); implications_[a.Negated().Index()].push_back(b); implications_[b.Negated().Index()].push_back(a); + ++num_implications_; } void BinaryImplicationGraph::AddBinaryConflict(Literal a, Literal b, @@ -225,8 +262,131 @@ bool BinaryImplicationGraph::PropagateOnTrue(Literal true_literal, return true; } -void BinaryImplicationGraph::MinimizeClause(const Trail& trail, - std::vector* conflict) { +// Here, we remove all the literals whose negations are implied by the negation +// of the 1-UIP literal (which always appears first in the given conflict). Note +// that this algorithm is "optimal" in the sense that it leads to a minimized +// conflict with a backjump level as low as possible. However, not all possible +// literals are removed. +void BinaryImplicationGraph::MinimizeConflictWithReachability( + std::vector* conflict) { + SCOPED_TIME_STAT(&stats_); + dfs_stack_.clear(); + + // Compute the reachability from the literal "not(conflict->front())" using + // an iterative dfs.
+ const LiteralIndex root_literal_index = conflict->front().NegatedIndex(); + is_marked_.ClearAndResize(LiteralIndex(implications_.size())); + is_marked_.Set(root_literal_index); + + // TODO(user): This sounds like a good idea, but somehow it seems better not + // to do that even though it is almost for free. Investigate more. + // + // The idea here is that since we already compute the reachability from the + // root literal, we can use this computation to remove any implication + // root_literal => b if there is already root_literal => a and b is reachable + // from a. + const bool also_prune_direct_implication_list = false; + + // We treat the direct implications differently so we can also remove the + // redundant implications from this list at the same time. + std::vector& direct_implications = implications_[root_literal_index]; + for (const Literal l : direct_implications) { + if (is_marked_[l.Index()]) continue; + dfs_stack_.push_back(l); + while (!dfs_stack_.empty()) { + const LiteralIndex index = dfs_stack_.back().Index(); + dfs_stack_.pop_back(); + if (!is_marked_[index]) { + is_marked_.Set(index); + for (Literal implied : implications_[index]) { + if (!is_marked_[implied.Index()]) dfs_stack_.push_back(implied); + } + } + } + + // The "trick" is to unmark 'l'. This way, if we explore it twice, it means + // that this l is reachable from some other 'l' from the direct implication + // list. Remarks: + // - We don't loose too much complexity when this happen since a literal + // can be unmarked only once, so in the worst case we loop twice over its + // children. Moreover, this literal will be pruned for later calls. + // - This is correct, i.e. we can't prune too many literals because of a + // strongly connected component. Proof by contradiction: If we take the + // first (in direct_implications) literal from a removed SCC, it must + // have marked all the others. 
But because they are marked, they will not + // be explored again and so can't mark the first literal. + if (also_prune_direct_implication_list) { + is_marked_.Clear(l.Index()); + } + } + + // Now we can prune the direct implications list and make sure are the + // literals there are marked. + if (also_prune_direct_implication_list) { + int new_size = 0; + for (const Literal l : direct_implications) { + if (!is_marked_[l.Index()]) { + is_marked_.Set(l.Index()); + direct_implications[new_size] = l; + ++new_size; + } + } + if (new_size < direct_implications.size()) { + num_redundant_implications_ += direct_implications.size() - new_size; + direct_implications.resize(new_size); + } + } + + RemoveRedundantLiterals(conflict); +} + +// Same as MinimizeConflictWithReachability() but also mark (in the given +// SparseBitset) the reachable literal already assigned to false. These literals +// will be implied if the 1-UIP literal is assigned to false, and the classic +// minimization algorithm can take advantage of that. +void BinaryImplicationGraph::MinimizeConflictFirst( + const Trail& trail, std::vector* conflict, + SparseBitset* marked) { + SCOPED_TIME_STAT(&stats_); + is_marked_.ClearAndResize(LiteralIndex(implications_.size())); + dfs_stack_.clear(); + dfs_stack_.push_back(conflict->front().Negated()); + while (!dfs_stack_.empty()) { + const Literal literal = dfs_stack_.back(); + dfs_stack_.pop_back(); + if (!is_marked_[literal.Index()]) { + is_marked_.Set(literal.Index()); + // If the literal is assigned to false, we mark it. 
+ if (trail.Assignment().IsLiteralFalse(literal)) { + marked->Set(literal.Variable()); + } + for (Literal implied : implications_[literal.Index()]) { + if (!is_marked_[implied.Index()]) dfs_stack_.push_back(implied); + } + } + } + RemoveRedundantLiterals(conflict); +} + +void BinaryImplicationGraph::RemoveRedundantLiterals( + std::vector* conflict) { + SCOPED_TIME_STAT(&stats_); + int new_index = 1; + for (int i = 1; i < conflict->size(); ++i) { + if (!is_marked_[(*conflict)[i].NegatedIndex()]) { + (*conflict)[new_index] = (*conflict)[i]; + ++new_index; + } + } + if (new_index < conflict->size()) { + ++num_minimization_; + num_literals_removed_ += conflict->size() - new_index; + conflict->resize(new_index); + } +} + +void BinaryImplicationGraph::MinimizeConflictExperimental( + const Trail& trail, std::vector* conflict) { SCOPED_TIME_STAT(&stats_); is_marked_.ClearAndResize(LiteralIndex(implications_.size())); is_removed_.ClearAndResize(LiteralIndex(implications_.size())); @@ -243,10 +403,11 @@ void BinaryImplicationGraph::MinimizeClause(const Trail& trail, // cycle. Note that this is not optimal in the sense that we may not remove // a literal that can be removed. // - // TODO(user): no need to explore the unique literal of the current decision - // level since it can't be removed. - int index = 0; - for (int i = 0; i < conflict->size(); ++i) { + // Note that there is no need to explore the unique literal of the highest + // decision level since it can't be removed. Because this is a conflict, such + // literal is always at position 0, so we start directly at 1. 
+ int index = 1; + for (int i = 1; i < conflict->size(); ++i) { const Literal lit = (*conflict)[i]; const int lit_level = trail.Info(lit.Variable()).level; bool keep_literal = true; @@ -374,7 +535,7 @@ bool LiteralWithLargerWeightFirst(const WeightedLiteral& wv1, void SatClause::SortLiterals( const ITIVector& statistics, const SatParameters& parameters) { - CHECK(!IsAttached()); + DCHECK(!IsAttached()); const SatParameters::LiteralOrdering literal_order = parameters.literal_ordering(); if (literal_order != SatParameters::LITERAL_IN_ORDER) { @@ -425,7 +586,7 @@ bool SatClause::AttachAndEnqueuePotentialUnitPropagation( if (num_literal_not_false == 1) { // To maintain the validity of the 2-watcher algorithm, we need to watch - // the false literal with the highest decision levels. + // the false literal with the highest decision level. int max_level = trail->Info(literals_[1].Variable()).level; for (int i = 2; i < size_; ++i) { const int level = trail->Info(literals_[i].Variable()).level; @@ -435,11 +596,9 @@ bool SatClause::AttachAndEnqueuePotentialUnitPropagation( } } - // If there is a propagation, make literals_[1] the propagated literal and - // enqueue it. + // Propagates literals_[0] if it is undefined. if (!trail->Assignment().IsLiteralTrue(literals_[0])) { - std::swap(literals_[0], literals_[1]); - trail->EnqueueWithSatClauseReason(literals_[1], this); + trail->EnqueueWithSatClauseReason(literals_[0], this); } } @@ -450,49 +609,6 @@ bool SatClause::AttachAndEnqueuePotentialUnitPropagation( return true; } -// Propagates one watched literal becoming false. This method maintains the -// invariant that watched literals are always in position 0 and 1. -bool SatClause::PropagateOnFalse(Literal watched_literal, Trail* trail) { - const VariablesAssignment& assignment = trail->Assignment(); - DCHECK(IsAttached()); - DCHECK_GE(size_, 2); - DCHECK(assignment.IsLiteralFalse(watched_literal)); - - // The instantiated literal should be in position 0. 
- if (literals_[1] == watched_literal) { - literals_[1] = literals_[0]; - literals_[0] = watched_literal; - } - DCHECK_EQ(literals_[0], watched_literal); - - // If the other watched literal is true, do nothing. - if (assignment.IsLiteralTrue(literals_[1])) return true; - - for (int i = 2; i < size_; ++i) { - if (assignment.IsLiteralFalse(literals_[i])) continue; - - // Note(user): If the value of literals_[i] is true, it is possible to leave - // the watched literal unchanged. However this seems less efficient. Even if - // we swap it with the literal at position 2 to speed up future checks. - - // literal[i] is undefined or true, it's now the new literal to watch. - literals_[0] = literals_[i]; - literals_[i] = watched_literal; - return true; - } - - // Literals_[1] is either false or undefined, all other literals are false. - if (assignment.IsLiteralFalse(literals_[1])) { - trail->SetFailingSatClause(ToClauseRef(), this); - trail->SetFailingResolutionNode(resolution_node_); - return false; - } - - // Literals_[1] is undefined, set it to true. - trail->EnqueueWithSatClauseReason(literals_[1], this); - return true; -} - bool SatClause::IsSatisfied(const VariablesAssignment& assignment) const { for (const Literal literal : *this) { if (assignment.IsLiteralTrue(literal)) return true; diff --git a/src/sat/clause.h b/src/sat/clause.h index f190edbcaa..3871e9feb9 100644 --- a/src/sat/clause.h +++ b/src/sat/clause.h @@ -23,7 +23,6 @@ #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/timer.h" #include "base/int_type_indexed_vector.h" @@ -74,29 +73,31 @@ class SatClause { // Allows for range based iteration: for (Literal literal : clause) {}. const Literal* const begin() const { return &(literals_[0]); } const Literal* const end() const { return &(literals_[size_]); } - - // Returns a ClauseRef that point to this clause. 
- ClauseRef ToClauseRef() const { return ClauseRef(begin(), end()); } + Literal* literals() { return &(literals_[0]); } // Returns the first and second literals. These are always the watched // literals if the clause is attached in the LiteralWatchers. Literal FirstLiteral() const { return literals_[0]; } Literal SecondLiteral() const { return literals_[1]; } + // Returns the literal that was propagated to true. This only works for a + // clause that just propagated this literal. Otherwise, this will just returns + // a literal of the clause. + Literal PropagatedLiteral() const { return literals_[0]; } + + // Returns the reason for the last unit propagation of this clause. The + // preconditions are the same as for PropagatedLiteral(). + ClauseRef PropagationReason() const { + // Note that we don't need to include the propagated literal. + return ClauseRef(&(literals_[1]), end()); + } + // Removes literals that are fixed. This should only be called at level 0 // where a literal is fixed iff it is assigned. Aborts and returns true if // they are not all false. bool RemoveFixedLiteralsAndTestIfTrue(const VariablesAssignment& assignment, std::vector* removed_literals); - // Propagates watched_literal which just became false in the clause. Returns - // false if an inconsistency was detected. - // - // IMPORTANT: If a new literal needs watching instead, then FirstLiteral() - // will be the new watched literal, otherwise it will be equal to the given - // watched_literal. - bool PropagateOnFalse(Literal watched_literal, Trail* trail); - // True if the clause is learned. bool IsLearned() const { return is_learned_; } @@ -145,7 +146,7 @@ class SatClause { private: // The data is packed so that only 16 bytes are used for these fields. // Note that the max lbd is the maximum depth of the search tree (decision - // levels), so it should fit easily in 29 bits. Note that we can also upper + // levels), so it should fit easily in 30 bits. 
Note that we can also upper // bound it without hurting too much the clause cleaning heuristic. bool is_learned_ : 1; bool is_attached_ : 1; @@ -155,7 +156,8 @@ class SatClause { // This is only needed when the parameter unsat_proof() is true. // TODO(user): It is possible to use less memory when this is not the case - // by some tweaks in Create() and in the way we access it. + // by some tweaks in Create() and in the way we access it. Note that not + // storing this seems to gain about 2% in speed overall. ResolutionNode* resolution_node_; // This class store the literals inline, and literals_ mark the starts of the @@ -276,12 +278,17 @@ class LiteralWatchers { class BinaryImplicationGraph { public: BinaryImplicationGraph() - : num_propagations_(0), + : num_implications_(0), + num_propagations_(0), num_minimization_(0), num_literals_removed_(0), + num_redundant_implications_(0), stats_("BinaryImplicationGraph") {} ~BinaryImplicationGraph() { - IF_STATS_ENABLED(LOG(INFO) << stats_.StatString()); + IF_STATS_ENABLED({ + LOG(INFO) << stats_.StatString(); + LOG(INFO) << "num_redundant_implications " << num_redundant_implications_; + }); } // Resizes the data structure. @@ -300,12 +307,19 @@ class BinaryImplicationGraph { // This calls trail->Enqueue() on the newly assigned literals. bool PropagateOnTrue(Literal true_literal, Trail* trail); - // Uses the binary implication graph to minimize the given clause by removing - // literals that implies others. + // Uses the binary implication graph to minimize the given conflict by + // removing literals that implies others. The idea is that if a and b are two + // literals from the given conflict and a => b (which is the same as not(b) => + // not(a)) then a is redundant and can be removed. // - // TODO(user): The current algorithm is minimalist, and just look at direct - // implication. Investigate recursive version. 
- void MinimizeClause(const Trail& trail, std::vector* clause); + // Note that removing as many literals as possible is too time consuming, so + // we use different heuristics/algorithms to do this minimization. + // See the binary_minimization_algorithm SAT parameter and the .cc for more + // details about the different algorithms. + void MinimizeConflictWithReachability(std::vector* c); + void MinimizeConflictExperimental(const Trail& trail, std::vector* c); + void MinimizeConflictFirst(const Trail& trail, std::vector* c, + SparseBitset* marked); // This must only be called at decision level 0 after all the possible // propagations. It: @@ -321,16 +335,16 @@ class BinaryImplicationGraph { int64 num_literals_removed() const { return num_literals_removed_; } // Returns the number of current implications. - int64 NumberOfImplications() const { - int num = 0; - for (const std::vector& v : implications_) num += v.size(); - return num / 2; - } + int64 NumberOfImplications() const { return num_implications_; } private: + // Remove any literal whose negation is marked (except the first one). + void RemoveRedundantLiterals(std::vector* conflict); + // This is indexed by the Index() of a literal. Each list stores the // literals that are implied if the index literal becomes true. ITIVector > implications_; + int64 num_implications_; // Holds the last conflicting binary clause. Literal temporary_clause_[2]; @@ -339,6 +353,7 @@ class BinaryImplicationGraph { int64 num_propagations_; int64 num_minimization_; int64 num_literals_removed_; + int64 num_redundant_implications_; // Bitset used by MinimizeClause(). // TODO(user): use the same one as the one used in the classic minimization @@ -347,6 +362,9 @@ class BinaryImplicationGraph { SparseBitset is_marked_; SparseBitset is_removed_; + // Temporary stack used by MinimizeClauseWithReachability(). 
+ std::vector dfs_stack_; + mutable StatsGroup stats_; DISALLOW_COPY_AND_ASSIGN(BinaryImplicationGraph); }; diff --git a/src/sat/pb_constraint.cc b/src/sat/pb_constraint.cc index 1ce12e64e4..d80ae25042 100644 --- a/src/sat/pb_constraint.cc +++ b/src/sat/pb_constraint.cc @@ -32,8 +32,9 @@ bool CoeffComparator(const LiteralWithCoeff& a, const LiteralWithCoeff& b) { } // namespace -bool PbCannonicalForm(std::vector* cst, Coefficient* bound_shift, - Coefficient* max_value) { +bool ComputeBooleanLinearExpressionCanonicalForm(std::vector* cst, + Coefficient* bound_shift, + Coefficient* max_value) { *bound_shift = 0; *max_value = 0; @@ -88,7 +89,7 @@ bool PbCannonicalForm(std::vector* cst, Coefficient* bound_shi } // TODO(user): Also check for no duplicates literals + unit tests. -bool LinearConstraintIsCannonical(const std::vector& cst) { +bool BooleanLinearExpressionIsCanonical(const std::vector& cst) { Coefficient previous = 1; for (LiteralWithCoeff term : cst) { if (term.coefficient < previous) return false; @@ -97,6 +98,59 @@ bool LinearConstraintIsCannonical(const std::vector& cst) { return true; } +// TODO(user): Use more complex simplification like dividing by the gcd of +// everyone and using less different coefficients if possible. +void SimplifyCanonicalBooleanLinearConstraint(std::vector* cst, + Coefficient* rhs) { + // Replace all coefficient >= rhs by rhs + 1 (these literal must actually be + // false). Note that the linear sum of literals remains canonical. + // + // TODO(user): It is probably better to remove these literals and have other + // constraint setting them to false from the symmetry finder perspective. + for (LiteralWithCoeff& x : *cst) { + if (x.coefficient > *rhs) x.coefficient = *rhs + 1; + } +} + +bool CanonicalBooleanLinearProblem::AddLinearConstraint( + bool use_lower_bound, Coefficient lower_bound, bool use_upper_bound, + Coefficient upper_bound, std::vector* cst) { + // Cannonicalize the linear expresion of the constraint. 
+ Coefficient bound_shift; + Coefficient max_value; + if (!ComputeBooleanLinearExpressionCanonicalForm(cst, &bound_shift, + &max_value)) { + return false; + } + if (use_upper_bound) { + Coefficient ub = upper_bound; + if (!SafeAddInto(bound_shift, &ub)) return false; + if (!AddConstraint(*cst, max_value, ub)) return false; + } + if (use_lower_bound) { + // We transform the constraint into an upper-bounded one. + for (int i = 0; i < cst->size(); ++i) { + (*cst)[i].literal = (*cst)[i].literal.Negated(); + } + Coefficient ub = max_value; + if (!SafeAddInto(-lower_bound, &ub)) return false; + if (!SafeAddInto(-bound_shift, &ub)) return false; + if (!AddConstraint(*cst, max_value, ub)) return false; + } + return true; +} + +bool CanonicalBooleanLinearProblem::AddConstraint( + const std::vector& cst, Coefficient max_value, + Coefficient rhs) { + if (rhs < 0) return false; // Trivially unsatisfiable. + if (rhs >= max_value) return true; // Trivially satisfiable. + constraints_.emplace_back(cst.begin(), cst.end()); + rhs_.push_back(rhs); + SimplifyCanonicalBooleanLinearConstraint(&constraints_.back(), &rhs_.back()); + return true; +} + UpperBoundedLinearConstraint::UpperBoundedLinearConstraint( const std::vector& cst, ResolutionNode* node) : node_(node) { @@ -290,7 +344,7 @@ bool PbConstraints::AddConstraint(const std::vector& cst, constraints_.back().ChangeResolutionNode(node); return constraints_.back().InitializeRhs(rhs, propagation_trail_index_, &slacks_.back(), trail_, - &reason_scratchpad_); + &conflict_scratchpad_); } else { // The constraint is redundant, so there is nothing to do. 
return true; @@ -302,7 +356,7 @@ bool PbConstraints::AddConstraint(const std::vector& cst, slacks_.push_back(0); if (!constraints_.back().InitializeRhs(rhs, propagation_trail_index_, &slacks_.back(), trail_, - &reason_scratchpad_)) { + &conflict_scratchpad_)) { return false; } for (LiteralWithCoeff term : cst) { @@ -332,8 +386,6 @@ bool PbConstraints::PropagateNext() { if (slack < 0 && !conflict) { update.need_untrail_inspection = true; ++num_constraint_lookups_; - // Important: we must use the conflict_scratchpad_ here not the - // reason_scratchpad_. if (!constraints_[update.index.value()].Propagate( order, &slacks_[update.index], trail_, &conflict_scratchpad_)) { trail_->SetFailingClause(ClauseRef(conflict_scratchpad_)); diff --git a/src/sat/pb_constraint.h b/src/sat/pb_constraint.h index cded77d1e4..af67f85b6b 100644 --- a/src/sat/pb_constraint.h +++ b/src/sat/pb_constraint.h @@ -42,7 +42,7 @@ inline std::ostream& operator<<(std::ostream& os, LiteralWithCoeff term) { return os; } -// Puts the given pseudo-Boolean formula in cannonical form: +// Puts the given Boolean linear expression in canonical form: // - Merge all the literal corresponding to the same variable. // - Remove zero coefficients. // - Make all the coefficients positive. @@ -54,15 +54,63 @@ inline std::ostream& operator<<(std::ostream& os, LiteralWithCoeff term) { // initial pseudo-Boolean constraint was // lhs < initial_pb_formula < rhs // then the new one is: -// lhs + bound_shift < cannonical_form < rhs + bound_shift +// lhs + bound_shift < canonical_form < rhs + bound_shift // -// Finaly, this will return false, if some integer overflow or underflow occured -// during the reduction to the cannonical form. -bool PbCannonicalForm(std::vector* cst, Coefficient* bound_shift, - Coefficient* max_value); +// Finally, this will return false, if some integer overflow or underflow +// occured during the reduction to the canonical form. 
+bool ComputeBooleanLinearExpressionCanonicalForm(std::vector* cst, + Coefficient* bound_shift, + Coefficient* max_value); -// Returns true iff the linear constraint is in cannonical form. -bool LinearConstraintIsCannonical(const std::vector& cst); +// Returns true iff the Boolean linear expression is in canonical form. +bool BooleanLinearExpressionIsCanonical(const std::vector& cst); + +// Given a Boolean linear constraint in canonical form, simplify its +// coefficients using simple heuristics. +void SimplifyCanonicalBooleanLinearConstraint(std::vector* cst, + Coefficient* rhs); + +// Holds a set of boolean linear constraints in canonical form: +// - The constraint is a linear sum of LiteralWithCoeff <= rhs. +// - The linear sum satisfies the properties described in +// ComputeBooleanLinearExpressionCanonicalForm(). +// +// TODO(user): Simplify further the constraints. +// +// TODO(user): Remove the duplication between this and what the sat solver +// is doing in AddLinearConstraint() which is basically the same. +// +// TODO(user): Remove duplicate constraints? some problems have them, and +// this is not ideal for the symmetry computation since it leads to a lot of +// symmetries of the associated graph that are not useful. +class CanonicalBooleanLinearProblem { + public: + CanonicalBooleanLinearProblem() {} + + // Adds a new constraint to the problem. The bounds are inclusive. + // Returns false in case of a possible overflow or if the constraint is + // never satisfiable. + // + // TODO(user): Use a return status to distinguish errors if needed. + bool AddLinearConstraint(bool use_lower_bound, Coefficient lower_bound, + bool use_upper_bound, Coefficient upper_bound, + std::vector* cst); + + // Getters. All the constraints are guaranteed to be in canonical form. 
+ int NumConstraints() const { return constraints_.size(); } + const Coefficient Rhs(int i) const { return rhs_[i]; } + const std::vector& Constraint(int i) const { + return constraints_[i]; + } + + private: + bool AddConstraint(const std::vector& cst, Coefficient max_value, + Coefficient rhs); + + std::vector rhs_; + std::vector> constraints_; + DISALLOW_COPY_AND_ASSIGN(CanonicalBooleanLinearProblem); +}; // This class contains "half" the propagation logic for a constraint of the form // sum ci * li <= rhs, ci positive coefficients, li literals. @@ -211,15 +259,13 @@ class PbConstraints { // equals to the given on are not processed for propagation. void Untrail(int trail_index); - // Returns the reason for the given variable assignement. + // Computes the reason for the given variable assignment. // Note that this should only be called if the reason type is PB_PROPAGATION. - const std::vector& ReasonFor(VariableIndex var) const { + void ReasonFor(VariableIndex var, std::vector* reason) const { SCOPED_TIME_STAT(&stats_); const AssignmentInfo& info = trail_->Info(var); DCHECK_EQ(info.type, AssignmentInfo::PB_PROPAGATION); - info.pb_constraint->FillReason(*trail_, info.source_trail_index, - &reason_scratchpad_); - return reason_scratchpad_; + info.pb_constraint->FillReason(*trail_, info.source_trail_index, reason); } private: @@ -242,11 +288,7 @@ class PbConstraints { // processed by this class. int propagation_trail_index_; - // Temporary vector to hold the reason of a pseudo-Boolean propagation. - // Important: the conflict must use another vector since these scratchpads - // must remain valid as long as they are needed by the sat solver and we do - // need to compute reasons and not overwrite the conflict. - mutable std::vector reason_scratchpad_; + // Temporary vector to hold the last conflict of a pseudo-Boolean propagation.
mutable std::vector conflict_scratchpad_; // We use a dequeue to store the pseudo-Boolean constraint because we want @@ -298,13 +340,8 @@ class PbReasonCache { const AssignmentInfo& info = trail_.Info(var); return std::make_pair(info.pb_constraint, info.source_trail_index); } -#if defined(_MSC_VER) - hash_map, - VariableIndex, - PairPointerIntHasher > map_; -#else + hash_map, VariableIndex> map_; -#endif DISALLOW_COPY_AND_ASSIGN(PbReasonCache); }; diff --git a/src/sat/sat_base.h b/src/sat/sat_base.h index 4c91a16f87..3b6e40feeb 100644 --- a/src/sat/sat_base.h +++ b/src/sat/sat_base.h @@ -79,6 +79,10 @@ class Literal { bool operator==(Literal other) const { return index_ == other.index_; } bool operator!=(Literal other) const { return index_ != other.index_; } + bool operator<(const Literal& literal) const { + return Index() < literal.Index(); + } + private: int index_; }; @@ -216,19 +220,30 @@ struct AssignmentInfo { CLAUSE_PROPAGATION, BINARY_PROPAGATION, PB_PROPAGATION, + SYMMETRY_PROPAGATION, }; - Type type; + Type type : 3; // The decision level at which this assignment was made. This starts at 0 and // increases each time the solver takes a SEARCH_DECISION. - int level; + int level : 29; // The index of this assignment in the trail. int trail_index; -// Some data about this assignment used to compute the reason clause when it -// becomes needed. Note that depending on the type, these fields will not be -// used and be left uninitialized. + // The rest of this struct contains data about this assignment used to compute + // the reason clause when it becomes needed. Note that depending on the type, + // some fields will not be used and left uninitialized. We use unions to gain + // a bit of memory. + + union { + SatClause* sat_clause; + ResolutionNode* resolution_node; + UpperBoundedLinearConstraint* pb_constraint; + int symmetry_index; + }; + +// Visual C++ has a problem with a Literal inside an union. 
#if defined(_MSC_VER) struct { #else @@ -237,11 +252,6 @@ struct AssignmentInfo { Literal literal; int source_trail_index; }; - union { - SatClause* sat_clause; - ResolutionNode* resolution_node; - UpperBoundedLinearConstraint* pb_constraint; - }; }; // The solver trail stores the assignement made by the solver in order. @@ -292,6 +302,12 @@ class Trail { current_info_.pb_constraint = cst; Enqueue(true_literal, AssignmentInfo::PB_PROPAGATION); } + void EnqueueWithSymmetricReason(Literal true_literal, int source_trail_index, + int symmetry_index) { + current_info_.source_trail_index = source_trail_index; + current_info_.symmetry_index = symmetry_index; + Enqueue(true_literal, AssignmentInfo::SYMMETRY_PROPAGATION); + } // Dequeues the last assigned literal and returns it. // Note that we do not touch it assignement info. diff --git a/src/sat/sat_parameters.proto b/src/sat/sat_parameters.proto index 72705122c5..d5ca67bcce 100644 --- a/src/sat/sat_parameters.proto +++ b/src/sat/sat_parameters.proto @@ -149,7 +149,14 @@ message SatParameters { // Whether to expoit the binary clause to minimize learned clauses further. // This will have an effect only if treat_binary_clauses_separately is true. - optional bool use_binary_clauses_minimization = 34 [default = true]; + enum BinaryMinizationAlgorithm { + NO_BINARY_MINIMIZATION = 0; + BINARY_MINIMIZATION_FIRST = 1; + BINARY_MINIMIZATION_WITH_REACHABILITY = 2; + EXPERIMENTAL_BINARY_MINIMIZATION = 3; + } + optional BinaryMinizationAlgorithm binary_minimization_algorithm = 34 + [default = BINARY_MINIMIZATION_FIRST]; // For an optimization problem, whether we follow some hints in order to find // a better first solution. 
For a variable with hint, the solver will always diff --git a/src/sat/sat_solver.cc b/src/sat/sat_solver.cc index d71a1385bd..3008fd222f 100644 --- a/src/sat/sat_solver.cc +++ b/src/sat/sat_solver.cc @@ -32,6 +32,7 @@ SatSolver::SatSolver() : num_variables_(0), num_constraints_(0), pb_constraints_(&trail_), + symmetry_propagator_(&trail_), current_decision_level_(0), propagation_trail_index_(0), binary_propagation_trail_index_(0), @@ -84,6 +85,7 @@ void SatSolver::SetNumVariables(int num_variables) { watched_clauses_.Resize(num_variables); trail_.Resize(num_variables); pb_constraints_.Resize(num_variables); + symmetry_propagator_.Resize(num_variables); queue_elements_.resize(num_variables); activities_.resize(num_variables, 0.0); objective_weights_.resize(num_variables << 1, 0.0); @@ -138,6 +140,7 @@ bool SatSolver::AddUnitClause(Literal true_literal) { if (trail_.Assignment().IsLiteralTrue(true_literal)) return true; trail_.EnqueueWithUnitReason(true_literal, CreateRootResolutionNode()); ++num_constraints_; + if (!Propagate()) return ModelUnsat(); return true; } @@ -177,7 +180,6 @@ bool SatSolver::AddProblemClauseInternal(const std::vector& literals, trail_.EnqueueWithUnitReason(literals[0], node); // Not assigned. return true; } - // Create a new clause. std::unique_ptr clause( SatClause::Create(literals, SatClause::PROBLEM_CLAUSE, node)); @@ -198,7 +200,7 @@ bool SatSolver::AddLinearConstraintInternal(const std::vector& Coefficient rhs, Coefficient max_value) { SCOPED_TIME_STAT(&stats_); - DCHECK(LinearConstraintIsCannonical(cst)); + DCHECK(BooleanLinearExpressionIsCanonical(cst)); if (rhs < 0) return ModelUnsat(); // Unsatisfiable constraint. if (rhs >= max_value) return true; // Always satisfied constraint. @@ -257,10 +259,11 @@ bool SatSolver::AddLinearConstraint(bool use_lower_bound, cst->resize(index); } - // Cannonicalize the constraint. + // Canonicalize the constraint. 
Coefficient bound_shift; Coefficient max_value; - CHECK(PbCannonicalForm(cst, &bound_shift, &max_value)); + CHECK(ComputeBooleanLinearExpressionCanonicalForm(cst, &bound_shift, + &max_value)); CHECK(SafeAddInto(fixed_variable_shift, &bound_shift)); if (use_upper_bound) { @@ -279,6 +282,7 @@ bool SatSolver::AddLinearConstraint(bool use_lower_bound, if (!AddLinearConstraintInternal(*cst, ub, max_value)) return ModelUnsat(); } ++num_constraints_; + if (!Propagate()) return ModelUnsat(); return true; } @@ -310,17 +314,6 @@ void SatSolver::AddLearnedClauseAndEnqueueUnitPropagation( } } -bool SatSolver::InitialPropagation() { - SCOPED_TIME_STAT(&stats_); - CHECK_EQ(CurrentDecisionLevel(), 0); - if (!Propagate()) { - return ModelUnsat(); - } - ProcessNewlyFixedVariableResolutionNodes(); - ProcessNewlyFixedVariables(); - return true; -} - int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) { SCOPED_TIME_STAT(&stats_); CHECK_EQ(propagation_trail_index_, trail_.Index()); @@ -342,26 +335,26 @@ int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) { int first_propagation_index = trail_.Index(); NewDecision(true_literal); while (!Propagate()) { - // If Propagate() fail at level 0, then the problem is UNSAT. - if (CurrentDecisionLevel() == 0) { - is_model_unsat_ = true; - return kUnsatTrailIndex; - } reason_cache_.Clear(); // A conflict occured, compute a nice reason for this failure. ComputeFirstUIPConflict(trail_.FailingClause(), &learned_conflict_, &reason_used_to_infer_the_conflict_); + + // An empty conflict means that the problem is UNSAT. + if (learned_conflict_.empty()) { + is_model_unsat_ = true; + return kUnsatTrailIndex; + } DCHECK(IsConflictValid(learned_conflict_)); // Update the activity of all the variables in the first UIP clause. // Also update the activity of the last level variables expanded (and // thus discarded) during the first UIP computation. Note that both // sets are disjoint. 
- const int initial_lbd = ComputeLbd(learned_conflict_); const int lbd_limit = parameters_.use_lbd() && parameters_.use_glucose_bump_again_strategy() - ? initial_lbd + ? ComputeLbd(learned_conflict_) : 0; BumpVariableActivities(learned_conflict_, lbd_limit); BumpVariableActivities(reason_used_to_infer_the_conflict_, lbd_limit); @@ -374,16 +367,47 @@ int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) { } BumpReasonActivities(reason_used_to_infer_the_conflict_); + // Minimizing the conflict with binary clauses first has two advantages. + // First, there is no need to compute a reason for the variables eliminated + // this way. Second, more variables may be marked (in is_marked_) and + // MinimizeConflict() can take advantage of that. + if (binary_implication_graph_.NumberOfImplications() != 0 && + parameters_.binary_minimization_algorithm() == + SatParameters::BINARY_MINIMIZATION_FIRST) { + binary_implication_graph_.MinimizeConflictFirst( + trail_, &learned_conflict_, &is_marked_); + DCHECK(IsConflictValid(learned_conflict_)); + } + // Minimize the learned conflict. +#ifndef NDEBUG + const int initial_lbd = ComputeLbd(learned_conflict_); +#endif MinimizeConflict(&learned_conflict_, &reason_used_to_infer_the_conflict_); +#ifndef NDEBUG DCHECK(IsConflictValid(learned_conflict_)); DCHECK_EQ(initial_lbd, ComputeLbd(learned_conflict_)); - if (parameters_.treat_binary_clauses_separately() && - parameters_.use_binary_clauses_minimization()) { +#endif + + // Minimize it further with binary clauses? + if (binary_implication_graph_.NumberOfImplications() != 0) { // Note that on the contrary to the MinimizeConflict() above that // just uses the reason graph, this minimization can change the // clause LBD and even the backtracking level. 
- binary_implication_graph_.MinimizeClause(trail_, &learned_conflict_); + switch (parameters_.binary_minimization_algorithm()) { + case SatParameters::NO_BINARY_MINIMIZATION: + FALLTHROUGH_INTENDED; + case SatParameters::BINARY_MINIMIZATION_FIRST: + break; + case SatParameters::BINARY_MINIMIZATION_WITH_REACHABILITY: + binary_implication_graph_.MinimizeConflictWithReachability( + &learned_conflict_); + break; + case SatParameters::EXPERIMENTAL_BINARY_MINIMIZATION: + binary_implication_graph_.MinimizeConflictExperimental( + trail_, &learned_conflict_); + break; + } DCHECK(IsConflictValid(learned_conflict_)); } @@ -455,8 +479,14 @@ int SatSolver::EnqueueDecisionAndBacktrackOnConflict(Literal true_literal) { void SatSolver::Backtrack(int target_level) { SCOPED_TIME_STAT(&stats_); - DCHECK_GT(CurrentDecisionLevel(), 0); + // Do nothing if the CurrentDecisionLevel() is already correct. + // This is needed, otherwise target_trail_index below will remain at zero and + // that will cause some problems. Note that we could forbid an user to call + // Backtrack() with the current level, but that is annoying when you just + // want to reset the solver with Backtrack(0). 
+ if (CurrentDecisionLevel() == target_level) return; DCHECK_GE(target_level, 0); + DCHECK_LE(target_level, CurrentDecisionLevel()); ++counters_.num_failures; int target_trail_index = 0; while (current_decision_level_ > target_level) { @@ -474,6 +504,16 @@ int NextMultipleOf(int64 value, int64 interval) { } } // namespace +void SatSolver::SetAssignmentPreference(Literal literal, double weight) { + SCOPED_TIME_STAT(&stats_); + if (!parameters_.use_optimization_hints()) return; + DCHECK_GE(weight, 0.0); + DCHECK_LE(weight, 1.0); + queue_elements_[literal.Variable()].tie_breaker = weight; + objective_weights_[literal.Index()] = 0.0; + objective_weights_[literal.NegatedIndex()] = 1.0; +} + SatSolver::Status SatSolver::Solve() { SCOPED_TIME_STAT(&stats_); if (is_model_unsat_) return MODEL_UNSAT; @@ -488,20 +528,8 @@ SatSolver::Status SatSolver::Solve() { << binary_implication_graph_.NumberOfImplications(); LOG(INFO) << "Number of linear constraints: " << pb_constraints_.NumberOfConstraints(); - LOG(INFO) << "Number of initial fixed variables: " << trail_.Index(); - } - - // Performs the initial propagation if some variables are fixed. 
- if (CurrentDecisionLevel() == 0 && !InitialPropagation()) { - if (parameters_.log_search_progress()) { - LOG(INFO) << "UNSAT (initial propagation) \n" << StatusString(); - } - return MODEL_UNSAT; - } - if (parameters_.log_search_progress()) { - LOG(INFO) << "Number of assigned variables after initial propagation: " - << trail_.Index(); - LOG(INFO) << "Number of initial watched clauses: " + LOG(INFO) << "Number of fixed variables: " << trail_.Index(); + LOG(INFO) << "Number of watched clauses: " << watched_clauses_.num_watched_clauses(); LOG(INFO) << "Parameters: " << parameters_.ShortDebugString(); } @@ -578,10 +606,6 @@ SatSolver::Status SatSolver::Solve() { LOG(INFO) << RunningStatisticsString(); LOG(INFO) << "SAT \n" << StatusString(); } - if (!IsAssignmentValid(trail_.Assignment())) { - LOG(ERROR) << "Something is wrong, the computed model is not true"; - return INTERNAL_ERROR; - } return MODEL_SAT; } @@ -591,7 +615,7 @@ SatSolver::Status SatSolver::Solve() { Backtrack(assumption_level); } - // Choose the next decision variable and propagate it. + // Choose the next decision variable. 
Literal next_branch = NextBranch(); // Note that if use_optimization_hints() is false, then objective_weights_ @@ -705,17 +729,11 @@ void SatSolver::UpdateClauseActivityIncrement() { bool SatSolver::IsConflictValid(const std::vector& literals) { SCOPED_TIME_STAT(&stats_); if (literals.empty()) return false; - const int current_level = CurrentDecisionLevel(); - int num_literal_at_current_level = 0; - for (const Literal literal : literals) { - const int level = DecisionLevel(literal.Variable()); - if (level <= 0 || level > current_level) return false; - if (level == current_level) { - ++num_literal_at_current_level; - if (num_literal_at_current_level > 1) return false; - } + const int highest_level = DecisionLevel(literals[0].Variable()); + for (int i = 1; i < literals.size(); ++i) { + const int level = DecisionLevel(literals[i].Variable()); + if (level <= 0 || level >= highest_level) return false; } - if (num_literal_at_current_level != 1) return false; return true; } @@ -723,27 +741,23 @@ int SatSolver::ComputeBacktrackLevel(const std::vector& literals) { SCOPED_TIME_STAT(&stats_); DCHECK_GT(CurrentDecisionLevel(), 0); - // Note(user): if the learned clause is of size 1, we backtack all the way to + // We want the highest decision level among literals other than the first one. + // Note that this level will always be smaller than that of the first literal. + // + // Note(user): if the learned clause is of size 1, we backtrack all the way to // the beginning. It may be possible to follow another behavior, but then the // code require some special cases in // AddLearnedClauseAndEnqueueUnitPropagation() to fix the literal and not // backtrack over it. Also, subsequent propagated variables may not have a // correct level in this case. - if (literals.size() == 1) { - return 0; - } - - // We want the highest level which is not the current one. 
int backtrack_level = 0; - const int current_level = CurrentDecisionLevel(); - for (const Literal literal : literals) { - const int level = DecisionLevel(literal.Variable()); - if (level != current_level) { - backtrack_level = std::max(backtrack_level, level); - } + for (int i = 1; i < literals.size(); ++i) { + const int level = DecisionLevel(literals[i].Variable()); + backtrack_level = std::max(backtrack_level, level); } VLOG(2) << Indent() << "backtrack_level: " << backtrack_level; - DCHECK_LT(backtrack_level, current_level); + DCHECK_LT(backtrack_level, DecisionLevel(literals[0].Variable())); + DCHECK_LE(DecisionLevel(literals[0].Variable()), CurrentDecisionLevel()); return backtrack_level; } @@ -794,10 +808,9 @@ std::string SatSolver::StatusString() const { StringPrintf(" num inspected clauses: %" GG_LL_FORMAT "d\n", watched_clauses_.num_inspected_clauses()) + StringPrintf( - " num learned literals: %lld (%.2f%% forgotten)\n", + " num learned literals: %lld (avg: %.1f /clause)\n", counters_.num_literals_learned, - 100.0 * static_cast(counters_.num_literals_forgotten) / - static_cast(counters_.num_literals_learned)) + + 1.0 * counters_.num_literals_learned / counters_.num_failures) + StringPrintf(" num restarts: %d\n", restart_count_); } @@ -861,12 +874,8 @@ void SatSolver::ProcessNewlyFixedVariables() { int num_detached_clauses = 0; int num_binary = 0; - // Note that this may break IsAssignmentValid(), or at least the guarantee - // behind it since we modify the clauses. However, we do check the assigment - // validity again at the proto level. Moreover the proto is const and never - // change. - // - // TODO(user): Remove or investigate and correct IsAssignmentValid(). + // We remove the clauses that are always true and the fixed literals from the + // others. for (int i = 0; i < 2; ++i) { for (SatClause* clause : (i == 0) ? 
problem_clauses_ : learned_clauses_) { if (clause->IsAttached()) { @@ -927,60 +936,94 @@ void SatSolver::ProcessNewlyFixedVariables() { bool SatSolver::Propagate() { SCOPED_TIME_STAT(&stats_); // Inspect all the assignements that still need to be propagated. - DCHECK_GE(binary_propagation_trail_index_, propagation_trail_index_); - do { - // We want to prioritize the propagations due to binary clauses. To do so, - // before each assignement is propagated with the non-binary clauses, we - // propagate ALL the assignment on the trail using the binary clauses. + // To have reasons as short as possible and easy to compute, we prioritize + // the propagation order between the different constraint types. + while (true) { + // First we inspect ALL the binary clauses. // // All the literals with trail index smaller than // binary_propagation_trail_index_ are the assignments that were already // propagated using binary clauses and thus do not need to be inspected // again. - while (binary_propagation_trail_index_ < trail_.Index()) { - const Literal literal = trail_[binary_propagation_trail_index_]; - ++binary_propagation_trail_index_; - if (!binary_implication_graph_.PropagateOnTrue(literal, &trail_)) { - return false; + if (binary_implication_graph_.NumberOfImplications() != 0) { + while (binary_propagation_trail_index_ < trail_.Index()) { + const Literal literal = trail_[binary_propagation_trail_index_]; + ++binary_propagation_trail_index_; + if (!binary_implication_graph_.PropagateOnTrue(literal, &trail_)) { + return false; + } } } - // Propagate one literal using non-binary clauses. - bool more_work = false; - if (propagation_trail_index_ < trail_.Index()) { + // For the other types, the idea is to abort the inspection as soon as at + // least one propagation occur so we can loop over and test again the + // highest priority constraint types using the new information. + const int old_index = trail_.Index(); + + // Non-binary clauses. 
+ while (trail_.Index() == old_index && + propagation_trail_index_ < old_index) { const Literal literal = trail_[propagation_trail_index_]; ++propagation_trail_index_; DCHECK_EQ(DecisionLevel(literal.Variable()), CurrentDecisionLevel()); if (!watched_clauses_.PropagateOnFalse(literal.Negated(), &trail_)) { return false; } - more_work = true; } + if (trail_.Index() > old_index) continue; - // Propagate one literal using linear constraints. - if (pb_constraints_.PropagationNeeded()) { - if (!pb_constraints_.PropagateNext()) return false; - more_work = true; + // Symmetry. + while (trail_.Index() == old_index && + symmetry_propagator_.PropagationNeeded()) { + if (!symmetry_propagator_.PropagateNext()) { + // This is a bit more involved since we need to fetch a reason in order + // to compute the conflict. + trail_.SetFailingClause(ClauseRef(symmetry_propagator_.LastConflict( + Reason(symmetry_propagator_.VariableAtTheSourceOfLastConflict())))); + return false; + } } - if (!more_work) break; - } while (true); + if (trail_.Index() > old_index) continue; + + // General linear constraints. 
+ while (trail_.Index() == old_index && pb_constraints_.PropagationNeeded()) { + if (!pb_constraints_.PropagateNext()) return false; + } + if (trail_.Index() > old_index) continue; + break; + } return true; } ClauseRef SatSolver::Reason(VariableIndex var) const { DCHECK(trail_.Assignment().IsVariableAssigned(var)); - switch (trail_.Info(var).type) { + const AssignmentInfo& info = trail_.Info(var); + switch (info.type) { case AssignmentInfo::SEARCH_DECISION: case AssignmentInfo::UNIT_REASON: return ClauseRef(); case AssignmentInfo::CLAUSE_PROPAGATION: - return trail_.Info(var).sat_clause->ToClauseRef(); + return info.sat_clause->PropagationReason(); case AssignmentInfo::BINARY_PROPAGATION: { - const Literal* literal = &trail_.Info(var).literal; + const Literal* literal = &info.literal; return ClauseRef(literal, literal + 1); } case AssignmentInfo::PB_PROPAGATION: - return ClauseRef(pb_constraints_.ReasonFor(var)); + pb_constraints_.ReasonFor(var, &tmp_reason_); + return ClauseRef(tmp_reason_); + case AssignmentInfo::SYMMETRY_PROPAGATION: + // Note that we need to use an intermediate vector because + // Reason(source.Variable()) may be a reference to tmp_reason_ (if this + // variable was also propagated by symmetry). + // + // TODO(user): beware of deep recursion. Make this iterative. + // TODO(user): Cache some reasons to avoid recomputing them? 
+ Literal source = trail_[info.source_trail_index]; + symmetry_propagator_.Permute(info.symmetry_index, + Reason(source.Variable()), + &tmp_intermediate_reason_); + swap(tmp_intermediate_reason_, tmp_reason_); + return ClauseRef(tmp_reason_); } } @@ -1057,6 +1100,7 @@ void SatSolver::ComputeInitialVariableOrdering() { void SatSolver::Untrail(int target_trail_index) { SCOPED_TIME_STAT(&stats_); pb_constraints_.Untrail(target_trail_index); + symmetry_propagator_.Untrail(target_trail_index); while (trail_.Index() > target_trail_index) { const Literal assigned_literal = trail_.Dequeue(); @@ -1095,33 +1139,6 @@ void SatSolver::ComputeUnsatCore(std::vector* core) { unsat_proof_.UnlockNode(final_node); } -bool SatSolver::IsAssignmentValid(const VariablesAssignment& assignment) const { - SCOPED_TIME_STAT(&stats_); - VLOG(2) << "Checking solution"; - // Check that all variables are assigned. - for (VariableIndex var(0); var < num_variables_; ++var) { - if (!assignment.IsVariableAssigned(var)) { - LOG(ERROR) << " - var " << var.value() << " undefined"; - return false; - } - } - // Check that all clauses are satisfied. 
- for (SatClause* clause : problem_clauses_) { - if (!clause->IsSatisfied(assignment)) { - LOG(ERROR) << " - clause error: " << DebugString(*clause); - return false; - } - } - std::string result; - for (VariableIndex var(0); var < num_variables_; ++var) { - result.append(trail_.Assignment() - .GetTrueLiteralForAssignedVariable(var) - .DebugString()); - result.append(" "); - } - return true; -} - std::string SatSolver::DebugString(const SatClause& clause) const { std::string result; for (const Literal literal : clause) { @@ -1174,6 +1191,7 @@ ResolutionNode* SatSolver::CreateResolutionNode( break; case AssignmentInfo::SEARCH_DECISION: case AssignmentInfo::BINARY_PROPAGATION: + case AssignmentInfo::SYMMETRY_PROPAGATION: LOG(FATAL) << "This shouldn't happen"; break; } @@ -1198,9 +1216,17 @@ void SatSolver::ComputeFirstUIPConflict( conflict->clear(); reason_used_to_infer_the_conflict->clear(); - const int current_level = CurrentDecisionLevel(); - int num_literal_at_current_level_that_needs_to_be_processed = 0; - DCHECK_GT(current_level, 0); + + // Computes the highest trail index appearing in the failing_clause and its + // level (Which is almost always equals to the CurrentDecisionLevel(), except + // for symmetry propagation). + int trail_index = -1; + for (const Literal literal : failing_clause) { + trail_index = std::max(trail_index, trail_.Info(literal.Variable()).trail_index); + } + if (trail_index == -1) return; + const int highest_level = DecisionLevel(trail_[trail_index].Variable()); + if (highest_level == 0) return; // To find the 1-UIP conflict clause, we start by the failing_clause, and // expand each of its literal using the reason for this literal assignement to @@ -1213,21 +1239,21 @@ void SatSolver::ComputeFirstUIPConflict( // Now, the trick is that we use the trail to expand the literal of the // current level in a very specific order. Namely the reverse order of the one // in which they where infered. 
We stop as soon as - // num_literal_at_current_level_that_needs_to_be_processed is exactly one. + // num_literal_at_highest_level_that_needs_to_be_processed is exactly one. // // This last literal will be the first UIP because by definition all the // propagation done at the current level will pass though it at some point. ClauseRef clause_to_expand = failing_clause; DCHECK(!clause_to_expand.IsEmpty()); - int trail_index = trail_.Index() - 1; + int num_literal_at_highest_level_that_needs_to_be_processed = 0; while (true) { for (const Literal literal : clause_to_expand) { const VariableIndex var = literal.Variable(); if (!is_marked_[var]) { is_marked_.Set(var); const int level = DecisionLevel(var); - if (level == current_level) { - ++num_literal_at_current_level_that_needs_to_be_processed; + if (level == highest_level) { + ++num_literal_at_highest_level_that_needs_to_be_processed; } else if (level > 0) { // Note that all these literals are currently false since the clause // to expand was used to infer the value of a literal at this level. @@ -1240,18 +1266,21 @@ void SatSolver::ComputeFirstUIPConflict( } // Find next marked literal to expand from the trail. - DCHECK_GT(num_literal_at_current_level_that_needs_to_be_processed, 0); + DCHECK_GT(num_literal_at_highest_level_that_needs_to_be_processed, 0); while (!is_marked_[trail_[trail_index].Variable()]) { --trail_index; DCHECK_GE(trail_index, 0); - DCHECK_EQ(DecisionLevel(trail_[trail_index].Variable()), current_level); + DCHECK_EQ(DecisionLevel(trail_[trail_index].Variable()), highest_level); } - if (num_literal_at_current_level_that_needs_to_be_processed == 1) { + if (num_literal_at_highest_level_that_needs_to_be_processed == 1) { // We have the first UIP. Add its negation to the conflict clause. // This way, after backtracking to the proper level, the conflict clause // will be unit, and infer the negation of the UIP that caused the fail. 
conflict->push_back(trail_[trail_index].Negated()); + + // To respect the function API move the first UIP in the first position. + std::swap(conflict->back(), conflict->front()); break; } @@ -1268,7 +1297,7 @@ void SatSolver::ComputeFirstUIPConflict( DCHECK(!clause_to_expand.IsEmpty()); } - --num_literal_at_current_level_that_needs_to_be_processed; + --num_literal_at_highest_level_that_needs_to_be_processed; --trail_index; } } @@ -1328,13 +1357,12 @@ void SatSolver::MinimizeConflict( // literal assignement, there will be no cycles. void SatSolver::MinimizeConflictSimple(std::vector* conflict) { SCOPED_TIME_STAT(&stats_); - is_marked_.ClearAndResize(num_variables_); - for (Literal literal : *conflict) { - is_marked_.Set(literal.Variable()); - } - int index = 0; const int current_level = CurrentDecisionLevel(); - for (int i = 0; i < conflict->size(); ++i) { + + // Note that is_marked_ is already initialized and that we can start at 1 + // since the first literal of the conflict is the 1-UIP literal. + int index = 1; + for (int i = 1; i < conflict->size(); ++i) { const VariableIndex var = (*conflict)[i].Variable(); bool can_be_removed = false; if (DecisionLevel(var) != current_level) { @@ -1360,7 +1388,7 @@ void SatSolver::MinimizeConflictSimple(std::vector* conflict) { } // This is similar to MinimizeConflictSimple() except that for each literal of -// the conflict, the literals of its reason are recursively expended using their +// the conflict, the literals of its reason are recursively expanded using their // reason and so on. The recusion stop until we show that the initial literal // can be infered from the conflict variables alone, or if we show that this is // not the case. The result of any variable expension will be cached in order @@ -1394,8 +1422,11 @@ void SatSolver::MinimizeConflictRecursively(std::vector* conflict) { // least one variable of each decision levels. 
Because such variable can't be // eliminated using lower decision levels variable otherwise it will have been // propagated. - for (Literal literal : *conflict) { - const VariableIndex var = literal.Variable(); + // + // Note(user): Because is_marked_ may actually contains literals that are + // implied if the 1-UIP literal is false, we can't just iterate on the + // variables of the conflict here. + for (VariableIndex var : is_marked_.PositionsSetAtLeastOnce()) { const int level = DecisionLevel(var); min_trail_index_per_level_[level] = std::min(min_trail_index_per_level_[level], trail_.Info(var).trail_index); @@ -1403,8 +1434,9 @@ void SatSolver::MinimizeConflictRecursively(std::vector* conflict) { // Remove the redundant variable from the conflict. That is the ones that can // be infered by some other variables in the conflict. - int index = 0; - for (int i = 0; i < conflict->size(); ++i) { + // Note that we can skip the first position since this is the 1-UIP. + int index = 1; + for (int i = 1; i < conflict->size(); ++i) { const VariableIndex var = (*conflict)[i].Variable(); if (trail_.Info(var).trail_index <= min_trail_index_per_level_[DecisionLevel(var)] || @@ -1416,13 +1448,18 @@ void SatSolver::MinimizeConflictRecursively(std::vector* conflict) { ++index; } } - - // Reset min_trail_index_per_level_. This works since we can never eliminate - // all the literals from the same level. conflict->resize(index); - for (Literal literal : *conflict) { - min_trail_index_per_level_[DecisionLevel(literal.Variable())] = - std::numeric_limits::max(); + + // Reset min_trail_index_per_level_. We use the sparse version only if it + // involves less than half the size of min_trail_index_per_level_. 
+ const int threshold = min_trail_index_per_level_.size() / 2; + if (is_marked_.PositionsSetAtLeastOnce().size() < threshold) { + for (VariableIndex var : is_marked_.PositionsSetAtLeastOnce()) { + min_trail_index_per_level_[DecisionLevel(var)] = + std::numeric_limits::max(); + } + } else { + min_trail_index_per_level_.clear(); } } @@ -1446,9 +1483,9 @@ bool SatSolver::CanBeInferedFromConflictVariables(VariableIndex variable) { DCHECK(!Reason(variable).IsEmpty()); for (Literal literal : Reason(variable)) { const VariableIndex var = literal.Variable(); - if (var == variable) continue; - const int level = DecisionLevel(var); + DCHECK_NE(var, variable); if (is_marked_[var]) continue; + const int level = DecisionLevel(var); if (level == 0) { // Note that this is not needed if the solver is not configured to produce // an unsat proof. However, the (level == 0) test shoud always be false in @@ -1507,7 +1544,7 @@ bool SatSolver::CanBeInferedFromConflictVariables(VariableIndex variable) { DCHECK(!Reason(current_var).IsEmpty()); for (Literal literal : Reason(current_var)) { const VariableIndex var = literal.Variable(); - if (var == current_var) continue; + DCHECK_NE(var, current_var); const int level = DecisionLevel(var); if (level == 0 || is_marked_[var]) continue; if (trail_.Info(var).trail_index <= min_trail_index_per_level_[level] || @@ -1719,5 +1756,22 @@ void SatSolver::InitRestart() { } } +std::string SatStatusString(SatSolver::Status status) { + switch (status) { + case SatSolver::ASSUMPTIONS_UNSAT: + return "ASSUMPTIONS_UNSAT"; + case SatSolver::MODEL_UNSAT: + return "MODEL_UNSAT"; + case SatSolver::MODEL_SAT: + return "MODEL_SAT"; + case SatSolver::LIMIT_REACHED: + return "LIMIT_REACHED"; + } + // Fallback. We don't use "default:" so the compiler will return an error + // if we forgot one enum case above. 
+ LOG(DFATAL) << "Invalid SatSolver::Status " << status; + return "UNKNOWN"; +} + } // namespace sat } // namespace operations_research diff --git a/src/sat/sat_solver.h b/src/sat/sat_solver.h index 5764a3dc59..920344c8bb 100644 --- a/src/sat/sat_solver.h +++ b/src/sat/sat_solver.h @@ -25,7 +25,6 @@ #include "base/integral_types.h" #include "base/logging.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" #include "base/timer.h" #include "base/int_type_indexed_vector.h" @@ -35,6 +34,7 @@ #include "sat/clause.h" #include "sat/sat_base.h" #include "sat/sat_parameters.pb.h" +#include "sat/symmetry.h" #include "sat/unsat_proof.h" #include "util/bitset.h" #include "util/stats.h" @@ -64,15 +64,16 @@ class SatSolver { // Fixes a variable so that the given literal is true. This can be used to // solve a subproblem where some variables are fixed. Note that it is more // efficient to add such unit clause before all the others. + // Returns false if the problem is detected to be UNSAT. bool AddUnitClause(Literal true_literal); - // Adds a clause to the problem. Returns false if the clause is always false - // and thus make the problem unsatisfiable. + // Adds a clause to the problem. Returns false if the problem is detected to + // be UNSAT. bool AddProblemClause(const std::vector& literals); // Adds a pseudo-Boolean constraint to the problem. Returns false if the - // constraint is always false and thus make the problem unsatisfiable. If the - // constraint is always true, this detects it and does nothing. + // problem is detected to be UNSAT. If the constraint is always true, this + // detects it and does nothing. // // Note(user): There is an optimization if the same constraint is added // consecutively (even if the bounds are different). This is particularly @@ -87,6 +88,16 @@ class SatSolver { bool use_upper_bound, Coefficient upper_bound, std::vector* cst); + // Add a set of Literals permutation that are assumed to be symmetries of the + // problem. 
The solver will take ownership of the pointers. + // + // TODO(user): This currently can't be used with unsat_proof() on. Fix it. + void AddSymmetries(std::vector>* generators) { + for (int i = 0; i < generators->size(); ++i) { + symmetry_propagator_.AddSymmetry(std::move((*generators)[i])); + } + } + // Advanced usage. This is only relevant when trying to compute an unsat core. // All the constraints added by one of the Add*() function above when this was // set to true will be considered for the core. All the others will just be @@ -96,11 +107,11 @@ class SatSolver { is_relevant_for_core_computation_ = value; } - // Returns the number of time AddProblemClause() or AddLinearConstraint() was - // called. This will also be the unique index associated to the next - // constraint that will be added. This unique index is used by UnsatCore() to - // indicates what constraints are part of the core. - int NumConstraints() { return num_constraints_; } + // Returns the number of time one of the Add*() functions was called. This + // will also be the unique index associated to the next constraint that will + // be added. This unique index is used by UnsatCore() to indicates what + // constraints are part of the core. + int NumAddedConstraints() { return num_constraints_; } // Gives a hint so the solver tries to find a solution with the given literal // sets to true. The weight is a positive number reflecting the relative @@ -113,14 +124,7 @@ class SatSolver { // The weight is also used as a tie-breaker between variable with the same // activities provided that the variable_weight parameter is set to // DEFAULT_WEIGHT. 
- void SetAssignmentPreference(Literal literal, double weight) { - if (!parameters_.use_optimization_hints()) return; - DCHECK_GE(weight, 0.0); - DCHECK_LE(weight, 1.0); - queue_elements_[literal.Variable()].tie_breaker = weight; - objective_weights_[literal.Index()] = 0.0; - objective_weights_[literal.NegatedIndex()] = 1.0; - } + void SetAssignmentPreference(Literal literal, double weight); // Solves the problem and returns its status. // @@ -134,32 +138,22 @@ class SatSolver { MODEL_UNSAT, MODEL_SAT, LIMIT_REACHED, - INTERNAL_ERROR, }; Status Solve(); // Returns an UNSAT core. That is a subset of the problem clauses that are // still UNSAT. A problem constraint of index #i is the one that was added - // with the i-th call to AddProblemClause() or AddLinearConstraint(), see - // NumConstraints(). + // with the i-th call to one of the Add*() functions, see + // NumAddedConstraints(). // // Preconditions: // - Solve() must be called with the parameters unsat_proof() set to true. // - It must have returned MODEL_UNSAT. void ComputeUnsatCore(std::vector* core); - // Returns true if a given assignment is a solution of the current problem. - // TODO(user): This currently only check normal clauses. Fix it to include - // binary clauses and linear constraints. - bool IsAssignmentValid(const VariablesAssignment& assignment) const; - // Advanced usage. The next 3 functions allow to drive the search from outside // the solver. - // Starts the initial propagation and returns false if the model is already - // detected to be UNSAT. - bool InitialPropagation(); - // Takes a new decision (the given true_literal must be unassigned) and // propagates it. Returns the trail index of the first newly propagated // literal. If there is a conflict and the problem is detected to be UNSAT, @@ -194,7 +188,7 @@ class SatSolver { // sate after InitialPropagation() was called. void Backtrack(int target_level); - // Returns the current decisions, trail and variable assignment. 
+ // Various getters of the current solver state. struct Decision { Decision() : trail_index(-1) {} Decision(int i, Literal l) : trail_index(i), literal(l) {} @@ -206,8 +200,7 @@ class SatSolver { const Trail& LiteralTrail() const { return trail_; } const VariablesAssignment& Assignment() const { return trail_.Assignment(); } - // Useful information about the last Solve(). They are cleared at - // the beginning of each Solve(). + // Some statistics since the creation of the solver. int64 num_branches() const; int64 num_failures() const; int64 num_propagations() const; @@ -227,8 +220,12 @@ class SatSolver { int DecisionLevel(VariableIndex var) const { return trail_.Info(var).level; } // Returns the reason for a given variable assignment. The variable must be - // assigned (this is DCHECKed). Note that the reason clause may or may not - // contain a literal refering to the given variable. + // assigned (this is DCHECKed). The interpretation is that because all the + // literal of a reason were assigned to false, we could deduce the assignement + // of the given variable. + // + // WARNING: The returned ClauseRef will be invalidated by the next call to + // Reason(). // // Complexity remark: This is called a lot less often than Enqueue(). So it is // better to do as little work as possible during Enqueue() and more work @@ -242,12 +239,8 @@ class SatSolver { // like a good idea to keep clauses that were used as a reason even if the // variable is currently not assigned. This way, even if the clause cleaning // happen just after a restart, the logic will not change. - // - // This works because the literal propagated by a clause will always be in - // the second position. See SatClause::PropagateOnFalse() and - // SatClause::AttachAndEnqueuePotentialUnitPropagation(). 
bool IsClauseUsedAsReason(SatClause* clause) const { - const VariableIndex var = clause->SecondLiteral().Variable(); + const VariableIndex var = clause->PropagatedLiteral().Variable(); return trail_.Info(var).type == AssignmentInfo::CLAUSE_PROPAGATION && trail_.Info(var).sat_clause == clause; } @@ -312,6 +305,9 @@ class SatSolver { // comparison of the different possible conflict clause computation, see the // reference below. // + // The conflict will have only one literal at the highest decision level, and + // this literal will always be the first in the conflict vector. + // // L Zhang, CF Madigan, MH Moskewicz, S Malik, "Efficient conflict driven // learning in a boolean satisfiability solver" Proceedings of the 2001 // IEEE/ACM international conference on Computer-aided design, Pages 279-285. @@ -337,6 +333,10 @@ class SatSolver { // replace literals by other literals from lower decision levels. The first // function choose which one of the other functions to call depending on the // parameters. + // + // Precondidtion: is_marked_ should be set to true for all the variables of + // the conflict. It can also contains false non-conflict variables that + // are implied by the negation of the 1-UIP conflict literal. void MinimizeConflict(std::vector* conflict, std::vector* reason_used_to_infer_the_conflict); void MinimizeConflictExperimental(std::vector* conflict); @@ -347,9 +347,10 @@ class SatSolver { bool CanBeInferedFromConflictVariables(VariableIndex variable); // To be used in DCHECK(). Verifies some property of the conflict clause: - // - There is an unique literal of the current decision level. + // - There is an unique literal with the highest decision level. + // - This literal appears in the first position. // - All the other literals are of smaller decision level. - // - The is no literals with a decision level of zero. + // - Ther is no literal with a decision level of zero. 
bool IsConflictValid(const std::vector& literals); // Given the learned clause after a conflict, this computes the correct @@ -431,6 +432,7 @@ class SatSolver { LiteralWatchers watched_clauses_; BinaryImplicationGraph binary_implication_graph_; PbConstraints pb_constraints_; + SymmetryPropagator symmetry_propagator_; // The solver trail. Trail trail_; @@ -549,6 +551,10 @@ class SatSolver { SparseBitset is_independent_; std::vector min_trail_index_per_level_; + // Temporary member used by Reason(). + mutable std::vector tmp_reason_; + mutable std::vector tmp_intermediate_reason_; + // Temporary members used by CanBeInferedFromConflictVariables(). std::vector dfs_stack_; std::vector variable_to_process_; @@ -593,6 +599,9 @@ class SatSolver { DISALLOW_COPY_AND_ASSIGN(SatSolver); }; +// Returns a std::string representation of a SatSolver::Status. +std::string SatStatusString(SatSolver::Status status); + // Returns the ith element of the strategy S^univ proposed by M. Luby et al. in // Optimal Speedup of Las Vegas Algorithms, Information Processing Letters 1993. // This is used to decide the number of conflicts allowed before the next diff --git a/src/sat/symmetry.cc b/src/sat/symmetry.cc new file mode 100644 index 0000000000..5ddec04e8e --- /dev/null +++ b/src/sat/symmetry.cc @@ -0,0 +1,197 @@ +// Copyright 2010-2013 Google +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#include "sat/symmetry.h" + +namespace operations_research { +namespace sat { + +SymmetryPropagator::SymmetryPropagator(Trail* trail) + : trail_(trail), + propagation_trail_index_(0), + stats_("SymmetryPropagator"), + num_propagations_(0), + num_conflicts_(0) {} + +SymmetryPropagator::~SymmetryPropagator() { + IF_STATS_ENABLED({ + LOG(INFO) << stats_.StatString(); + LOG(INFO) << "num propagations by symmetry: " << num_propagations_; + LOG(INFO) << "num conflicts by symmetry: " << num_conflicts_; + }); +} + +void SymmetryPropagator::Resize(int num_variables) { + images_.resize(num_variables << 1); + tmp_literal_mapping_.resize(num_variables << 1); + for (LiteralIndex i(0); i < tmp_literal_mapping_.size(); ++i) { + tmp_literal_mapping_[i] = Literal(i); + } +} + +void SymmetryPropagator::AddSymmetry( + std::unique_ptr permutation) { + if (permutation->NumCycles() == 0) return; + SCOPED_TIME_STAT(&stats_); + DCHECK_EQ(propagation_trail_index_, 0); + for (int c = 0; c < permutation->NumCycles(); ++c) { + int e = permutation->LastElementInCycle(c); + for (const int image : permutation->Cycle(c)) { + DCHECK_GE(LiteralIndex(e), 0); + DCHECK_LE(LiteralIndex(e), images_.size()); + const int permutation_index = permutations_.size(); + images_[LiteralIndex(e)].push_back( + ImageInfo(permutation_index, Literal(LiteralIndex(image)))); + e = image; + } + } + permutation_trails_.push_back(std::vector()); + permutation_trails_.back().reserve(permutation->Support().size()); + permutations_.emplace_back(permutation.release()); +} + +bool SymmetryPropagator::PropagationNeeded() const { + SCOPED_TIME_STAT(&stats_); + return !permutations_.empty() && propagation_trail_index_ < trail_->Index(); +} + +bool SymmetryPropagator::PropagateNext() { + SCOPED_TIME_STAT(&stats_); + const Literal true_literal = (*trail_)[propagation_trail_index_]; + const std::vector& images = images_[true_literal.Index()]; + for (int image_index = 0; image_index < images.size(); ++image_index) { + const int 
p_index = images[image_index].permutation_index; + + // TODO(user): some optim ideas: no need to enqueue if a decision image is + // already assigned to false. But then the Untrail() is more involved. + std::vector* p_trail = &(permutation_trails_[p_index]); + if (Enqueue(true_literal, images[image_index].image, p_trail)) continue; + + // We have a non-symmetric literal and its image is not already assigned to + // true. + const AssignedLiteralInfo& non_symmetric = + (*p_trail)[p_trail->back().first_non_symmetric_info_index_so_far]; + + // If the first non-symmetric literal is a decision, then we can't deduce + // anything. Otherwise, it is either a conflict or a propagation. + const AssignmentInfo& assignment_info = + trail_->Info(non_symmetric.literal.Variable()); + if (assignment_info.type == AssignmentInfo::SEARCH_DECISION) continue; + if (trail_->Assignment().IsLiteralFalse(non_symmetric.image)) { + // Conflict. + conflict_permutation_index_ = p_index; + conflict_source_reason_ = non_symmetric.literal; + conflict_literal_ = non_symmetric.image; + ++num_conflicts_; + + // Backtrack over all the enqueues we just did. + for (; image_index >= 0; --image_index) { + permutation_trails_[images[image_index].permutation_index].pop_back(); + } + return false; + } else { + // Propagation. + trail_->EnqueueWithSymmetricReason(non_symmetric.image, + assignment_info.trail_index, p_index); + ++num_propagations_; + } + } + ++propagation_trail_index_; + return true; +} + +void SymmetryPropagator::Untrail(int trail_index) { + SCOPED_TIME_STAT(&stats_); + while (propagation_trail_index_ > trail_index) { + --propagation_trail_index_; + const Literal true_literal = (*trail_)[propagation_trail_index_]; + for (ImageInfo& info : images_[true_literal.Index()]) { + permutation_trails_[info.permutation_index].pop_back(); + } + } +} + +bool SymmetryPropagator::Enqueue(Literal literal, Literal image, + std::vector* p_trail) { + // Small optimization to get the trail index of literal. 
+ const int literal_trail_index = propagation_trail_index_; + DCHECK_EQ(literal_trail_index, trail_->Info(literal.Variable()).trail_index); + + // Push the new AssignedLiteralInfo on the permutation trail. Note that we + // don't know yet its first_non_symmetric_info_index_so_far but we know that + // they are increasing, so we can restart by the one of the previous + // AssignedLiteralInfo. + p_trail->push_back(AssignedLiteralInfo( + literal, image, + p_trail->empty() + ? 0 + : p_trail->back().first_non_symmetric_info_index_so_far)); + int* index = &(p_trail->back().first_non_symmetric_info_index_so_far); + + // Compute first_non_symmetric_info_index_so_far. + while (*index < p_trail->size() && + trail_->Assignment().IsLiteralTrue((*p_trail)[*index].image)) { + // This AssignedLiteralInfo is symmetric for the full solver assignment. + // We test if it is also symmetric for the assignment so far: + if (trail_->Info((*p_trail)[*index].image.Variable()).trail_index > + literal_trail_index) { + // It isn't, so we can stop the function here. We will continue the loop + // when this function is called again with an higher trail_index. + return true; + } + ++(*index); + } + return *index == p_trail->size(); +} + +VariableIndex SymmetryPropagator::VariableAtTheSourceOfLastConflict() const { + return conflict_source_reason_.Variable(); +} + +const std::vector& SymmetryPropagator::LastConflict( + ClauseRef initial_reason) const { + SCOPED_TIME_STAT(&stats_); + Permute(conflict_permutation_index_, initial_reason, &conflict_scratchpad_); + conflict_scratchpad_.push_back(conflict_literal_); + for (Literal literal : conflict_scratchpad_) { + DCHECK(trail_->Assignment().IsLiteralFalse(literal)) << literal; + } + return conflict_scratchpad_; +} + +void SymmetryPropagator::Permute(int index, ClauseRef input, + std::vector* output) const { + SCOPED_TIME_STAT(&stats_); + // Initialize tmp_literal_mapping_. 
+ const SparsePermutation& permutation = *(permutations_[index].get()); + for (int c = 0; c < permutation.NumCycles(); ++c) { + int e = permutation.LastElementInCycle(c); + for (const int image : permutation.Cycle(c)) { + tmp_literal_mapping_[LiteralIndex(e)] = Literal(LiteralIndex(image)); + e = image; + } + } + + // Permute the input into the output. + output->clear(); + for (Literal literal : input) { + output->push_back(tmp_literal_mapping_[literal.Index()]); + } + + // Clean up. + for (int e : permutation.Support()) { + tmp_literal_mapping_[LiteralIndex(e)] = Literal(LiteralIndex(e)); + } +} + +} // namespace sat +} // namespace operations_research diff --git a/src/sat/symmetry.h b/src/sat/symmetry.h new file mode 100644 index 0000000000..a8640dcd56 --- /dev/null +++ b/src/sat/symmetry.h @@ -0,0 +1,175 @@ +// Copyright 2010-2013 Google +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#ifndef OR_TOOLS_SAT_SYMMETRY_H_ +#define OR_TOOLS_SAT_SYMMETRY_H_ + +#include "sat/sat_base.h" +#include "algorithms/sparse_permutation.h" +#include "util/stats.h" + +namespace operations_research { +namespace sat { + +// This class implements more or less the strategy described in the paper: +// Devriendt J., Bogaerts B., De Cat B., Denecker M., Mears C. "Symmetry +// propagation: Improved Dynamic Symmetry Breaking in SAT", 2012, +// IEEE 24th International Conference on Tools with Artificial Intelligence. 
+// +// Basically, each time a literal is propagated, this class tries to detect +// if another literal could also be propagated by symmetry. Note that this uses +// an heuristic in order to be efficient and that it is not exhaustive in the +// sense that it doesn't detect all possible propagations. +// +// Algorithm details: +// +// Given the current solver trail (i.e. the assigned literals and their +// assignment order) the idea is to compute (as efficiently as possible) for +// each permutation added to this class what is called the first (under the +// trail assignment order) non-symmetric literal. A literal 'l' is said to be +// non-symmetric under a given assignement and for a given permutation 'p' if +// 'l' is assigned to true but not 'p(l)'. +// +// If a first non-symmetric literal 'l' for a permutation 'p' is not a decision, +// then: +// - Because it is not a decision, 'l' has been implied by a reason formed by +// literals assigned to true at lower trail indices. +// - Because this is the first non-symmetric literal for 'p', the permuted +// reason only contains literal that are also assigned to true. +// - Because of this, 'p(l)' is also implied by the current assignment. +// Of course, this assume that p is a symmetry of the full problem. +// Note that if it is already assigned to false, then we have a conflict. +// +// TODO(user): Implements the optimizations mentioned in the paper? +// TODO(user): Instrument and see if the code can be optimized. +class SymmetryPropagator { + public: + explicit SymmetryPropagator(Trail* trail); // No ownership taken. + ~SymmetryPropagator(); + + // Changes the number of variables. This must be higher than: + // - Any variable touched by a symmetry about to be added or already added. + // - Any variable assigned by the trail when calling PropagateNext(). + void Resize(int num_variables); + + // Adds a new permutation to this symmetry propagator. The ownership is + // transfered. 
This must be an integer permutation such that: + // - Its domain is [0, 2 * num_variables) and corresponds to the index + // representation of the literals over num_variables variables. + // - It must be compatible with the negation, for any literal l; not(p(l)) + // must be the same as p(not(l)), where p(x) represents the image of x by + // the permutation. + // + // Remark: Any permutation which is a symmetry of the main SAT problem can be + // added here. However, since the number of permutations is usually not + // manageable, a good alternative is to only add the generators of the + // permutation group. It is also important to add permutations with a support + // as small as possible. + // + // TODO(user): Currently this can only be called before PropagateNext() is + // called (DCHECKed). Not sure if we need more incrementality though. + void AddSymmetry(std::unique_ptr permutation); + + // If some literals enqueued on the trail haven't been processed by this class + // then PropagationNeeded() will returns true. In this case, it is possible to + // call PropagateNext() to process them. + // + // PropagateNext() will returns false if a conflict in the assignment is + // detected. Note that it will also abort early if some new literals are + // propagated, so in order to propagate everything, you need a construct like + // while (PropagationNeeded()) PropagateNext(); + bool PropagationNeeded() const; + bool PropagateNext(); + + // Backtracks to the state where all the literals with trail index greater or + // equal to the given one are assumed to be unassigned. + void Untrail(int trail_index); + + // Functions to get the clause representing the last conflict. One must first + // query the reason of the assignment of the variable + // VariableAtTheSourceOfLastConflict() and then call LastConflict() with this + // reason. 
+ VariableIndex VariableAtTheSourceOfLastConflict() const; + const std::vector& LastConflict(ClauseRef initial_reason) const; + + // Permutes a list of literals from input into output using the permutation + // with given index. This uses tmp_literal_mapping_ and has a complexity in + // O(permutation_support + input_size). + void Permute(int index, ClauseRef input, std::vector* output) const; + + private: + // The solver trail that contains the variables assignements and all the + // assignment info. propagation_trail_index_ is the index of the first + // assigned variable from the trail that is not yet processed by this class. + Trail* trail_; + int propagation_trail_index_; + + // The permutations. + // The index of a permutation is its position in this vector. + std::vector> permutations_; + + // Reverse mapping (source literal) -> list of (permutation_index, image). + struct ImageInfo { + ImageInfo(int p, Literal i) : permutation_index(p), image(i) {} + + int permutation_index; + Literal image; + }; + ITIVector> images_; + + // For each permutation p, we maintain the list of all assigned literals + // affected by p whose trail index is < propagation_trail_index_; sorted by + // trail index. Next to each such literal, we also store: + struct AssignedLiteralInfo { + AssignedLiteralInfo(Literal l, Literal i, int index) + : literal(l), image(i), first_non_symmetric_info_index_so_far(index) {} + + // The literal in question (assigned to true and in the support of p). + Literal literal; + + // The image by p of the literal above. + Literal image; + + // Previous AssignedLiteralInfos are considered 'symmetric' iff both their + // 'literal' and 'image' were assigned to true at the time the current + // AssignedLiteralInfo's literal was assigned (i.e. earlier in the trail). + int first_non_symmetric_info_index_so_far; + }; + std::vector> permutation_trails_; + + // Adds an AssignedLiteralInfo to the given permutation trail. 
+ // Returns false if there is a non-symmetric literal in this trail with its + // image not already assigned to true by the solver. + bool Enqueue(Literal literal, Literal image, + std::vector* permutation_trail); + + // The identity permutation over all the literals. + // This is temporary modified to encode a sparse permutation and then always + // restored to the identity. + mutable ITIVector tmp_literal_mapping_; + + // Temporary data needed to compute the last conflict. + int conflict_permutation_index_; + Literal conflict_source_reason_; + Literal conflict_literal_; + mutable std::vector conflict_scratchpad_; + + mutable StatsGroup stats_; + int num_propagations_; + int num_conflicts_; + DISALLOW_COPY_AND_ASSIGN(SymmetryPropagator); +}; + +} // namespace sat +} // namespace operations_research + +#endif // OR_TOOLS_SAT_SYMMETRY_H_ diff --git a/src/sat/unsat_proof.cc b/src/sat/unsat_proof.cc index 6d78b81c45..09d9eee0e6 100644 --- a/src/sat/unsat_proof.cc +++ b/src/sat/unsat_proof.cc @@ -16,7 +16,7 @@ namespace operations_research { namespace sat { // A node of the resolution DAG. -class ResolutionNode { +struct ResolutionNode { public: // Constructor for the root nodes. 
ResolutionNode() diff --git a/src/util/graph_export.cc b/src/util/graph_export.cc index dc47cadbf5..a9f5430fc5 100644 --- a/src/util/graph_export.cc +++ b/src/util/graph_export.cc @@ -17,7 +17,6 @@ #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" namespace operations_research { diff --git a/src/util/monoid_operation_tree.h b/src/util/monoid_operation_tree.h index d6b010eb62..206d170545 100644 --- a/src/util/monoid_operation_tree.h +++ b/src/util/monoid_operation_tree.h @@ -19,7 +19,6 @@ #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" #include "base/stringprintf.h" namespace operations_research { diff --git a/src/util/stats.h b/src/util/stats.h index bc7533b08b..047bd174e2 100644 --- a/src/util/stats.h +++ b/src/util/stats.h @@ -70,7 +70,6 @@ #include #include -#include "base/scoped_ptr.h" #include "base/timer.h" namespace operations_research { diff --git a/src/util/tuple_set.h b/src/util/tuple_set.h index 84ae24b1df..7c2042b867 100644 --- a/src/util/tuple_set.h +++ b/src/util/tuple_set.h @@ -33,7 +33,6 @@ #ifndef OR_TOOLS_UTIL_TUPLE_SET_H_ #define OR_TOOLS_UTIL_TUPLE_SET_H_ -#include #include "base/hash.h" #include "base/hash.h" #include diff --git a/src/util/zvector.h b/src/util/zvector.h index 6f309d1aba..01471e64f4 100644 --- a/src/util/zvector.h +++ b/src/util/zvector.h @@ -28,7 +28,6 @@ #include "base/integral_types.h" #include "base/logging.h" #include "base/macros.h" -#include "base/scoped_ptr.h" // An array class for storing arrays of integers. // The range of indices is specified at the construction of the object.