OR-Tools 9.3
find_graph_symmetries.cc
1// Copyright 2010-2021 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/graph/find_graph_symmetries.h"
15
16#include <algorithm>
17#include <cstdint>
18#include <limits>
19#include <memory>
20#include <numeric>
21#include <string>
22#include <utility>
23#include <vector>
24
25#include "absl/algorithm/container.h"
26#include "absl/container/flat_hash_set.h"
27#include "absl/memory/memory.h"
28#include "absl/status/status.h"
29#include "absl/strings/str_format.h"
30#include "absl/strings/str_join.h"
31#include "absl/time/clock.h"
32#include "absl/time/time.h"
39#include "ortools/graph/util.h"
40
41ABSL_FLAG(bool, minimize_permutation_support_size, false,
42 "Tweak the algorithm to try and minimize the support size"
43 " of the generators produced. This may negatively impact the"
44 " performance, but works great on the sat_holeXXX benchmarks"
45 " to reduce the support size.");
46
47namespace operations_research {
48
49using util::GraphIsSymmetric;
50
51namespace {
52// Some routines used below.
53void SwapFrontAndBack(std::vector<int>* v) {
54 DCHECK(!v->empty());
55 std::swap((*v)[0], v->back());
56}
57
58bool PartitionsAreCompatibleAfterPartIndex(const DynamicPartition& p1,
59 const DynamicPartition& p2,
60 int part_index) {
61 const int num_parts = p1.NumParts();
62 if (p2.NumParts() != num_parts) return false;
63 for (int p = part_index; p < num_parts; ++p) {
64 if (p1.SizeOfPart(p) != p2.SizeOfPart(p) ||
65 p1.ParentOfPart(p) != p2.ParentOfPart(p)) {
66 return false;
67 }
68 }
69 return true;
70}
71
72// Whether the "l1" list maps to "l2" under the permutation "permutation".
73// This method uses a transient bitmask on all the elements, which
74// should be entirely false before the call (and will be restored as such
75// after it).
76//
77// TODO(user): Make this method support multi-elements (i.e. an element may
78// be repeated in the list), and see if that's sufficient to make the whole
79// graph symmetry finder support multi-arcs.
80template <class List>
81bool ListMapsToList(const List& l1, const List& l2,
82 const DynamicPermutation& permutation,
83 std::vector<bool>* tmp_node_mask) {
84 int num_elements_delta = 0;
85 bool match = true;
86 for (const int mapped_x : l2) {
87 ++num_elements_delta;
88 (*tmp_node_mask)[mapped_x] = true;
89 }
90 for (const int x : l1) {
91 --num_elements_delta;
92 const int mapped_x = permutation.ImageOf(x);
93 if (!(*tmp_node_mask)[mapped_x]) {
94 match = false;
95 break;
96 }
97 (*tmp_node_mask)[mapped_x] = false;
98 }
99 if (num_elements_delta != 0) match = false;
100 if (!match) {
101 // We need to clean up tmp_node_mask.
102 for (const int x : l2) (*tmp_node_mask)[x] = false;
103 }
104 return match;
105}
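// For intuition, a small hand-checked example of ListMapsToList() (the values
// are hypothetical, not from any particular graph): with a permutation mapping
// 1 -> 4 and 2 -> 5, l1 = [1, 2] and l2 = [5, 4] match (the images {4, 5}
// equal the elements of l2 as a set); l2 = [4, 6] does not match (5 is
// missing); and l2 = [4, 5, 6] is rejected by the num_elements_delta size
// check.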
106} // namespace
107
108GraphSymmetryFinder::GraphSymmetryFinder(const Graph& graph, bool is_undirected)
109 : graph_(graph),
110 tmp_dynamic_permutation_(NumNodes()),
111 tmp_node_mask_(NumNodes(), false),
112 tmp_degree_(NumNodes(), 0),
113 tmp_nodes_with_degree_(NumNodes() + 1) {
114 // Set up an "unlimited" time limit by default.
115 time_limit_ = &dummy_time_limit_;
116 tmp_partition_.Reset(NumNodes());
117 if (is_undirected) {
118 DCHECK(GraphIsSymmetric(graph));
119 } else {
120 // Compute the reverse adjacency lists.
121 // First pass: compute the total in-degree of all nodes and put it in
122 // reverse_adj_list_index (shifted by two; see below why).
123 reverse_adj_list_index_.assign(graph.num_nodes() + /*shift*/ 2, 0);
124 for (const int node : graph.AllNodes()) {
125 for (const int arc : graph.OutgoingArcs(node)) {
126 ++reverse_adj_list_index_[graph.Head(arc) + /*shift*/ 2];
127 }
128 }
129 // Second pass: apply a cumulative sum over reverse_adj_list_index.
130 // After that, reverse_adj_list contains:
131 // [0, 0, in_degree(node0), in_degree(node0) + in_degree(node1), ...]
132 std::partial_sum(reverse_adj_list_index_.begin() + /*shift*/ 2,
133 reverse_adj_list_index_.end(),
134 reverse_adj_list_index_.begin() + /*shift*/ 2);
135 // Third pass: populate "flattened_reverse_adj_lists", using
136 // reverse_adj_list_index[i] as a dynamic pointer to the yet-unpopulated
137 // area of the reverse adjacency list of node #i.
138 flattened_reverse_adj_lists_.assign(graph.num_arcs(), -1);
139 for (const int node : graph.AllNodes()) {
140 for (const int arc : graph.OutgoingArcs(node)) {
141 flattened_reverse_adj_lists_[reverse_adj_list_index_[graph.Head(arc) +
142 /*shift*/ 1]++] =
143 node;
144 }
145 }
146 // The last pass shifted reverse_adj_list_index, so it's now as we want it:
147 // [0, in_degree(node0), in_degree(node0) + in_degree(node1), ...]
148 if (DEBUG_MODE) {
149 DCHECK_EQ(graph.num_arcs(), reverse_adj_list_index_[graph.num_nodes()]);
150 for (const int i : flattened_reverse_adj_lists_) DCHECK_NE(i, -1);
151 }
152 }
153}
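// Worked example of the reverse-adjacency construction above, on a small
// hypothetical 3-node graph (not taken from any test) with arcs
// 0->1, 0->2, 1->2, 2->1. The in-degrees are [0, 2, 2], so after the first
// two passes reverse_adj_list_index_ = [0, 0, 0, 2, 4]. The third pass fills
// flattened_reverse_adj_lists_ = [0, 2, 0, 1] and shifts the index to
// [0, 0, 2, 4, 4]; TailsOfIncomingArcsTo(1) then reads entries [0, 2) = {0, 2}
// and TailsOfIncomingArcsTo(2) reads entries [2, 4) = {0, 1}.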
154
155bool GraphSymmetryFinder::IsGraphAutomorphism(
156 const DynamicPermutation& permutation) const {
157 for (const int base : permutation.AllMappingsSrc()) {
158 const int image = permutation.ImageOf(base);
159 if (image == base) continue;
160 if (!ListMapsToList(graph_[base], graph_[image], permutation,
161 &tmp_node_mask_)) {
162 return false;
163 }
164 }
165 if (!reverse_adj_list_index_.empty()) {
166 // The graph was not symmetric: we must also check the incoming arcs
167 // to displaced nodes.
168 for (const int base : permutation.AllMappingsSrc()) {
169 const int image = permutation.ImageOf(base);
170 if (image == base) continue;
171 if (!ListMapsToList(TailsOfIncomingArcsTo(base),
172 TailsOfIncomingArcsTo(image), permutation,
173 &tmp_node_mask_)) {
174 return false;
175 }
176 }
177 }
178 return true;
179}
180
181namespace {
182// Specialized subroutine, to avoid code duplication: see its call site
183// and its self-explanatory code.
184template <class T>
185inline void IncrementCounterForNonSingletons(const T& nodes,
186 const DynamicPartition& partition,
187 std::vector<int>* node_count,
188 std::vector<int>* nodes_seen,
189 int64_t* num_operations) {
190 *num_operations += nodes.end() - nodes.begin();
191 for (const int node : nodes) {
192 if (partition.ElementsInSamePartAs(node).size() == 1) continue;
193 const int count = ++(*node_count)[node];
194 if (count == 1) nodes_seen->push_back(node);
195 }
196}
197} // namespace
198
199void GraphSymmetryFinder::RecursivelyRefinePartitionByAdjacency(
200 int first_unrefined_part_index, DynamicPartition* partition) {
201 // Rename, for readability of the code below.
202 std::vector<int>& tmp_nodes_with_nonzero_degree = tmp_stack_;
203
204 // This function is the main bottleneck of the whole algorithm. We count the
205 // number of blocks in the inner-most loops in num_operations. At the end we
206 // multiply it by a factor to obtain a deterministic duration, which we add
207 // to the deterministic time counter.
208 //
209 // TODO(user): We are really imprecise in our counting, but it is fine. We
210 // just need a way to enforce a deterministic limit on the computation effort.
211 int64_t num_operations = 0;
212
213 // Assuming that the partition was refined based on the adjacency on
214 // parts [0 .. first_unrefined_part_index) already, we simply need to
215 // refine parts first_unrefined_part_index ... NumParts()-1, the latter bound
216 // being a moving target:
217 // When a part #p < first_unrefined_part_index gets modified, it's always
218 // split in two: itself, and a new part #p'. Since #p was already refined
219 // on, we only need to further refine on *one* of its two split parts.
220 // And this will be done because p' > first_unrefined_part_index.
221 //
222 // Thus, the following loop really does the full recursive refinement as
223 // advertised.
224 std::vector<bool> adjacency_directions(1, /*outgoing*/ true);
225 if (!reverse_adj_list_index_.empty()) {
226 adjacency_directions.push_back(false); // Also look at incoming arcs.
227 }
228 for (int part_index = first_unrefined_part_index;
229 part_index < partition->NumParts(); // Moving target!
230 ++part_index) {
231 for (const bool outgoing_adjacency : adjacency_directions) {
232 // Count the aggregated degree of all nodes, only looking at arcs that
233 // come from/to the current part.
234 if (outgoing_adjacency) {
235 for (const int node : partition->ElementsInPart(part_index)) {
236 IncrementCounterForNonSingletons(
237 graph_[node], *partition, &tmp_degree_,
238 &tmp_nodes_with_nonzero_degree, &num_operations);
239 }
240 } else {
241 for (const int node : partition->ElementsInPart(part_index)) {
242 IncrementCounterForNonSingletons(
243 TailsOfIncomingArcsTo(node), *partition, &tmp_degree_,
244 &tmp_nodes_with_nonzero_degree, &num_operations);
245 }
246 }
247 // Group the nodes by (nonzero) degree. Remember the maximum degree.
248 int max_degree = 0;
249 num_operations += 3 + tmp_nodes_with_nonzero_degree.size();
250 for (const int node : tmp_nodes_with_nonzero_degree) {
251 const int degree = tmp_degree_[node];
252 tmp_degree_[node] = 0; // To clean up after us.
253 max_degree = std::max(max_degree, degree);
254 tmp_nodes_with_degree_[degree].push_back(node);
255 }
256 tmp_nodes_with_nonzero_degree.clear(); // To clean up after us.
257 // For each degree, refine the partition by the set of nodes with that
258 // degree.
259 for (int degree = 1; degree <= max_degree; ++degree) {
260 // We use a manually tuned factor of 3, because Refine() does quite a bit of
261 // work for each node in its argument.
262 num_operations += 1 + 3 * tmp_nodes_with_degree_[degree].size();
263 partition->Refine(tmp_nodes_with_degree_[degree]);
264 tmp_nodes_with_degree_[degree].clear(); // To clean up after us.
265 }
266 }
267 }
268
269 // The coefficient was manually tuned (only on a few instances) so that the
270 // time is roughly correlated with seconds on a fast desktop computer from
271 // 2020.
272 time_limit_->AdvanceDeterministicTime(1e-8 *
273 static_cast<double>(num_operations));
274}
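// To illustrate one refinement step on a small hypothetical example: for an
// undirected path 0-1-2 with all nodes initially in a single part {0, 1, 2},
// counting degrees towards that part gives 1 for nodes 0 and 2 and 2 for
// node 1, so Refine({0, 2}) followed by Refine({1}) splits the partition into
// {1} | {0, 2}, after which no further split occurs (the partition is
// equitable).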
275
276void GraphSymmetryFinder::DistinguishNodeInPartition(
277 int node, DynamicPartition* partition, std::vector<int>* new_singletons) {
278 const int original_num_parts = partition->NumParts();
279 partition->Refine(std::vector<int>(1, node));
280 RecursivelyRefinePartitionByAdjacency(partition->PartOf(node), partition);
281
282 // Explore the newly refined parts to gather all the new singletons.
283 if (new_singletons != nullptr) {
284 new_singletons->clear();
285 for (int p = original_num_parts; p < partition->NumParts(); ++p) {
286 const int parent = partition->ParentOfPart(p);
287 // We may see the same singleton parent several times, so we guard them
288 // with the tmp_node_mask_ boolean vector.
289 if (!tmp_node_mask_[parent] && parent < original_num_parts &&
290 partition->SizeOfPart(parent) == 1) {
291 tmp_node_mask_[parent] = true;
292 new_singletons->push_back(*partition->ElementsInPart(parent).begin());
293 }
294 if (partition->SizeOfPart(p) == 1) {
295 new_singletons->push_back(*partition->ElementsInPart(p).begin());
296 }
297 }
298 // Reset tmp_node_mask_.
299 for (int p = original_num_parts; p < partition->NumParts(); ++p) {
300 tmp_node_mask_[partition->ParentOfPart(p)] = false;
301 }
302 }
303}
304
305namespace {
306void MergeNodeEquivalenceClassesAccordingToPermutation(
307 const SparsePermutation& perm, MergingPartition* node_equivalence_classes,
308 DenseDoublyLinkedList* sorted_representatives) {
309 for (int c = 0; c < perm.NumCycles(); ++c) {
310 // TODO(user): use the global element->image iterator when it exists.
311 int prev = -1;
312 for (const int e : perm.Cycle(c)) {
313 if (prev >= 0) {
314 const int removed_representative =
315 node_equivalence_classes->MergePartsOf(prev, e);
316 if (sorted_representatives != nullptr && removed_representative != -1) {
317 sorted_representatives->Remove(removed_representative);
318 }
319 }
320 prev = e;
321 }
322 }
323}
324
325// Subroutine used by FindSymmetries(); see its call site. This finds and
326// outputs (in "pruned_other_nodes") the list of all representatives (under
327// "node_equivalence_classes") that are in the same part as
328// "representative_node" in "partition"; other than "representative_node"
329// itself.
330// "node_equivalence_classes" must be compatible with "partition", i.e. two
331// nodes that are in the same equivalence class must also be in the same part.
332//
333// To do this in O(output size), we also need the
334// "representatives_sorted_by_index_in_partition" data structure: the
335// representatives of the nodes of the targeted part are contiguous in that
336// linked list.
337void GetAllOtherRepresentativesInSamePartAs(
338 int representative_node, const DynamicPartition& partition,
339 const DenseDoublyLinkedList& representatives_sorted_by_index_in_partition,
340 MergingPartition* node_equivalence_classes, // Only for debugging.
341 std::vector<int>* pruned_other_nodes) {
342 pruned_other_nodes->clear();
343 const int part_index = partition.PartOf(representative_node);
344 // Iterate on all contiguous representatives after the initial one...
345 int repr = representative_node;
346 while (true) {
347 DCHECK_EQ(repr, node_equivalence_classes->GetRoot(repr));
348 repr = representatives_sorted_by_index_in_partition.Prev(repr);
349 if (repr < 0 || partition.PartOf(repr) != part_index) break;
350 pruned_other_nodes->push_back(repr);
351 }
352 // ... and then on all contiguous representatives *before* it.
353 repr = representative_node;
354 while (true) {
355 DCHECK_EQ(repr, node_equivalence_classes->GetRoot(repr));
356 repr = representatives_sorted_by_index_in_partition.Next(repr);
357 if (repr < 0 || partition.PartOf(repr) != part_index) break;
358 pruned_other_nodes->push_back(repr);
359 }
360
361 // This code is a bit tricky, so we check that we're doing it right, by
362 // comparing its output to the brute-force, O(Part size) version.
363 // This also (partly) verifies that
364 // "representatives_sorted_by_index_in_partition" is what it claims it is.
365 if (DEBUG_MODE) {
366 std::vector<int> expected_output;
367 for (const int e : partition.ElementsInPart(part_index)) {
368 if (node_equivalence_classes->GetRoot(e) != representative_node) {
369 expected_output.push_back(e);
370 }
371 }
372 node_equivalence_classes->KeepOnlyOneNodePerPart(&expected_output);
373 for (int& x : expected_output) x = node_equivalence_classes->GetRoot(x);
374 std::sort(expected_output.begin(), expected_output.end());
375 std::vector<int> sorted_output = *pruned_other_nodes;
376 std::sort(sorted_output.begin(), sorted_output.end());
377 DCHECK_EQ(absl::StrJoin(expected_output, " "),
378 absl::StrJoin(sorted_output, " "));
379 }
380}
381} // namespace
382
383absl::Status GraphSymmetryFinder::FindSymmetries(
384 std::vector<int>* node_equivalence_classes_io,
385 std::vector<std::unique_ptr<SparsePermutation>>* generators,
386 std::vector<int>* factorized_automorphism_group_size,
387 TimeLimit* time_limit) {
388 // Initialization.
389 time_limit_ = time_limit == nullptr ? &dummy_time_limit_ : time_limit;
390 IF_STATS_ENABLED(stats_.initialization_time.StartTimer());
391 generators->clear();
392 factorized_automorphism_group_size->clear();
393 if (node_equivalence_classes_io->size() != NumNodes()) {
394 return absl::Status(absl::StatusCode::kInvalidArgument,
395 "Invalid 'node_equivalence_classes_io'.");
396 }
397 DynamicPartition base_partition(*node_equivalence_classes_io);
398 // Break all inherent asymmetries in the graph.
399 {
400 ScopedTimeDistributionUpdater u(&stats_.initialization_refine_time);
401 RecursivelyRefinePartitionByAdjacency(/*first_unrefined_part_index=*/0,
402 &base_partition);
403 }
404 if (time_limit_->LimitReached()) {
405 return absl::Status(absl::StatusCode::kDeadlineExceeded,
406 "During the initial refinement.");
407 }
408 VLOG(4) << "Base partition: "
409 << base_partition.DebugString(DynamicPartition::SORT_BY_PART);
410
411 MergingPartition node_equivalence_classes(NumNodes());
412 std::vector<std::vector<int>> permutations_displacing_node(NumNodes());
413 std::vector<int> potential_root_image_nodes;
414 IF_STATS_ENABLED(stats_.initialization_time.StopTimerAndAddElapsedTime());
415
416 // To find all permutations of the Graph that satisfy the current partition,
417 // we pick an element v that is not in a singleton part, and we
418 // split the search in two phases:
419 // 1) Find (the generators of) all permutations that keep v invariant.
420 // 2) For each w in PartOf(v) such that w != v:
421 // find *one* permutation that maps v to w, if it exists.
422 // If it does exist, add it to the generators.
423 //
424 // Part 1) is recursive.
425 //
426 // Since we can't really use true recursion because it will be too deep for
427 // the stack, we implement it iteratively. To do that, we unroll 1):
428 // the "invariant dive" is a single pass that successively refines the node
429 // base_partition with elements from non-singleton parts (the 'invariant
430 // node'), until all parts are singletons.
431 // We remember which nodes we picked as invariants, and also the successive
432 // partition sizes as we refine it, to allow us to backtrack.
433 // Then we'll perform 2) in reverse order, backtracking the stack from 1)
434 // while using another dedicated stack for the search (see below).
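// As a small illustration (a hypothetical 4-cycle 0-1-2-3, not a benchmark):
// the initial refinement keeps the single part {0, 1, 2, 3} since all degrees
// are equal. One possible dive distinguishes node 0 (yielding the parts
// {0} | {1, 3} | {2}) and then node 1 (all parts become singletons). When
// unrolling, phase 2) may first find the generator (1 3) (orbit factor 2),
// then one or two more generators moving node 0 (orbit factor 4), so the
// factors multiply to 2 * 4 = 8, the size of the automorphism group of the
// 4-cycle. The exact generators found depend on the search order.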
435 IF_STATS_ENABLED(stats_.invariant_dive_time.StartTimer());
436 struct InvariantDiveState {
437 int invariant_node;
438 int num_parts_before_refinement;
439
440 InvariantDiveState(int node, int num_parts)
441 : invariant_node(node), num_parts_before_refinement(num_parts) {}
442 };
443 std::vector<InvariantDiveState> invariant_dive_stack;
444 // TODO(user): experiment with, and briefly describe the results of various
445 // algorithms for picking the invariant node:
446 // - random selection
447 // - highest/lowest degree first
448 // - enumerate by part index; or by part size
449 // - etc.
450 for (int invariant_node = 0; invariant_node < NumNodes(); ++invariant_node) {
451 if (base_partition.ElementsInSamePartAs(invariant_node).size() == 1) {
452 continue;
453 }
454 invariant_dive_stack.push_back(
455 InvariantDiveState(invariant_node, base_partition.NumParts()));
456 DistinguishNodeInPartition(invariant_node, &base_partition, nullptr);
457 VLOG(4) << "Invariant dive: invariant node = " << invariant_node
458 << "; partition after: "
459 << base_partition.DebugString(DynamicPartition::SORT_BY_PART);
460 if (time_limit_->LimitReached()) {
461 return absl::Status(absl::StatusCode::kDeadlineExceeded,
462 "During the invariant dive.");
463 }
464 }
465 DenseDoublyLinkedList representatives_sorted_by_index_in_partition(
466 base_partition.ElementsInHierarchicalOrder());
467 DynamicPartition image_partition = base_partition;
468 IF_STATS_ENABLED(stats_.invariant_dive_time.StopTimerAndAddElapsedTime());
469 // Now we've dived to the bottom: we're left with the identity permutation,
470 // which we don't need as a generator. We move on to phase 2).
471
472 IF_STATS_ENABLED(stats_.main_search_time.StartTimer());
473 while (!invariant_dive_stack.empty()) {
474 if (time_limit_->LimitReached()) break;
475 // Backtrack the last step of 1) (the invariant dive).
476 IF_STATS_ENABLED(stats_.invariant_unroll_time.StartTimer());
477 const int root_node = invariant_dive_stack.back().invariant_node;
478 const int base_num_parts =
479 invariant_dive_stack.back().num_parts_before_refinement;
480 invariant_dive_stack.pop_back();
481 base_partition.UndoRefineUntilNumPartsEqual(base_num_parts);
482 image_partition.UndoRefineUntilNumPartsEqual(base_num_parts);
483 VLOG(4) << "Backtracking invariant dive: root node = " << root_node
484 << "; partition: "
485 << base_partition.DebugString(DynamicPartition::SORT_BY_PART);
486
487 // Now we'll try to map "root_node" to all image nodes that seem compatible
488 // and that aren't "root_node" itself.
489 //
490 // Doing so, we're able to detect potential bad (or good) matches by
491 // refining the 'base' partition with "root_node"; and refining the
492 // 'image' partition (which represents the partition of images nodes,
493 // i.e. the nodes after applying the currently implicit permutation)
494 // with that candidate image node: if the two partitions don't match, then
495 // the candidate image isn't compatible.
496 // If the partitions do match, we might either find the underlying
497 // permutation directly, or we might need to further try and map other
498 // nodes to their image: this is a recursive search with backtracking.
499
500 // The potential images of root_node are the nodes in its part. They can be
501 // pruned by the already computed equivalence classes.
502 // TODO(user): better select the representative of each equivalence class
503 // in order to reduce the permutation support down the line.
504 // TODO(user): Don't build a list; but instead use direct, inline iteration
505 // on the representatives in the while() loop below, to benefit from the
506 // incremental merging of the equivalence classes.
507 DCHECK_EQ(1, node_equivalence_classes.NumNodesInSamePartAs(root_node));
508 GetAllOtherRepresentativesInSamePartAs(
509 root_node, base_partition, representatives_sorted_by_index_in_partition,
510 &node_equivalence_classes, &potential_root_image_nodes);
511 DCHECK(!potential_root_image_nodes.empty());
512 IF_STATS_ENABLED(stats_.invariant_unroll_time.StopTimerAndAddElapsedTime());
513
514 // Try to map "root_node" to all of its potential images. For each image,
515 // we only care about finding a single compatible permutation, if it exists.
516 while (!potential_root_image_nodes.empty()) {
517 if (time_limit_->LimitReached()) break;
518 VLOG(4) << "Potential (pruned) images of root node " << root_node
519 << " left: [" << absl::StrJoin(potential_root_image_nodes, " ")
520 << "].";
521 const int root_image_node = potential_root_image_nodes.back();
522 VLOG(4) << "Trying image of root node: " << root_image_node;
523
524 std::unique_ptr<SparsePermutation> permutation =
525 FindOneSuitablePermutation(root_node, root_image_node,
526 &base_partition, &image_partition,
527 *generators, permutations_displacing_node);
528
529 if (permutation != nullptr) {
530 ScopedTimeDistributionUpdater u(&stats_.permutation_output_time);
531 // We found a permutation. We store it in the list of generators, and
532 // further prune out the remaining 'root' image candidates, taking into
533 // account the permutation we just found.
534 MergeNodeEquivalenceClassesAccordingToPermutation(
535 *permutation, &node_equivalence_classes,
536 &representatives_sorted_by_index_in_partition);
537 // HACK(user): to make sure that we keep root_image_node as the
538 // representative of its part, we temporarily move it to the front
539 // of the vector, then move it again to the back so that it gets
540 // deleted by the pop_back() below.
541 SwapFrontAndBack(&potential_root_image_nodes);
542 node_equivalence_classes.KeepOnlyOneNodePerPart(
543 &potential_root_image_nodes);
544 SwapFrontAndBack(&potential_root_image_nodes);
545
546 // Register it onto the permutations_displacing_node vector.
547 const int permutation_index = static_cast<int>(generators->size());
548 for (const int node : permutation->Support()) {
549 permutations_displacing_node[node].push_back(permutation_index);
550 }
551
552 // Move the permutation to the generator list (this also transfers
553 // ownership).
554 generators->push_back(std::move(permutation));
555 }
556
557 potential_root_image_nodes.pop_back();
558 }
559
560 // We keep track of the size of the orbit of 'root_node' under the
561 // current subgroup: this is one of the factors of the total group size.
562 // TODO(user): better, more complete explanation.
563 factorized_automorphism_group_size->push_back(
564 node_equivalence_classes.NumNodesInSamePartAs(root_node));
565 }
566 node_equivalence_classes.FillEquivalenceClasses(node_equivalence_classes_io);
567 IF_STATS_ENABLED(stats_.main_search_time.StopTimerAndAddElapsedTime());
568 IF_STATS_ENABLED(stats_.SetPrintOrder(StatsGroup::SORT_BY_NAME));
569 IF_STATS_ENABLED(LOG(INFO) << "Statistics: " << stats_.StatString());
570 if (time_limit_->LimitReached()) {
571 return absl::Status(absl::StatusCode::kDeadlineExceeded,
572 "Some automorphisms were found, but probably not all.");
573 }
574 return ::absl::OkStatus();
575}
576
577namespace {
578// This method can be easily understood in the context of
579// ConfirmFullMatchOrFindNextMappingDecision(): see its call sites.
580// Knowing that we want to map some element of part #part_index of
581// "base_partition" to part #part_index of "image_partition", pick the "best"
582// such mapping, for the global search algorithm.
583inline void GetBestMapping(const DynamicPartition& base_partition,
584 const DynamicPartition& image_partition,
585 int part_index, int* base_node, int* image_node) {
586 // As of pending CL 66620435, we've loosely tried three variants of
587 // GetBestMapping():
588 // 1) Just take the first element of the base part, map it to the first
589 // element of the image part.
590 // 2) Just take the first element of the base part, and map it to itself if
591 // possible, else map it to the first element of the image part
592 // 3) Scan all elements of the base part until we find one that can map to
593 // itself. If there isn't one, we just fall back to strategy 1).
594 //
595 // Variant 2) gives the best results on most benchmarks, in terms of speed,
596 // but 3) yields much smaller supports for the sat_holeXXX benchmarks, as
597 // long as it's combined with the other tweak enabled by
598 // FLAGS_minimize_permutation_support_size.
599 if (absl::GetFlag(FLAGS_minimize_permutation_support_size)) {
600 // Variant 3).
601 for (const int node : base_partition.ElementsInPart(part_index)) {
602 if (image_partition.PartOf(node) == part_index) {
603 *image_node = *base_node = node;
604 return;
605 }
606 }
607 *base_node = *base_partition.ElementsInPart(part_index).begin();
608 *image_node = *image_partition.ElementsInPart(part_index).begin();
609 return;
610 }
611
612 // Variant 2).
613 *base_node = *base_partition.ElementsInPart(part_index).begin();
614 if (image_partition.PartOf(*base_node) == part_index) {
615 *image_node = *base_node;
616 } else {
617 *image_node = *image_partition.ElementsInPart(part_index).begin();
618 }
619}
620} // namespace
621
622// TODO(user): refactor this method and its submethods into a dedicated class
623// whose members will be ominously accessed by all the class methods; most
624// notably the search state stack. This may improve readability.
625std::unique_ptr<SparsePermutation>
626GraphSymmetryFinder::FindOneSuitablePermutation(
627 int root_node, int root_image_node, DynamicPartition* base_partition,
628 DynamicPartition* image_partition,
629 const std::vector<std::unique_ptr<SparsePermutation>>&
630 generators_found_so_far,
631 const std::vector<std::vector<int>>& permutations_displacing_node) {
632 // DCHECKs() and statistics.
633 ScopedTimeDistributionUpdater search_time_updater(&stats_.search_time);
634 DCHECK_EQ("", tmp_dynamic_permutation_.DebugString());
635 DCHECK_EQ(base_partition->DebugString(DynamicPartition::SORT_BY_PART),
636 image_partition->DebugString(DynamicPartition::SORT_BY_PART));
637 DCHECK(search_states_.empty());
638
639 // These will be used during the search. See their usage.
640 std::vector<int> base_singletons;
641 std::vector<int> image_singletons;
642 int next_base_node;
643 int next_image_node;
644 int min_potential_mismatching_part_index;
645 std::vector<int> next_potential_image_nodes;
646
647 // Initialize the search: we can already distinguish "root_node" in the base
648 // partition. See the comment below.
649 search_states_.emplace_back(
650 /*base_node=*/root_node, /*first_image_node=*/-1,
651 /*num_parts_before_trying_to_map_base_node=*/base_partition->NumParts(),
652 /*min_potential_mismatching_part_index=*/base_partition->NumParts());
653 // We inject the image node directly as the "remaining_pruned_image_nodes".
654 search_states_.back().remaining_pruned_image_nodes.assign(1, root_image_node);
655 {
656 ScopedTimeDistributionUpdater u(&stats_.initial_search_refine_time);
657 DistinguishNodeInPartition(root_node, base_partition, &base_singletons);
658 }
659 while (!search_states_.empty()) {
660 if (time_limit_->LimitReached()) return nullptr;
661 // When exploring a SearchState "ss", we're supposed to have:
662 // - A base_partition that has already been refined on ss->base_node.
663 // (base_singletons is the list of singletons created on the base
664 // partition during that refinement).
665 // - A non-empty list of potential image nodes (we'll try them in reverse
666 // order).
667 // - An image partition that hasn't been refined yet.
668 //
669 // Also, one should note that the base partition (before its refinement on
670 // base_node) was deemed compatible with the image partition as it is now.
671 const SearchState& ss = search_states_.back();
672 const int image_node = ss.first_image_node >= 0
673 ? ss.first_image_node
674 : ss.remaining_pruned_image_nodes.back();
675
676 // Statistics, DCHECKs.
677 IF_STATS_ENABLED(stats_.search_depth.Add(search_states_.size()));
678 DCHECK_EQ(ss.num_parts_before_trying_to_map_base_node,
679 image_partition->NumParts());
680
681 // Apply the decision: map base_node to image_node. Since base_partition
682 // was already refined on base_node, we just need to refine image_partition.
683 {
684 ScopedTimeDistributionUpdater u(&stats_.search_refine_time);
685 DistinguishNodeInPartition(image_node, image_partition,
686 &image_singletons);
687 }
688 VLOG(4) << ss.DebugString();
689 VLOG(4) << base_partition->DebugString(DynamicPartition::SORT_BY_PART);
690 VLOG(4) << image_partition->DebugString(DynamicPartition::SORT_BY_PART);
691
692 // Run some diagnoses on the two partitions. There are many outcomes, so
693 // it's a bit complicated:
694 // 1) The partitions are incompatible
695 // - Because of a straightforward criterion (size mismatch).
696 // - Because they are both fully refined (i.e. singletons only), yet the
697 // permutation induced by them is not a graph automorphism.
698 // 2) The partitions induce a permutation (all their non-singleton parts are
699 // identical), and this permutation is a graph automorphism.
700 // 3) The partitions need further refinement:
701 // - Because some non-singleton parts aren't equal in the base and image
702 // partition
703 // - Or because they are a full match (i.e. may induce a permutation,
704 // like in 2)), but the induced permutation isn't a graph automorphism.
705 bool compatible = true;
706 {
707 ScopedTimeDistributionUpdater u(&stats_.quick_compatibility_time);
708 compatible = PartitionsAreCompatibleAfterPartIndex(
709 *base_partition, *image_partition,
710 ss.num_parts_before_trying_to_map_base_node);
711 u.AlsoUpdate(compatible ? &stats_.quick_compatibility_success_time
712 : &stats_.quick_compatibility_fail_time);
713 }
714 bool partitions_are_full_match = false;
715 if (compatible) {
716 {
717 ScopedTimeDistributionUpdater u(
718 &stats_.dynamic_permutation_refinement_time);
719 tmp_dynamic_permutation_.AddMappings(base_singletons, image_singletons);
720 }
721 ScopedTimeDistributionUpdater u(&stats_.map_election_std_time);
722 min_potential_mismatching_part_index =
723 ss.min_potential_mismatching_part_index;
724 partitions_are_full_match = ConfirmFullMatchOrFindNextMappingDecision(
725 *base_partition, *image_partition, tmp_dynamic_permutation_,
726 &min_potential_mismatching_part_index, &next_base_node,
727 &next_image_node);
728 u.AlsoUpdate(partitions_are_full_match
729 ? &stats_.map_election_std_full_match_time
730 : &stats_.map_election_std_mapping_time);
731 }
732 if (compatible && partitions_are_full_match) {
733 DCHECK_EQ(min_potential_mismatching_part_index,
734 base_partition->NumParts());
735 // We have a permutation candidate!
736 // Note(user): we also deal with (extremely rare) false positives for
737 // "partitions_are_full_match" here: in case they aren't a full match,
738 // IsGraphAutomorphism() will catch that; and we'll simply deepen the
739 // search.
740 bool is_automorphism = true;
741 {
742 ScopedTimeDistributionUpdater u(&stats_.automorphism_test_time);
743 is_automorphism = IsGraphAutomorphism(tmp_dynamic_permutation_);
744 u.AlsoUpdate(is_automorphism ? &stats_.automorphism_test_success_time
745 : &stats_.automorphism_test_fail_time);
746 }
747 if (is_automorphism) {
748 ScopedTimeDistributionUpdater u(&stats_.search_finalize_time);
749 // We found a valid permutation. We can return it, but first we
750 // must restore the partitions to their original state.
751 std::unique_ptr<SparsePermutation> sparse_permutation(
752 tmp_dynamic_permutation_.CreateSparsePermutation());
753 VLOG(4) << "Automorphism found: " << sparse_permutation->DebugString();
754 const int base_num_parts =
755 search_states_[0].num_parts_before_trying_to_map_base_node;
756 base_partition->UndoRefineUntilNumPartsEqual(base_num_parts);
757 image_partition->UndoRefineUntilNumPartsEqual(base_num_parts);
758 tmp_dynamic_permutation_.Reset();
759 search_states_.clear();
760
761 search_time_updater.AlsoUpdate(&stats_.search_time_success);
762 return sparse_permutation;
763 }
764
765 // The permutation isn't a valid automorphism. Either the partitions were
766 // fully refined, and we deem them incompatible, or they weren't, and we
767 // consider them as 'not a full match'.
768 VLOG(4) << "Permutation candidate isn't a valid automorphism.";
769 if (base_partition->NumParts() == NumNodes()) {
770 // Fully refined: the partitions are incompatible.
771 compatible = false;
772 ScopedTimeDistributionUpdater u(&stats_.dynamic_permutation_undo_time);
773 tmp_dynamic_permutation_.UndoLastMappings(&base_singletons);
774 } else {
775 ScopedTimeDistributionUpdater u(&stats_.map_reelection_time);
776 // TODO(user, viger): try to get the non-singleton part from
777 // DynamicPermutation in O(1). On some graphs like the symmetry of the
778 // mip problem lectsched-4-obj.mps.gz, this takes the majority of the
779 // time!
780 int non_singleton_part = 0;
781 {
782 ScopedTimeDistributionUpdater u(&stats_.non_singleton_search_time);
783 while (base_partition->SizeOfPart(non_singleton_part) == 1) {
784 ++non_singleton_part;
785 DCHECK_LT(non_singleton_part, base_partition->NumParts());
786 }
787 }
788 time_limit_->AdvanceDeterministicTime(
789 1e-9 * static_cast<double>(non_singleton_part));
790
791 // The partitions are compatible, but we'll deepen the search on some
792 // non-singleton part. We can pick any base and image node in this case.
793 GetBestMapping(*base_partition, *image_partition, non_singleton_part,
794 &next_base_node, &next_image_node);
795 }
796 }
797
798 // Now we've fully diagnosed our partitions, and have already dealt with
799 // case 2). We're left to deal with 1) and 3).
800 //
801 // Case 1): partitions are incompatible.
802 if (!compatible) {
803 ScopedTimeDistributionUpdater u(&stats_.backtracking_time);
804 // We invalidate the current image node, and prune the remaining image
805 // nodes. We might be left with no other image nodes, which means that
806 // we'll backtrack, i.e. pop our current SearchState and invalidate the
807 // 'current' image node of the upper SearchState (which might lead to us
808 // backtracking it, and so on).
809 while (!search_states_.empty()) {
810 SearchState* const last_ss = &search_states_.back();
811 image_partition->UndoRefineUntilNumPartsEqual(
812 last_ss->num_parts_before_trying_to_map_base_node);
813 if (last_ss->first_image_node >= 0) {
814 // Find out and prune the remaining potential image nodes: there is
815 // no permutation that maps base_node -> image_node that is
816 // compatible with the current partition, so there can't be a
817 // permutation that maps base_node -> X either, for all X in the orbit
818 // of 'image_node' under valid permutations compatible with the
819 // current partition. Ditto for other potential image nodes.
820 //
821 // TODO(user): fix this: we should really be collecting all
822 // permutations displacing any node in "image_part", for the pruning
823 // to be really exhaustive. We could also consider alternative ways,
824 // like incrementally maintaining the list of permutations compatible
825 // with the partition so far.
826 const int part = image_partition->PartOf(last_ss->first_image_node);
827 last_ss->remaining_pruned_image_nodes.reserve(
828 image_partition->SizeOfPart(part));
829 last_ss->remaining_pruned_image_nodes.push_back(
830 last_ss->first_image_node);
831 for (const int e : image_partition->ElementsInPart(part)) {
832 if (e != last_ss->first_image_node) {
833 last_ss->remaining_pruned_image_nodes.push_back(e);
834 }
835 }
836 {
837 ScopedTimeDistributionUpdater u(&stats_.pruning_time);
838 PruneOrbitsUnderPermutationsCompatibleWithPartition(
839 *image_partition, generators_found_so_far,
840 permutations_displacing_node[last_ss->first_image_node],
841 &last_ss->remaining_pruned_image_nodes);
842 }
843 SwapFrontAndBack(&last_ss->remaining_pruned_image_nodes);
844 DCHECK_EQ(last_ss->remaining_pruned_image_nodes.back(),
845 last_ss->first_image_node);
846 last_ss->first_image_node = -1;
847 }
848 last_ss->remaining_pruned_image_nodes.pop_back();
849 if (!last_ss->remaining_pruned_image_nodes.empty()) break;
850
851 VLOG(4) << "Backtracking one level up.";
852 base_partition->UndoRefineUntilNumPartsEqual(
853 last_ss->num_parts_before_trying_to_map_base_node);
854 // If this was the root search state (i.e. we fully backtracked and
855 // will exit the search after that), we don't have mappings to undo.
856 // We run UndoLastMappings() anyway, because it's a no-op in that case.
857 tmp_dynamic_permutation_.UndoLastMappings(&base_singletons);
858 search_states_.pop_back();
859 }
860 // Continue the search.
861 continue;
862 }
863
864 // Case 3): we deepen the search.
865 // Since the search loop expects base_partition to already be refined on the
866 // new base node, we perform that refinement here.
867 VLOG(4) << " Deepening the search.";
868 search_states_.emplace_back(
869 next_base_node, next_image_node,
870 /*num_parts_before_trying_to_map_base_node*/ base_partition->NumParts(),
871 min_potential_mismatching_part_index);
872 {
873 ScopedTimeDistributionUpdater u(&stats_.search_refine_time);
874 DistinguishNodeInPartition(next_base_node, base_partition,
875 &base_singletons);
876 }
877 }
878 // We exhausted the search; we didn't find any permutation.
879 search_time_updater.AlsoUpdate(&stats_.search_time_fail);
880 return nullptr;
881}
882
883util::BeginEndWrapper<std::vector<int>::const_iterator>
884GraphSymmetryFinder::TailsOfIncomingArcsTo(int node) const {
885 return util::BeginEndWrapper<std::vector<int>::const_iterator>(
886 flattened_reverse_adj_lists_.begin() + reverse_adj_list_index_[node],
887 flattened_reverse_adj_lists_.begin() + reverse_adj_list_index_[node + 1]);
888}
889
890void GraphSymmetryFinder::PruneOrbitsUnderPermutationsCompatibleWithPartition(
891 const DynamicPartition& partition,
892 const std::vector<std::unique_ptr<SparsePermutation>>& permutations,
893 const std::vector<int>& permutation_indices, std::vector<int>* nodes) {
894 VLOG(4) << " Pruning [" << absl::StrJoin(*nodes, ", ") << "]";
895 // TODO(user): apply a smarter test to decide whether to do the pruning
896 // or not: we can accurately estimate the cost of pruning (iterate through
897 // all generators found so far) and its estimated benefit (the cost of
898 // the search below the state that we're currently in, times the expected
899 // number of pruned nodes). Sometimes it may be better to skip the
900 // pruning.
901 if (nodes->size() <= 1) return;
902
903 // Iterate on all targeted permutations. If they are compatible, apply
904 // them to tmp_partition_ which will contain the incrementally merged
905 // equivalence classes.
906 std::vector<int>& tmp_nodes_on_support =
907 tmp_stack_; // Rename, for readability.
908 DCHECK(tmp_nodes_on_support.empty());
909 // TODO(user): investigate further optimizations: maybe it's possible
910 // to incrementally maintain the set of permutations that is compatible
911 // with the current partition, instead of recomputing it here?
912 for (const int p : permutation_indices) {
913 const SparsePermutation& permutation = *permutations[p];
914 // First, a quick compatibility check: the permutation's cycles must be
915 // smaller than or equal to the size of the part that they are included in.
916 bool compatible = true;
917 for (int c = 0; c < permutation.NumCycles(); ++c) {
918 const SparsePermutation::Iterator cycle = permutation.Cycle(c);
919 if (cycle.size() >
920 partition.SizeOfPart(partition.PartOf(*cycle.begin()))) {
921 compatible = false;
922 break;
923 }
924 }
925 if (!compatible) continue;
926 // Now the full compatibility check: each cycle of the permutation must
927 // be fully included in an image part.
928 for (int c = 0; c < permutation.NumCycles(); ++c) {
929 int part = -1;
930 for (const int node : permutation.Cycle(c)) {
931 if (partition.PartOf(node) != part) {
932 if (part >= 0) {
933 compatible = false;
934 break;
935 }
936 part = partition.PartOf(node); // Initialization of 'part'.
937 }
938 }
939 }
940 if (!compatible) continue;
941 // The permutation is fully compatible!
942 // TODO(user): ignore cycles that are outside of image_part.
943 MergeNodeEquivalenceClassesAccordingToPermutation(permutation,
944 &tmp_partition_, nullptr);
945 for (const int node : permutation.Support()) {
946 if (!tmp_node_mask_[node]) {
947 tmp_node_mask_[node] = true;
948 tmp_nodes_on_support.push_back(node);
949 }
950 }
951 }
952
953 // Apply the pruning.
954 tmp_partition_.KeepOnlyOneNodePerPart(nodes);
955
956 // Reset the "tmp_" structures sparsely.
957 for (const int node : tmp_nodes_on_support) {
958 tmp_node_mask_[node] = false;
959 tmp_partition_.ResetNode(node);
960 }
961 tmp_nodes_on_support.clear();
962 VLOG(4) << " Pruned: [" << absl::StrJoin(*nodes, ", ") << "]";
963}
964
965bool GraphSymmetryFinder::ConfirmFullMatchOrFindNextMappingDecision(
966 const DynamicPartition& base_partition,
967 const DynamicPartition& image_partition,
968 const DynamicPermutation& current_permutation_candidate,
969 int* min_potential_mismatching_part_index_io, int* next_base_node,
970 int* next_image_node) const {
971 *next_base_node = -1;
972 *next_image_node = -1;
973
974 // The following clause should be true most of the time, except in some
975 // specific use cases.
976 if (!absl::GetFlag(FLAGS_minimize_permutation_support_size)) {
977 // First, we try to map the loose ends of the current permutation: these
978 // loose ends can't be mapped to themselves, so we'll have to map them to
979 // something anyway.
980 for (const int loose_node : current_permutation_candidate.LooseEnds()) {
981 DCHECK_GT(base_partition.ElementsInSamePartAs(loose_node).size(), 1);
982 *next_base_node = loose_node;
983 const int root = current_permutation_candidate.RootOf(loose_node);
984 DCHECK_NE(root, loose_node);
985 if (image_partition.PartOf(root) == base_partition.PartOf(loose_node)) {
986 // We prioritize mapping a loose end to its own root (i.e. close a
987 // cycle), if possible, like here: we exit immediately.
988 *next_image_node = root;
989 return false;
990 }
991 }
992 if (*next_base_node != -1) {
993 // We found loose ends, but none that mapped to its own root. Just pick
994 // any valid image.
995 *next_image_node =
996 *image_partition
997 .ElementsInPart(base_partition.PartOf(*next_base_node))
998 .begin();
999 return false;
1000 }
1001 }
1002
1003 // If there is no loose node (i.e. the current permutation only has closed
1004 // cycles), we fall back to picking any part that is different in the base and
1005 // image partitions; because we know that some mapping decision will have to
1006 // be made there.
1007 // SUBTLE: we use "min_potential_mismatching_part_index_io" to incrementally
1008 // keep running this search (for a mismatching part) from where we left off.
1009 // TODO(user): implement a simpler search for a mismatching part: it's
1010 // trivially possible if the base partition maintains a hash set of all
1011 // Fprints of its parts, and if the image partition uses that to maintain the
1012 // list of 'different' non-singleton parts.
1013 const int initial_min_potential_mismatching_part_index =
1014 *min_potential_mismatching_part_index_io;
1015 for (; *min_potential_mismatching_part_index_io < base_partition.NumParts();
1016 ++*min_potential_mismatching_part_index_io) {
1017 const int p = *min_potential_mismatching_part_index_io;
1018 if (base_partition.SizeOfPart(p) != 1 &&
1019 base_partition.FprintOfPart(p) != image_partition.FprintOfPart(p)) {
1020 GetBestMapping(base_partition, image_partition, p, next_base_node,
1021 next_image_node);
1022 return false;
1023 }
1024
1025 const int parent = base_partition.ParentOfPart(p);
1026 if (parent < initial_min_potential_mismatching_part_index &&
1027 base_partition.SizeOfPart(parent) != 1 &&
1028 base_partition.FprintOfPart(parent) !=
1029 image_partition.FprintOfPart(parent)) {
1030 GetBestMapping(base_partition, image_partition, parent, next_base_node,
1031 next_image_node);
1032 return false;
1033 }
1034 }
1035
1036 // We didn't find an unequal part. DCHECK that our "incremental" check was
1037 // actually correct and that all non-singleton parts are indeed equal.
1038 if (DEBUG_MODE) {
1039 for (int p = 0; p < base_partition.NumParts(); ++p) {
1040 if (base_partition.SizeOfPart(p) != 1) {
1041 CHECK_EQ(base_partition.FprintOfPart(p),
1042 image_partition.FprintOfPart(p));
1043 }
1044 }
1045 }
1046 return true;
1047}
1048
1049std::string GraphSymmetryFinder::SearchState::DebugString() const {
1050 return absl::StrFormat(
1051 "SearchState{ base_node=%d, first_image_node=%d,"
1052 " remaining_pruned_image_nodes=[%s],"
1053 " num_parts_before_trying_to_map_base_node=%d }",
1054 base_node, first_image_node,
1055 absl::StrJoin(remaining_pruned_image_nodes, " "),
1056 num_parts_before_trying_to_map_base_node);
1057}
1058
1059} // namespace operations_research
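// A minimal usage sketch (hypothetical: the graph data and variable names are
// illustrative, and we assume GraphSymmetryFinder::Graph is the static graph
// type declared in find_graph_symmetries.h):
//
//   GraphSymmetryFinder::Graph graph(/*num_nodes=*/3, /*arc_capacity=*/6);
//   // An undirected triangle: add both directions of each edge.
//   for (const auto& [a, b] :
//        {std::pair<int, int>{0, 1}, {1, 2}, {2, 0}}) {
//     graph.AddArc(a, b);
//     graph.AddArc(b, a);
//   }
//   graph.Build();
//   GraphSymmetryFinder finder(graph, /*is_undirected=*/true);
//   std::vector<int> equivalence_classes(graph.num_nodes(), 0);
//   std::vector<std::unique_ptr<SparsePermutation>> generators;
//   std::vector<int> orbit_sizes;
//   const absl::Status status =
//       finder.FindSymmetries(&equivalence_classes, &generators, &orbit_sizes);
//   // On success, the product of the entries of orbit_sizes is the size of
//   // the automorphism group (6 for a triangle), and equivalence_classes
//   // groups the nodes into orbits.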