OR-Tools  9.2
all_different.cc
Go to the documentation of this file.
1// Copyright 2010-2021 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
#include "ortools/sat/all_different.h"

#include <algorithm>
#include <cstdint>
#include <limits>
#include <map>
#include <memory>

#include "absl/container/flat_hash_set.h"
#include "ortools/base/logging.h"
#include "ortools/base/macros.h"
#include "ortools/graph/strongly_connected_components.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/sat_base.h"
#include "ortools/util/sort.h"
29
30namespace operations_research {
31namespace sat {
32
33std::function<void(Model*)> AllDifferentBinary(
34 const std::vector<IntegerVariable>& vars) {
35 return [=](Model* model) {
36 // Fully encode all the given variables and construct a mapping value ->
37 // List of literal each indicating that a given variable takes this value.
38 //
39 // Note that we use a map to always add the constraints in the same order.
40 std::map<IntegerValue, std::vector<Literal>> value_to_literals;
41 IntegerEncoder* encoder = model->GetOrCreate<IntegerEncoder>();
42 for (const IntegerVariable var : vars) {
44 for (const auto& entry : encoder->FullDomainEncoding(var)) {
45 value_to_literals[entry.value].push_back(entry.literal);
46 }
47 }
48
49 // Add an at most one constraint for each value.
50 for (const auto& entry : value_to_literals) {
51 if (entry.second.size() > 1) {
52 model->Add(AtMostOneConstraint(entry.second));
53 }
54 }
55
56 // If the number of values is equal to the number of variables, we have
57 // a permutation. We can add a bool_or for each literals attached to a
58 // value.
59 if (value_to_literals.size() == vars.size()) {
60 for (const auto& entry : value_to_literals) {
61 model->Add(ClauseConstraint(entry.second));
62 }
63 }
64 };
65}
66
67std::function<void(Model*)> AllDifferentOnBounds(
68 const std::vector<AffineExpression>& expressions) {
69 return [=](Model* model) {
70 if (expressions.empty()) return;
71 auto* constraint = new AllDifferentBoundsPropagator(
72 expressions, model->GetOrCreate<IntegerTrail>());
73 constraint->RegisterWith(model->GetOrCreate<GenericLiteralWatcher>());
74 model->TakeOwnership(constraint);
75 };
76}
77
78std::function<void(Model*)> AllDifferentOnBounds(
79 const std::vector<IntegerVariable>& vars) {
80 return [=](Model* model) {
81 if (vars.empty()) return;
82 std::vector<AffineExpression> expressions;
83 expressions.reserve(vars.size());
84 for (const IntegerVariable var : vars) {
85 expressions.push_back(AffineExpression(var));
86 }
87 auto* constraint = new AllDifferentBoundsPropagator(
88 expressions, model->GetOrCreate<IntegerTrail>());
89 constraint->RegisterWith(model->GetOrCreate<GenericLiteralWatcher>());
90 model->TakeOwnership(constraint);
91 };
92}
93
94std::function<void(Model*)> AllDifferentAC(
95 const std::vector<IntegerVariable>& variables) {
96 return [=](Model* model) {
97 if (variables.size() < 3) return;
98
100 variables, model->GetOrCreate<IntegerEncoder>(),
101 model->GetOrCreate<Trail>(), model->GetOrCreate<IntegerTrail>());
102 constraint->RegisterWith(model->GetOrCreate<GenericLiteralWatcher>());
103 model->TakeOwnership(constraint);
104 };
105}
106
108 std::vector<IntegerVariable> variables, IntegerEncoder* encoder,
109 Trail* trail, IntegerTrail* integer_trail)
110 : num_variables_(variables.size()),
111 variables_(std::move(variables)),
112 trail_(trail),
113 integer_trail_(integer_trail) {
114 // Initialize literals cache.
115 int64_t min_value = std::numeric_limits<int64_t>::max();
116 int64_t max_value = std::numeric_limits<int64_t>::min();
117 variable_min_value_.resize(num_variables_);
118 variable_max_value_.resize(num_variables_);
119 variable_literal_index_.resize(num_variables_);
120 int num_fixed_variables = 0;
121 for (int x = 0; x < num_variables_; x++) {
122 variable_min_value_[x] = integer_trail_->LowerBound(variables_[x]).value();
123 variable_max_value_[x] = integer_trail_->UpperBound(variables_[x]).value();
124
125 // Compute value range of all variables.
126 min_value = std::min(min_value, variable_min_value_[x]);
127 max_value = std::max(max_value, variable_max_value_[x]);
128
129 // FullyEncode does not like 1-value domains, handle this case first.
130 // TODO(user): Prune now, ignore these variables during solving.
131 if (variable_min_value_[x] == variable_max_value_[x]) {
132 num_fixed_variables++;
133 variable_literal_index_[x].push_back(kTrueLiteralIndex);
134 continue;
135 }
136
137 // Force full encoding if not already done.
138 if (!encoder->VariableIsFullyEncoded(variables_[x])) {
139 encoder->FullyEncodeVariable(variables_[x]);
140 }
141
142 // Fill cache with literals, default value is kFalseLiteralIndex.
143 int64_t size = variable_max_value_[x] - variable_min_value_[x] + 1;
144 variable_literal_index_[x].resize(size, kFalseLiteralIndex);
145 for (const auto& entry : encoder->FullDomainEncoding(variables_[x])) {
146 int64_t value = entry.value.value();
147 // Can happen because of initial propagation!
148 if (value < variable_min_value_[x] || variable_max_value_[x] < value) {
149 continue;
150 }
151 variable_literal_index_[x][value - variable_min_value_[x]] =
152 entry.literal.Index();
153 }
154 }
155 min_all_values_ = min_value;
156 num_all_values_ = max_value - min_value + 1;
157
158 successor_.resize(num_variables_);
159 variable_to_value_.assign(num_variables_, -1);
160 visiting_.resize(num_variables_);
161 variable_visited_from_.resize(num_variables_);
162 residual_graph_successors_.resize(num_variables_ + num_all_values_ + 1);
163 component_number_.resize(num_variables_ + num_all_values_ + 1);
164}
165
167 const int id = watcher->Register(this);
168 watcher->SetPropagatorPriority(id, 2);
169 for (const auto& literal_indices : variable_literal_index_) {
170 for (const LiteralIndex li : literal_indices) {
171 // Watch only unbound literals.
172 if (li >= 0 &&
173 !trail_->Assignment().VariableIsAssigned(Literal(li).Variable())) {
174 watcher->WatchLiteral(Literal(li), id);
175 watcher->WatchLiteral(Literal(li).Negated(), id);
176 }
177 }
178 }
179}
180
181LiteralIndex AllDifferentConstraint::VariableLiteralIndexOf(int x,
182 int64_t value) {
183 return (value < variable_min_value_[x] || variable_max_value_[x] < value)
185 : variable_literal_index_[x][value - variable_min_value_[x]];
186}
187
188inline bool AllDifferentConstraint::VariableHasPossibleValue(int x,
189 int64_t value) {
190 LiteralIndex li = VariableLiteralIndexOf(x, value);
191 if (li == kFalseLiteralIndex) return false;
192 if (li == kTrueLiteralIndex) return true;
193 DCHECK_GE(li, 0);
194 return !trail_->Assignment().LiteralIsFalse(Literal(li));
195}
196
197bool AllDifferentConstraint::MakeAugmentingPath(int start) {
198 // Do a BFS and use visiting_ as a queue, with num_visited pointing
199 // at its begin() and num_to_visit its end().
200 // To switch to the augmenting path once a nonmatched value was found,
201 // we remember the BFS tree in variable_visited_from_.
202 int num_to_visit = 0;
203 int num_visited = 0;
204 // Enqueue start.
205 visiting_[num_to_visit++] = start;
206 variable_visited_[start] = true;
207 variable_visited_from_[start] = -1;
208
209 while (num_visited < num_to_visit) {
210 // Dequeue node to visit.
211 const int node = visiting_[num_visited++];
212
213 for (const int value : successor_[node]) {
214 if (value_visited_[value]) continue;
215 value_visited_[value] = true;
216 if (value_to_variable_[value] == -1) {
217 // value is not matched: change path from node to start, and return.
218 int path_node = node;
219 int path_value = value;
220 while (path_node != -1) {
221 int old_value = variable_to_value_[path_node];
222 variable_to_value_[path_node] = path_value;
223 value_to_variable_[path_value] = path_node;
224 path_node = variable_visited_from_[path_node];
225 path_value = old_value;
226 }
227 return true;
228 } else {
229 // Enqueue node matched to value.
230 const int next_node = value_to_variable_[value];
231 variable_visited_[next_node] = true;
232 visiting_[num_to_visit++] = next_node;
233 variable_visited_from_[next_node] = node;
234 }
235 }
236 }
237 return false;
238}
239
240// The algorithm copies the solver state to successor_, which is used to compute
241// a matching. If all variables can be matched, it generates the residual graph
242// in separate vectors, computes its SCCs, and filters variable -> value if
243// variable is not in the same SCC as value.
244// Explanations for failure and filtering are fine-grained:
245// failure is explained by a Hall set, i.e. dom(variables) \subseteq {values},
246// with |variables| < |values|; filtering is explained by the Hall set that
247// would happen if the variable was assigned to the value.
248//
249// TODO(user): If needed, there are several ways performance could be
250// improved.
251// If copying the variable state is too costly, it could be maintained instead.
252// If the propagator has too many fruitless calls (without failing/pruning),
253// we can remember the O(n) arcs used in the matching and the SCC decomposition,
254// and guard calls to Propagate() if these arcs are still valid.
256 // Copy variable state to graph state.
257 prev_matching_ = variable_to_value_;
258 value_to_variable_.assign(num_all_values_, -1);
259 variable_to_value_.assign(num_variables_, -1);
260 for (int x = 0; x < num_variables_; x++) {
261 successor_[x].clear();
262 const int64_t min_value = integer_trail_->LowerBound(variables_[x]).value();
263 const int64_t max_value = integer_trail_->UpperBound(variables_[x]).value();
264 for (int64_t value = min_value; value <= max_value; value++) {
265 if (VariableHasPossibleValue(x, value)) {
266 const int offset_value = value - min_all_values_;
267 // Forward-checking should propagate x != value.
268 successor_[x].push_back(offset_value);
269 }
270 }
271 if (successor_[x].size() == 1) {
272 const int offset_value = successor_[x][0];
273 if (value_to_variable_[offset_value] == -1) {
274 value_to_variable_[offset_value] = x;
275 variable_to_value_[x] = offset_value;
276 }
277 }
278 }
279
280 // Because we currently propagates all clauses before entering this
281 // propagator, we known that this can't happen.
282 if (DEBUG_MODE) {
283 for (int x = 0; x < num_variables_; x++) {
284 for (const int offset_value : successor_[x]) {
285 if (value_to_variable_[offset_value] != -1 &&
286 value_to_variable_[offset_value] != x) {
287 LOG(FATAL) << "Should have been propagated by AllDifferentBinary()!";
288 }
289 }
290 }
291 }
292
293 // Seed with previous matching.
294 for (int x = 0; x < num_variables_; x++) {
295 if (variable_to_value_[x] != -1) continue;
296 const int prev_value = prev_matching_[x];
297 if (prev_value == -1 || value_to_variable_[prev_value] != -1) continue;
298
299 if (VariableHasPossibleValue(x, prev_matching_[x] + min_all_values_)) {
300 variable_to_value_[x] = prev_matching_[x];
301 value_to_variable_[prev_matching_[x]] = x;
302 }
303 }
304
305 // Compute max matching.
306 int x = 0;
307 for (; x < num_variables_; x++) {
308 if (variable_to_value_[x] == -1) {
309 value_visited_.assign(num_all_values_, false);
310 variable_visited_.assign(num_variables_, false);
311 MakeAugmentingPath(x);
312 }
313 if (variable_to_value_[x] == -1) break; // No augmenting path exists.
314 }
315
316 // Fail if covering variables impossible.
317 // Explain with the forbidden parts of the graph that prevent
318 // MakeAugmentingPath from increasing the matching size.
319 if (x < num_variables_) {
320 // For now explain all forbidden arcs.
321 std::vector<Literal>* conflict = trail_->MutableConflict();
322 conflict->clear();
323 for (int y = 0; y < num_variables_; y++) {
324 if (!variable_visited_[y]) continue;
325 for (int value = variable_min_value_[y]; value <= variable_max_value_[y];
326 value++) {
327 const LiteralIndex li = VariableLiteralIndexOf(y, value);
328 if (li >= 0 && !value_visited_[value - min_all_values_]) {
329 DCHECK(trail_->Assignment().LiteralIsFalse(Literal(li)));
330 conflict->push_back(Literal(li));
331 }
332 }
333 }
334 return false;
335 }
336
337 // The current matching is a valid solution, now try to filter values.
338 // Build residual graph, compute its SCCs.
339 for (int x = 0; x < num_variables_; x++) {
340 residual_graph_successors_[x].clear();
341 for (const int succ : successor_[x]) {
342 if (succ != variable_to_value_[x]) {
343 residual_graph_successors_[x].push_back(num_variables_ + succ);
344 }
345 }
346 }
347 for (int offset_value = 0; offset_value < num_all_values_; offset_value++) {
348 residual_graph_successors_[num_variables_ + offset_value].clear();
349 if (value_to_variable_[offset_value] != -1) {
350 residual_graph_successors_[num_variables_ + offset_value].push_back(
351 value_to_variable_[offset_value]);
352 }
353 }
354 const int dummy_node = num_variables_ + num_all_values_;
355 residual_graph_successors_[dummy_node].clear();
356 if (num_variables_ < num_all_values_) {
357 for (int x = 0; x < num_variables_; x++) {
358 residual_graph_successors_[dummy_node].push_back(x);
359 }
360 for (int offset_value = 0; offset_value < num_all_values_; offset_value++) {
361 if (value_to_variable_[offset_value] == -1) {
362 residual_graph_successors_[num_variables_ + offset_value].push_back(
363 dummy_node);
364 }
365 }
366 }
367
368 // Compute SCCs, make node -> component map.
369 struct SccOutput {
370 explicit SccOutput(std::vector<int>* c) : components(c) {}
371 void emplace_back(int const* b, int const* e) {
372 for (int const* it = b; it < e; ++it) {
373 (*components)[*it] = num_components;
374 }
375 ++num_components;
376 }
377 int num_components = 0;
378 std::vector<int>* components;
379 };
380 SccOutput scc_output(&component_number_);
382 static_cast<int>(residual_graph_successors_.size()),
383 residual_graph_successors_, &scc_output);
384
385 // Remove arcs var -> val where SCC(var) -/->* SCC(val).
386 for (int x = 0; x < num_variables_; x++) {
387 if (successor_[x].size() == 1) continue;
388 for (const int offset_value : successor_[x]) {
389 const int value_node = offset_value + num_variables_;
390 if (variable_to_value_[x] != offset_value &&
391 component_number_[x] != component_number_[value_node] &&
392 VariableHasPossibleValue(x, offset_value + min_all_values_)) {
393 // We can deduce that x != value. To explain, force x == offset_value,
394 // then find another assignment for the variable matched to
395 // offset_value. It will fail: explaining why is the same as
396 // explaining failure as above, and it is an explanation of x != value.
397 value_visited_.assign(num_all_values_, false);
398 variable_visited_.assign(num_variables_, false);
399 // Undo x -> old_value and old_variable -> offset_value.
400 const int old_variable = value_to_variable_[offset_value];
401 variable_to_value_[old_variable] = -1;
402 const int old_value = variable_to_value_[x];
403 value_to_variable_[old_value] = -1;
404 variable_to_value_[x] = offset_value;
405 value_to_variable_[offset_value] = x;
406
407 value_visited_[offset_value] = true;
408 MakeAugmentingPath(old_variable);
409 DCHECK_EQ(variable_to_value_[old_variable], -1); // No reassignment.
410
411 std::vector<Literal>* reason = trail_->GetEmptyVectorToStoreReason();
412 for (int y = 0; y < num_variables_; y++) {
413 if (!variable_visited_[y]) continue;
414 for (int value = variable_min_value_[y];
415 value <= variable_max_value_[y]; value++) {
416 const LiteralIndex li = VariableLiteralIndexOf(y, value);
417 if (li >= 0 && !value_visited_[value - min_all_values_]) {
418 DCHECK(!VariableHasPossibleValue(y, value));
419 reason->push_back(Literal(li));
420 }
421 }
422 }
423
424 const LiteralIndex li =
425 VariableLiteralIndexOf(x, offset_value + min_all_values_);
428 return trail_->EnqueueWithStoredReason(Literal(li).Negated());
429 }
430 }
431 }
432
433 return true;
434}
435
437 const std::vector<AffineExpression>& expressions,
438 IntegerTrail* integer_trail)
439 : integer_trail_(integer_trail) {
440 CHECK(!expressions.empty());
441
442 // We need +2 for sentinels.
443 const int capacity = expressions.size() + 2;
444 index_to_start_index_.resize(capacity);
445 index_to_end_index_.resize(capacity);
446 index_is_present_.resize(capacity, false);
447 index_to_expr_.resize(capacity, kNoIntegerVariable);
448
449 for (int i = 0; i < expressions.size(); ++i) {
450 bounds_.push_back({expressions[i]});
451 negated_bounds_.push_back({expressions[i].Negated()});
452 }
453}
454
456 if (!PropagateLowerBounds()) return false;
457
458 // Note that it is not required to swap back bounds_ and negated_bounds_.
459 // TODO(user): investigate the impact.
460 std::swap(bounds_, negated_bounds_);
461 const bool result = PropagateLowerBounds();
462 std::swap(bounds_, negated_bounds_);
463 return result;
464}
465
466void AllDifferentBoundsPropagator::FillHallReason(IntegerValue hall_lb,
467 IntegerValue hall_ub) {
468 integer_reason_.clear();
469 const int limit = GetIndex(hall_ub);
470 for (int i = GetIndex(hall_lb); i <= limit; ++i) {
471 const AffineExpression expr = index_to_expr_[i];
472 integer_reason_.push_back(expr.GreaterOrEqual(hall_lb));
473 integer_reason_.push_back(expr.LowerOrEqual(hall_ub));
474 }
475}
476
477int AllDifferentBoundsPropagator::FindStartIndexAndCompressPath(int index) {
478 // First, walk the pointer until we find one pointing to itself.
479 int start_index = index;
480 while (true) {
481 const int next = index_to_start_index_[start_index];
482 if (start_index == next) break;
483 start_index = next;
484 }
485
486 // Second, redo the same thing and make everyone point to the representative.
487 while (true) {
488 const int next = index_to_start_index_[index];
489 if (start_index == next) break;
490 index_to_start_index_[index] = start_index;
491 index = next;
492 }
493 return start_index;
494}
495
496bool AllDifferentBoundsPropagator::PropagateLowerBounds() {
497 // Start by filling the cached bounds and sorting by increasing lb.
498 for (CachedBounds& entry : bounds_) {
499 entry.lb = integer_trail_->LowerBound(entry.expr);
500 entry.ub = integer_trail_->UpperBound(entry.expr);
501 }
502 IncrementalSort(bounds_.begin(), bounds_.end(),
503 [](CachedBounds a, CachedBounds b) { return a.lb < b.lb; });
504
505 // We will split the affine epressions in vars sorted by lb in contiguous
506 // subset with index of the form [start, start + num_in_window).
507 int start = 0;
508 int num_in_window = 1;
509
510 // Minimum lower bound in the current window.
511 IntegerValue min_lb = bounds_.front().lb;
512
513 const int size = bounds_.size();
514 for (int i = 1; i < size; ++i) {
515 const IntegerValue lb = bounds_[i].lb;
516
517 // If the lower bounds of all the other variables is greater, then it can
518 // never fall into a potential hall interval formed by the variable in the
519 // current window, so we can split the problem into independent parts.
520 if (lb <= min_lb + IntegerValue(num_in_window - 1)) {
521 ++num_in_window;
522 continue;
523 }
524
525 // Process the current window.
526 if (num_in_window > 1) {
527 absl::Span<CachedBounds> window(&bounds_[start], num_in_window);
528 if (!PropagateLowerBoundsInternal(min_lb, window)) {
529 return false;
530 }
531 }
532
533 // Start of the next window.
534 start = i;
535 num_in_window = 1;
536 min_lb = lb;
537 }
538
539 // Take care of the last window.
540 if (num_in_window > 1) {
541 absl::Span<CachedBounds> window(&bounds_[start], num_in_window);
542 return PropagateLowerBoundsInternal(min_lb, window);
543 }
544
545 return true;
546}
547
548bool AllDifferentBoundsPropagator::PropagateLowerBoundsInternal(
549 IntegerValue min_lb, absl::Span<CachedBounds> bounds) {
550 hall_starts_.clear();
551 hall_ends_.clear();
552
553 // All cached lb in bounds will be in [min_lb, min_lb + bounds_.size()).
554 // Make sure we change our base_ so that GetIndex() fit in our buffers.
555 base_ = min_lb - IntegerValue(1);
556
557 // Sparse cleaning of index_is_present_.
558 for (const int i : indices_to_clear_) {
559 index_is_present_[i] = false;
560 }
561 indices_to_clear_.clear();
562
563 // Sort bounds by increasing ub.
564 std::sort(bounds.begin(), bounds.end(),
565 [](CachedBounds a, CachedBounds b) { return a.ub < b.ub; });
566 for (const CachedBounds entry : bounds) {
567 const AffineExpression expr = entry.expr;
568
569 // Note that it is important to use the cache to make sure GetIndex() is
570 // not out of bound in case integer_trail_->LowerBound() changed when we
571 // pushed something.
572 const IntegerValue lb = entry.lb;
573 const int lb_index = GetIndex(lb);
574 const bool value_is_covered = index_is_present_[lb_index];
575
576 // Check if lb is in an Hall interval, and push it if this is the case.
577 if (value_is_covered) {
578 const int hall_index =
579 std::lower_bound(hall_ends_.begin(), hall_ends_.end(), lb) -
580 hall_ends_.begin();
581 if (hall_index < hall_ends_.size() && hall_starts_[hall_index] <= lb) {
582 const IntegerValue hs = hall_starts_[hall_index];
583 const IntegerValue he = hall_ends_[hall_index];
584 FillHallReason(hs, he);
585 integer_reason_.push_back(expr.GreaterOrEqual(hs));
586 if (!integer_trail_->SafeEnqueue(expr.GreaterOrEqual(he + 1),
587 integer_reason_)) {
588 return false;
589 }
590 }
591 }
592
593 // Update our internal representation of the non-consecutive intervals.
594 //
595 // If lb is not used, we add a node there, otherwise we add it to the
596 // right of the interval that contains lb. In both cases, if there is an
597 // interval to the left (resp. right) we merge them.
598 int new_index = lb_index;
599 int start_index = lb_index;
600 int end_index = lb_index;
601 if (value_is_covered) {
602 start_index = FindStartIndexAndCompressPath(new_index);
603 new_index = index_to_end_index_[start_index] + 1;
604 end_index = new_index;
605 } else {
606 if (index_is_present_[new_index - 1]) {
607 start_index = FindStartIndexAndCompressPath(new_index - 1);
608 }
609 }
610 if (index_is_present_[new_index + 1]) {
611 end_index = index_to_end_index_[new_index + 1];
612 index_to_start_index_[new_index + 1] = start_index;
613 }
614
615 // Update the end of the representative.
616 index_to_end_index_[start_index] = end_index;
617
618 // This is the only place where we "add" a new node.
619 {
620 index_to_start_index_[new_index] = start_index;
621 index_to_expr_[new_index] = expr;
622 index_is_present_[new_index] = true;
623 indices_to_clear_.push_back(new_index);
624 }
625
626 // In most situation, we cannot have a conflict now, because it should have
627 // been detected before by pushing an interval lower bound past its upper
628 // bound. However, it is possible that when we push one bound, other bounds
629 // change. So if the upper bound is smaller than the current interval end,
630 // we abort so that the conflit reason will be better on the next call to
631 // the propagator.
632 const IntegerValue end = GetValue(end_index);
633 if (end > integer_trail_->UpperBound(expr)) return true;
634
635 // If we have a new Hall interval, add it to the set. Note that it will
636 // always be last, and if it overlaps some previous Hall intervals, it
637 // always overlaps them fully.
638 //
639 // Note: It is okay to not use entry.ub here if we want to fetch the last
640 // value, but in practice it shouldn't really change when we push a
641 // lower_bound and it is faster to use the cached entry.
642 if (end == entry.ub) {
643 const IntegerValue start = GetValue(start_index);
644 while (!hall_starts_.empty() && start <= hall_starts_.back()) {
645 hall_starts_.pop_back();
646 hall_ends_.pop_back();
647 }
648 DCHECK(hall_ends_.empty() || hall_ends_.back() < start);
649 hall_starts_.push_back(start);
650 hall_ends_.push_back(end);
651 }
652 }
653 return true;
654}
655
657 GenericLiteralWatcher* watcher) {
658 const int id = watcher->Register(this);
659 for (const CachedBounds& entry : bounds_) {
660 watcher->WatchAffineExpression(entry.expr, id);
661 }
663}
664
665} // namespace sat
666} // namespace operations_research
int64_t max
Definition: alldiff_cst.cc:140
int64_t min
Definition: alldiff_cst.cc:139
#define CHECK(condition)
Definition: base/logging.h:495
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:891
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:894
#define LOG(severity)
Definition: base/logging.h:420
#define DCHECK(condition)
Definition: base/logging.h:889
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:890
void RegisterWith(GenericLiteralWatcher *watcher)
AllDifferentBoundsPropagator(const std::vector< AffineExpression > &expressions, IntegerTrail *integer_trail)
void RegisterWith(GenericLiteralWatcher *watcher)
AllDifferentConstraint(std::vector< IntegerVariable > variables, IntegerEncoder *encoder, Trail *trail, IntegerTrail *integer_trail)
void WatchLiteral(Literal l, int id, int watch_index=-1)
Definition: integer.h:1551
void WatchAffineExpression(AffineExpression e, int id)
Definition: integer.h:1271
void SetPropagatorPriority(int id, int priority)
Definition: integer.cc:2018
int Register(PropagatorInterface *propagator)
Definition: integer.cc:1995
void FullyEncodeVariable(IntegerVariable var)
Definition: integer.cc:49
std::vector< ValueLiteralPair > FullDomainEncoding(IntegerVariable var) const
Definition: integer.cc:114
bool VariableIsFullyEncoded(IntegerVariable var) const
Definition: integer.cc:79
IntegerValue UpperBound(IntegerVariable i) const
Definition: integer.h:1439
ABSL_MUST_USE_RESULT bool SafeEnqueue(IntegerLiteral i_lit, absl::Span< const IntegerLiteral > integer_reason)
Definition: integer.cc:1009
IntegerValue LowerBound(IntegerVariable i) const
Definition: integer.h:1435
Class that owns everything related to a particular optimization model.
Definition: sat/model.h:38
std::vector< Literal > * GetEmptyVectorToStoreReason(int trail_index) const
Definition: sat_base.h:322
const VariablesAssignment & Assignment() const
Definition: sat_base.h:382
ABSL_MUST_USE_RESULT bool EnqueueWithStoredReason(Literal true_literal)
Definition: sat_base.h:286
std::vector< Literal > * MutableConflict()
Definition: sat_base.h:363
bool VariableIsAssigned(BooleanVariable var) const
Definition: sat_base.h:160
bool LiteralIsFalse(Literal literal) const
Definition: sat_base.h:149
int64_t b
int64_t a
Block * next
SharedBoundsManager * bounds
int64_t value
IntVar * var
Definition: expr_array.cc:1874
double lower_bound
GRBmodel * model
const int FATAL
Definition: log_severity.h:32
const bool DEBUG_MODE
Definition: macros.h:24
void swap(IdMap< K, V > &a, IdMap< K, V > &b)
Definition: id_map.h:262
std::function< std::vector< ValueLiteralPair >(Model *)> FullyEncodeVariable(IntegerVariable var)
Definition: integer.h:1773
std::function< void(Model *)> AllDifferentAC(const std::vector< IntegerVariable > &variables)
std::function< void(Model *)> ClauseConstraint(absl::Span< const Literal > literals)
Definition: sat_solver.h:906
const IntegerVariable kNoIntegerVariable(-1)
const LiteralIndex kTrueLiteralIndex(-2)
std::function< void(Model *)> AtMostOneConstraint(const std::vector< Literal > &literals)
Definition: sat_solver.h:892
const LiteralIndex kFalseLiteralIndex(-3)
std::function< void(Model *)> AllDifferentBinary(const std::vector< IntegerVariable > &vars)
std::function< void(Model *)> AllDifferentOnBounds(const std::vector< AffineExpression > &expressions)
Collection of objects used to extend the Constraint Solver library.
void IncrementalSort(int max_comparisons, Iterator begin, Iterator end, Compare comp=Compare{}, bool is_stable=false)
Definition: sort.h:46
STL namespace.
int index
Definition: pack.cc:509
int64_t capacity
std::optional< int64_t > end
int64_t start
void FindStronglyConnectedComponents(const NodeIndex num_nodes, const Graph &graph, SccOutput *components)
IntegerLiteral GreaterOrEqual(IntegerValue bound) const
Definition: integer.h:1406
IntegerLiteral LowerOrEqual(IntegerValue bound) const
Definition: integer.h:1422