// OR-Tools 9.3
// constraint_solver/table.cc
// Copyright 2010-2021 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//
// This file implements the table constraints.
#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "ortools/constraint_solver/constraint_solver.h"
#include "ortools/constraint_solver/constraint_solveri.h"
#include "ortools/util/bitset.h"
#include "ortools/util/string_array.h"
#include "ortools/util/tuple_set.h"
37namespace operations_research {
38namespace {
// ----- Presolve helpers -----
// TODO(user): Move this out of this file.
41struct AffineTransformation { // y == a*x + b.
42 AffineTransformation() : a(1), b(0) {}
43 AffineTransformation(int64_t aa, int64_t bb) : a(aa), b(bb) {
44 CHECK_NE(a, 0);
45 }
46 int64_t a;
47 int64_t b;
48
49 bool Reverse(int64_t value, int64_t* const reverse) const {
50 const int64_t temp = value - b;
51 if (temp % a == 0) {
52 *reverse = temp / a;
53 DCHECK_EQ(Forward(*reverse), value);
54 return true;
55 } else {
56 return false;
57 }
58 }
59
60 int64_t Forward(int64_t value) const { return value * a + b; }
61
62 int64_t UnsafeReverse(int64_t value) const { return (value - b) / a; }
63
64 void Clear() {
65 a = 1;
66 b = 0;
67 }
68
69 std::string DebugString() const {
70 return absl::StrFormat("(%d * x + %d)", a, b);
71 }
72};
// TODO(user): Move this out too.
75class VarLinearizer : public ModelParser {
76 public:
77 VarLinearizer() : target_var_(nullptr), transformation_(nullptr) {}
78 ~VarLinearizer() override {}
79
80 void VisitIntegerVariable(const IntVar* const variable,
81 const std::string& operation, int64_t value,
82 IntVar* const delegate) override {
83 if (operation == ModelVisitor::kSumOperation) {
84 AddConstant(value);
85 delegate->Accept(this);
86 } else if (operation == ModelVisitor::kDifferenceOperation) {
87 AddConstant(value);
88 PushMultiplier(-1);
89 delegate->Accept(this);
90 PopMultiplier();
91 } else if (operation == ModelVisitor::kProductOperation) {
92 PushMultiplier(value);
93 delegate->Accept(this);
94 PopMultiplier();
95 } else if (operation == ModelVisitor::kTraceOperation) {
96 *target_var_ = const_cast<IntVar*>(variable);
97 transformation_->a = multipliers_.back();
98 }
99 }
100
101 void VisitIntegerVariable(const IntVar* const variable,
102 IntExpr* const delegate) override {
103 *target_var_ = const_cast<IntVar*>(variable);
104 transformation_->a = multipliers_.back();
105 }
106
107 void Visit(const IntVar* const var, IntVar** const target_var,
108 AffineTransformation* const transformation) {
109 target_var_ = target_var;
110 transformation_ = transformation;
111 transformation->Clear();
112 PushMultiplier(1);
113 var->Accept(this);
114 PopMultiplier();
115 CHECK(multipliers_.empty());
116 }
117
118 std::string DebugString() const override { return "VarLinearizer"; }
119
120 private:
121 void AddConstant(int64_t constant) {
122 transformation_->b += constant * multipliers_.back();
123 }
124
125 void PushMultiplier(int64_t multiplier) {
126 if (multipliers_.empty()) {
127 multipliers_.push_back(multiplier);
128 } else {
129 multipliers_.push_back(multiplier * multipliers_.back());
130 }
131 }
132
133 void PopMultiplier() { multipliers_.pop_back(); }
134
135 std::vector<int64_t> multipliers_;
136 IntVar** target_var_;
137 AffineTransformation* transformation_;
138};
139
140static const int kBitsInUint64 = 64;
141
// ----- Positive Table Constraint -----

// Structure of the constraint:

// Tuples are indexed, we maintain a bitset for active tuples.

// For each var and each value, we maintain a bitset mask of tuples
// containing this value for this variable.

// Propagation: When a value is removed, blank all active tuples using the
// var-value mask.
// Then we scan all other variable/values to see if there is an active
// tuple that supports it.
156class BasePositiveTableConstraint : public Constraint {
157 public:
158 BasePositiveTableConstraint(Solver* const s, const std::vector<IntVar*>& vars,
159 const IntTupleSet& tuples)
160 : Constraint(s),
161 tuple_count_(tuples.NumTuples()),
162 arity_(vars.size()),
163 vars_(arity_),
164 holes_(arity_),
166 tuples_(tuples),
167 transformations_(arity_) {
168 // This constraint is intensive on domain and holes iterations on
169 // variables. Thus we can visit all variables to get to the
170 // boolean or domain int var beneath it. Then we can reverse
171 // process the tupleset to move in parallel to the simplifications
172 // of the variables. This way, we can keep the memory efficient
173 // nature of shared tuplesets (especially important for
174 // transitions constraints which are a chain of table
175 // constraints). The cost in running time is small as the tuples
176 // are read only once to construct the bitset data structures.
177 VarLinearizer linearizer;
178 for (int i = 0; i < arity_; ++i) {
179 linearizer.Visit(vars[i], &vars_[i], &transformations_[i]);
180 }
181 // Create hole iterators
182 for (int i = 0; i < arity_; ++i) {
183 holes_[i] = vars_[i]->MakeHoleIterator(true);
184 iterators_[i] = vars_[i]->MakeDomainIterator(true);
185 }
186 }
187
188 ~BasePositiveTableConstraint() override {}
189
190 std::string DebugString() const override {
191 return absl::StrFormat("AllowedAssignments(arity = %d, tuple_count = %d)",
193 }
194
195 void Accept(ModelVisitor* const visitor) const override {
196 visitor->BeginVisitConstraint(ModelVisitor::kAllowedAssignments, this);
197 visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kVarsArgument,
198 vars_);
199 visitor->VisitIntegerMatrixArgument(ModelVisitor::kTuplesArgument, tuples_);
200 visitor->EndVisitConstraint(ModelVisitor::kAllowedAssignments, this);
201 }
202
203 protected:
204 bool TupleValue(int tuple_index, int var_index, int64_t* const value) const {
205 return transformations_[var_index].Reverse(
206 tuples_.Value(tuple_index, var_index), value);
207 }
208
209 int64_t UnsafeTupleValue(int tuple_index, int var_index) const {
210 return transformations_[var_index].UnsafeReverse(
211 tuples_.Value(tuple_index, var_index));
212 }
213
214 bool IsTupleSupported(int tuple_index) {
215 for (int var_index = 0; var_index < arity_; ++var_index) {
216 int64_t value = 0;
217 if (!TupleValue(tuple_index, var_index, &value) ||
218 !vars_[var_index]->Contains(value)) {
219 return false;
220 }
221 }
222 return true;
223 }
224
225 const int tuple_count_;
226 const int arity_;
227 std::vector<IntVar*> vars_;
228 std::vector<IntVarIterator*> holes_;
229 std::vector<IntVarIterator*> iterators_;
230 std::vector<int64_t> to_remove_;
231
232 private:
233 // All allowed tuples.
234 const IntTupleSet tuples_;
235 // The set of affine transformations that describe the
236 // simplification of the variables.
237 std::vector<AffineTransformation> transformations_;
238};
239
240class PositiveTableConstraint : public BasePositiveTableConstraint {
241 public:
242 typedef absl::flat_hash_map<int, std::vector<uint64_t>> ValueBitset;
243
244 PositiveTableConstraint(Solver* const s, const std::vector<IntVar*>& vars,
245 const IntTupleSet& tuples)
246 : BasePositiveTableConstraint(s, vars, tuples),
247 word_length_(BitLength64(tuples.NumTuples())),
248 active_tuples_(tuples.NumTuples()) {}
249
250 ~PositiveTableConstraint() override {}
251
252 void Post() override {
254 solver(), this, &PositiveTableConstraint::Propagate, "Propagate");
255 for (int i = 0; i < arity_; ++i) {
256 vars_[i]->WhenDomain(d);
257 Demon* u = MakeConstraintDemon1(
258 solver(), this, &PositiveTableConstraint::Update, "Update", i);
259 vars_[i]->WhenDomain(u);
260 }
261 // Initialize masks.
262 masks_.clear();
263 masks_.resize(arity_);
264 for (int i = 0; i < tuple_count_; ++i) {
265 InitializeMask(i);
266 }
267 // Initialize the active tuple bitset.
268 std::vector<uint64_t> actives(word_length_, 0);
269 for (int tuple_index = 0; tuple_index < tuple_count_; ++tuple_index) {
270 if (IsTupleSupported(tuple_index)) {
271 SetBit64(actives.data(), tuple_index);
272 }
273 }
274 active_tuples_.Init(solver(), actives);
275 }
276
277 void InitialPropagate() override {
278 // Build active_ structure.
279 for (int var_index = 0; var_index < arity_; ++var_index) {
280 for (const auto& it : masks_[var_index]) {
281 if (!vars_[var_index]->Contains(it.first)) {
282 active_tuples_.RevSubtract(solver(), it.second);
283 }
284 }
285 }
286 if (active_tuples_.Empty()) {
287 solver()->Fail();
288 }
289 // Remove unreached values.
290 for (int var_index = 0; var_index < arity_; ++var_index) {
291 const ValueBitset& mask = masks_[var_index];
292 IntVar* const var = vars_[var_index];
293 to_remove_.clear();
294 for (const int64_t value : InitAndGetValues(iterators_[var_index])) {
295 if (!mask.contains(value)) {
296 to_remove_.push_back(value);
297 }
298 }
299 if (!to_remove_.empty()) {
300 var->RemoveValues(to_remove_);
301 }
302 }
303 }
304
305 void Propagate() {
306 for (int var_index = 0; var_index < arity_; ++var_index) {
307 IntVar* const var = vars_[var_index];
308 to_remove_.clear();
309 for (const int64_t value : InitAndGetValues(iterators_[var_index])) {
310 if (!Supported(var_index, value)) {
311 to_remove_.push_back(value);
312 }
313 }
314 if (!to_remove_.empty()) {
315 var->RemoveValues(to_remove_);
316 }
317 }
318 }
319
320 void Update(int index) {
321 const ValueBitset& var_masks = masks_[index];
322 IntVar* const var = vars_[index];
323 const int64_t old_max = var->OldMax();
324 const int64_t vmin = var->Min();
325 const int64_t vmax = var->Max();
326 for (int64_t value = var->OldMin(); value < vmin; ++value) {
327 const auto& it = var_masks.find(value);
328 if (it != var_masks.end()) {
329 BlankActives(it->second);
330 }
331 }
332 for (const int64_t value : InitAndGetValues(holes_[index])) {
333 const auto& it = var_masks.find(value);
334 if (it != var_masks.end()) {
335 BlankActives(it->second);
336 }
337 }
338 for (int64_t value = vmax + 1; value <= old_max; ++value) {
339 const auto& it = var_masks.find(value);
340 if (it != var_masks.end()) {
341 BlankActives(it->second);
342 }
343 }
344 }
345
346 void BlankActives(const std::vector<uint64_t>& mask) {
347 if (!mask.empty()) {
348 active_tuples_.RevSubtract(solver(), mask);
349 if (active_tuples_.Empty()) {
350 solver()->Fail();
351 }
352 }
353 }
354
355 bool Supported(int var_index, int64_t value) {
356 DCHECK_GE(var_index, 0);
357 DCHECK_LT(var_index, arity_);
358 DCHECK(masks_[var_index].contains(value));
359 const std::vector<uint64_t>& mask = masks_[var_index][value];
360 int tmp = 0;
361 return active_tuples_.Intersects(mask, &tmp);
362 }
363
364 std::string DebugString() const override {
365 return absl::StrFormat("PositiveTableConstraint([%s], %d tuples)",
367 }
368
369 protected:
370 void InitializeMask(int tuple_index) {
371 std::vector<int64_t> cache(arity_);
372 for (int var_index = 0; var_index < arity_; ++var_index) {
373 if (!TupleValue(tuple_index, var_index, &cache[var_index])) {
374 return;
375 }
376 }
377 for (int var_index = 0; var_index < arity_; ++var_index) {
378 const int64_t value = cache[var_index];
379 std::vector<uint64_t>& mask = masks_[var_index][value];
380 if (mask.empty()) {
381 mask.assign(word_length_, 0);
382 }
383 SetBit64(mask.data(), tuple_index);
384 }
385 }
386
387 const int word_length_;
388 UnsortedNullableRevBitset active_tuples_;
389 std::vector<ValueBitset> masks_;
390 std::vector<uint64_t> temp_mask_;
391};
// ----- Compact Tables -----
395class CompactPositiveTableConstraint : public BasePositiveTableConstraint {
396 public:
397 CompactPositiveTableConstraint(Solver* const s,
398 const std::vector<IntVar*>& vars,
399 const IntTupleSet& tuples)
400 : BasePositiveTableConstraint(s, vars, tuples),
401 word_length_(BitLength64(tuples.NumTuples())),
402 active_tuples_(tuples.NumTuples()),
403 masks_(arity_),
404 mask_starts_(arity_),
405 mask_ends_(arity_),
406 original_min_(arity_, 0),
409 demon_(nullptr),
410 touched_var_(-1),
411 var_sizes_(arity_, 0) {}
412
413 ~CompactPositiveTableConstraint() override {}
414
415 void Post() override {
416 demon_ = solver()->RegisterDemon(MakeDelayedConstraintDemon0(
417 solver(), this, &CompactPositiveTableConstraint::Propagate,
418 "Propagate"));
419 for (int i = 0; i < arity_; ++i) {
420 Demon* const u = MakeConstraintDemon1(
421 solver(), this, &CompactPositiveTableConstraint::Update, "Update", i);
422 vars_[i]->WhenDomain(u);
423 }
424 for (int i = 0; i < arity_; ++i) {
425 var_sizes_.SetValue(solver(), i, vars_[i]->Size());
426 }
427 }
428
429 void InitialPropagate() override {
430 BuildMasks();
431 FillMasksAndActiveTuples();
432 ComputeMasksBoundaries();
433 BuildSupports();
434 RemoveUnsupportedValues();
435 }
436
437 // ----- Propagation -----
438
439 void Propagate() {
440 // Reset touch_var_ if in mode (more than 1 variable was modified).
441 if (touched_var_ == -2) {
442 touched_var_ = -1;
443 }
444 // This methods scans all values of all variables to see if they
445 // are still supported.
446 // This method is not attached to any particular variable, but is pushed
447 // at a delayed priority after Update(var_index) is called.
448 for (int var_index = 0; var_index < arity_; ++var_index) {
449 // This demons runs in low priority. Thus we know all the
450 // variables that have changed since the last time it was run.
451 // In that case, if only one var was touched, as propagation is
452 // exact, we do not need to recheck that variable.
453 if (var_index == touched_var_) {
454 touched_var_ = -1; // Clean now, it is a 1 time flag.
455 continue;
456 }
457 IntVar* const var = vars_[var_index];
458 const int64_t original_min = original_min_[var_index];
459 const int64_t var_size = var->Size();
460 // The domain iterator is very slow, let's try to see if we can
461 // work our way around.
462 switch (var_size) {
463 case 1: {
464 if (!Supported(var_index, var->Min() - original_min)) {
465 solver()->Fail();
466 }
467 break;
468 }
469 case 2: {
470 const int64_t var_min = var->Min();
471 const int64_t var_max = var->Max();
472 const bool min_support = Supported(var_index, var_min - original_min);
473 const bool max_support = Supported(var_index, var_max - original_min);
474 if (!min_support) {
475 if (!max_support) {
476 solver()->Fail();
477 } else {
478 var->SetValue(var_max);
479 var_sizes_.SetValue(solver(), var_index, 1);
480 }
481 } else if (!max_support) {
482 var->SetValue(var_min);
483 var_sizes_.SetValue(solver(), var_index, 1);
484 }
485 break;
486 }
487 default: {
488 to_remove_.clear();
489 const int64_t var_min = var->Min();
490 const int64_t var_max = var->Max();
491 int64_t new_min = var_min;
492 int64_t new_max = var_max;
493 // If the domain of a variable is an interval, it is much
494 // faster to iterate on that interval instead of using the
495 // iterator.
496 if (var_max - var_min + 1 == var_size) {
497 for (; new_min <= var_max; ++new_min) {
498 if (Supported(var_index, new_min - original_min)) {
499 break;
500 }
501 }
502 for (; new_max >= new_min; --new_max) {
503 if (Supported(var_index, new_max - original_min)) {
504 break;
505 }
506 }
507 var->SetRange(new_min, new_max);
508 for (int64_t value = new_min + 1; value < new_max; ++value) {
509 if (!Supported(var_index, value - original_min)) {
510 to_remove_.push_back(value);
511 }
512 }
513 } else { // Domain is sparse.
514 // Let's not collect all values below the first supported
515 // value as this can easily and more rapidly be taken care
516 // of by a SetRange() call.
517 new_min = std::numeric_limits<int64_t>::max(); // escape value.
518 for (const int64_t value :
519 InitAndGetValues(iterators_[var_index])) {
520 if (!Supported(var_index, value - original_min)) {
521 to_remove_.push_back(value);
522 } else {
523 if (new_min == std::numeric_limits<int64_t>::max()) {
524 new_min = value;
525 // This will be covered by the SetRange.
526 to_remove_.clear();
527 }
528 new_max = value;
529 }
530 }
531 var->SetRange(new_min, new_max);
532 // Trim the to_remove vector.
533 int index = to_remove_.size() - 1;
534 while (index >= 0 && to_remove_[index] > new_max) {
535 index--;
536 }
537 to_remove_.resize(index + 1);
538 }
539 var->RemoveValues(to_remove_);
540 var_sizes_.SetValue(solver(), var_index, var->Size());
541 }
542 }
543 }
544 }
545
546 void Update(int var_index) {
547 if (vars_[var_index]->Size() == var_sizes_.Value(var_index)) {
548 return;
549 }
550 // This method will update the set of active tuples by masking out all
551 // tuples attached to values of the variables that have been removed.
552
553 // We first collect the complete set of tuples to blank out in temp_mask_.
554 IntVar* const var = vars_[var_index];
555 bool changed = false;
556 const int64_t omin = original_min_[var_index];
557 const int64_t var_size = var->Size();
558 const int64_t var_min = var->Min();
559 const int64_t var_max = var->Max();
560
561 switch (var_size) {
562 case 1: {
563 changed = AndMaskWithActive(masks_[var_index][var_min - omin]);
564 break;
565 }
566 case 2: {
567 SetTempMask(var_index, var_min - omin);
568 OrTempMask(var_index, var_max - omin);
569 changed = AndMaskWithActive(temp_mask_);
570 break;
571 }
572 default: {
573 const int64_t estimated_hole_size =
574 var_sizes_.Value(var_index) - var_size;
575 const int64_t old_min = var->OldMin();
576 const int64_t old_max = var->OldMax();
577 // Rough estimation of the number of operation if we scan
578 // deltas in the domain of the variable.
579 const int64_t number_of_operations =
580 estimated_hole_size + var_min - old_min + old_max - var_max;
581 if (number_of_operations < var_size) {
582 // Let's scan the removed values since last run.
583 for (int64_t value = old_min; value < var_min; ++value) {
584 changed |= SubtractMaskFromActive(masks_[var_index][value - omin]);
585 }
586 for (const int64_t value : InitAndGetValues(holes_[var_index])) {
587 changed |= SubtractMaskFromActive(masks_[var_index][value - omin]);
588 }
589 for (int64_t value = var_max + 1; value <= old_max; ++value) {
590 changed |= SubtractMaskFromActive(masks_[var_index][value - omin]);
591 }
592 } else {
593 ClearTempMask();
594 // Let's build the mask of supported tuples from the current
595 // domain.
596 if (var_max - var_min + 1 == var_size) { // Contiguous.
597 for (int64_t value = var_min; value <= var_max; ++value) {
598 OrTempMask(var_index, value - omin);
599 }
600 } else {
601 for (const int64_t value :
602 InitAndGetValues(iterators_[var_index])) {
603 OrTempMask(var_index, value - omin);
604 }
605 }
606 // Then we and this mask with active_tuples_.
607 changed = AndMaskWithActive(temp_mask_);
608 }
609 // We maintain the size of the variables incrementally (when it
610 // is > 2).
611 var_sizes_.SetValue(solver(), var_index, var_size);
612 }
613 }
614 // We push the propagate method only if something has changed.
615 if (changed) {
616 if (touched_var_ == -1 || touched_var_ == var_index) {
617 touched_var_ = var_index;
618 } else {
619 touched_var_ = -2; // more than one var.
620 }
621 EnqueueDelayedDemon(demon_);
622 }
623 }
624
625 std::string DebugString() const override {
626 return absl::StrFormat("CompactPositiveTableConstraint([%s], %d tuples)",
628 }
629
630 private:
631 // ----- Initialization -----
632
633 void BuildMasks() {
634 // Build masks.
635 for (int i = 0; i < arity_; ++i) {
636 original_min_[i] = vars_[i]->Min();
637 const int64_t span = vars_[i]->Max() - original_min_[i] + 1;
638 masks_[i].resize(span);
639 }
640 }
641
642 void FillMasksAndActiveTuples() {
643 std::vector<uint64_t> actives(word_length_, 0);
644 for (int tuple_index = 0; tuple_index < tuple_count_; ++tuple_index) {
645 if (IsTupleSupported(tuple_index)) {
646 SetBit64(actives.data(), tuple_index);
647 // Fill in all masks.
648 for (int var_index = 0; var_index < arity_; ++var_index) {
649 const int64_t value = UnsafeTupleValue(tuple_index, var_index);
650 const int64_t value_index = value - original_min_[var_index];
651 DCHECK_GE(value_index, 0);
652 DCHECK_LT(value_index, masks_[var_index].size());
653 if (masks_[var_index][value_index].empty()) {
654 masks_[var_index][value_index].assign(word_length_, 0);
655 }
656 SetBit64(masks_[var_index][value_index].data(), tuple_index);
657 }
658 }
659 }
660 active_tuples_.Init(solver(), actives);
661 }
662
663 void RemoveUnsupportedValues() {
664 // remove unreached values.
665 for (int var_index = 0; var_index < arity_; ++var_index) {
666 IntVar* const var = vars_[var_index];
667 to_remove_.clear();
668 for (const int64_t value : InitAndGetValues(iterators_[var_index])) {
669 if (masks_[var_index][value - original_min_[var_index]].empty()) {
670 to_remove_.push_back(value);
671 }
672 }
673 if (!to_remove_.empty()) {
674 var->RemoveValues(to_remove_);
675 }
676 }
677 }
678
679 void ComputeMasksBoundaries() {
680 for (int var_index = 0; var_index < arity_; ++var_index) {
681 mask_starts_[var_index].resize(masks_[var_index].size());
682 mask_ends_[var_index].resize(masks_[var_index].size());
683 for (int value_index = 0; value_index < masks_[var_index].size();
684 ++value_index) {
685 const std::vector<uint64_t>& mask = masks_[var_index][value_index];
686 if (mask.empty()) {
687 continue;
688 }
689 int start = 0;
690 while (start < word_length_ && mask[start] == 0) {
691 start++;
692 }
694 int end = word_length_ - 1;
695 while (end > start && mask[end] == 0) {
696 end--;
697 }
699 DCHECK_NE(mask[start], 0);
700 DCHECK_NE(mask[end], 0);
701 mask_starts_[var_index][value_index] = start;
702 mask_ends_[var_index][value_index] = end;
703 }
704 }
705 }
706
707 void BuildSupports() {
708 for (int var_index = 0; var_index < arity_; ++var_index) {
709 supports_[var_index].resize(masks_[var_index].size());
710 }
711 }
712
713 // ----- Helpers during propagation -----
714
715 bool AndMaskWithActive(const std::vector<uint64_t>& mask) {
716 const bool result = active_tuples_.RevAnd(solver(), mask);
717 if (active_tuples_.Empty()) {
718 solver()->Fail();
719 }
720 return result;
721 }
722
723 bool SubtractMaskFromActive(const std::vector<uint64_t>& mask) {
724 const bool result = active_tuples_.RevSubtract(solver(), mask);
725 if (active_tuples_.Empty()) {
726 solver()->Fail();
727 }
728 return result;
729 }
730
731 bool Supported(int var_index, int64_t value_index) {
732 DCHECK_GE(var_index, 0);
733 DCHECK_LT(var_index, arity_);
734 DCHECK_GE(value_index, 0);
735 DCHECK_LT(value_index, masks_[var_index].size());
736 const std::vector<uint64_t>& mask = masks_[var_index][value_index];
737 DCHECK(!mask.empty());
738 return active_tuples_.Intersects(mask, &supports_[var_index][value_index]);
739 }
740
741 void OrTempMask(int var_index, int64_t value_index) {
742 const std::vector<uint64_t>& mask = masks_[var_index][value_index];
743 if (!mask.empty()) {
744 const int mask_span = mask_ends_[var_index][value_index] -
745 mask_starts_[var_index][value_index] + 1;
746 if (active_tuples_.ActiveWordSize() < mask_span) {
747 for (int i : active_tuples_.active_words()) {
748 temp_mask_[i] |= mask[i];
749 }
750 } else {
751 for (int i = mask_starts_[var_index][value_index];
752 i <= mask_ends_[var_index][value_index]; ++i) {
753 temp_mask_[i] |= mask[i];
754 }
755 }
756 }
757 }
758
759 void SetTempMask(int var_index, int64_t value_index) {
760 // We assume memset is much faster that looping and assigning.
761 // Still we do want to stay sparse if possible.
762 // Thus we switch between dense and sparse initialization by
763 // comparing the number of operations in both case, with constant factor.
764 // TODO(user): experiment with different constant values.
765 if (active_tuples_.ActiveWordSize() < word_length_ / 4) {
766 for (int i : active_tuples_.active_words()) {
767 temp_mask_[i] = masks_[var_index][value_index][i];
768 }
769 } else {
770 temp_mask_ = masks_[var_index][value_index];
771 }
772 }
773
774 void ClearTempMask() {
775 // See comment above.
776 if (active_tuples_.ActiveWordSize() < word_length_ / 4) {
777 for (int i : active_tuples_.active_words()) {
778 temp_mask_[i] = 0;
779 }
780 } else {
781 temp_mask_.assign(word_length_, 0);
782 }
783 }
784
785 // The length in 64 bit words of the number of tuples.
786 int64_t word_length_;
787 // The active bitset.
788 UnsortedNullableRevBitset active_tuples_;
789 // The masks per value per variable.
790 std::vector<std::vector<std::vector<uint64_t>>> masks_;
791 // The range of active indices in the masks.
792 std::vector<std::vector<int>> mask_starts_;
793 std::vector<std::vector<int>> mask_ends_;
794 // The min on the vars at creation time.
795 std::vector<int64_t> original_min_;
796 // A temporary mask use for computation.
797 std::vector<uint64_t> temp_mask_;
798 // The index of the word in the active bitset supporting each value per
799 // variable.
800 std::vector<std::vector<int>> supports_;
801 Demon* demon_;
802 int touched_var_;
803 RevArray<int64_t> var_sizes_;
804};
// ----- Small Compact Table. -----

// TODO(user): regroup code with CompactPositiveTableConstraint.
810class SmallCompactPositiveTableConstraint : public BasePositiveTableConstraint {
811 public:
812 SmallCompactPositiveTableConstraint(Solver* const s,
813 const std::vector<IntVar*>& vars,
814 const IntTupleSet& tuples)
815 : BasePositiveTableConstraint(s, vars, tuples),
817 stamp_(0),
818 masks_(arity_),
819 original_min_(arity_, 0),
820 demon_(nullptr),
821 touched_var_(-1) {
823 CHECK_GE(arity_, 0);
824 CHECK_LE(tuples.NumTuples(), kBitsInUint64);
825 }
826
827 ~SmallCompactPositiveTableConstraint() override {}
828
829 void Post() override {
830 demon_ = solver()->RegisterDemon(MakeDelayedConstraintDemon0(
831 solver(), this, &SmallCompactPositiveTableConstraint::Propagate,
832 "Propagate"));
833 for (int i = 0; i < arity_; ++i) {
834 if (!vars_[i]->Bound()) {
835 Demon* const update_demon = MakeConstraintDemon1(
836 solver(), this, &SmallCompactPositiveTableConstraint::Update,
837 "Update", i);
838 vars_[i]->WhenDomain(update_demon);
839 }
840 }
841 stamp_ = 0;
842 }
843
844 void InitMasks() {
845 // Build masks.
846 for (int i = 0; i < arity_; ++i) {
847 original_min_[i] = vars_[i]->Min();
848 const int64_t span = vars_[i]->Max() - original_min_[i] + 1;
849 masks_[i].assign(span, 0);
850 }
851 }
852
853 bool IsTupleSupported(int tuple_index) {
854 for (int var_index = 0; var_index < arity_; ++var_index) {
855 int64_t value = 0;
856 if (!TupleValue(tuple_index, var_index, &value) ||
857 !vars_[var_index]->Contains(value)) {
858 return false;
859 }
860 }
861 return true;
862 }
863
864 void ComputeActiveTuples() {
865 active_tuples_ = 0;
866 // Compute active_tuples_ and update masks.
867 for (int tuple_index = 0; tuple_index < tuple_count_; ++tuple_index) {
868 if (IsTupleSupported(tuple_index)) {
869 const uint64_t local_mask = OneBit64(tuple_index);
870 active_tuples_ |= local_mask;
871 for (int var_index = 0; var_index < arity_; ++var_index) {
872 const int64_t value = UnsafeTupleValue(tuple_index, var_index);
873 masks_[var_index][value - original_min_[var_index]] |= local_mask;
874 }
875 }
876 }
877 if (!active_tuples_) {
878 solver()->Fail();
879 }
880 }
881
882 void RemoveUnsupportedValues() {
883 // remove unreached values.
884 for (int var_index = 0; var_index < arity_; ++var_index) {
885 IntVar* const var = vars_[var_index];
886 const int64_t original_min = original_min_[var_index];
887 to_remove_.clear();
888 for (const int64_t value : InitAndGetValues(iterators_[var_index])) {
889 if (masks_[var_index][value - original_min] == 0) {
890 to_remove_.push_back(value);
891 }
892 }
893 if (!to_remove_.empty()) {
894 var->RemoveValues(to_remove_);
895 }
896 }
897 }
898
899 void InitialPropagate() override {
900 InitMasks();
901 ComputeActiveTuples();
902 RemoveUnsupportedValues();
903 }
904
905 void Propagate() {
906 // This methods scans all the values of all the variables to see if they
907 // are still supported.
908 // This method is not attached to any particular variable, but is pushed
909 // at a delayed priority and awakened by Update(var_index).
910
911 // Reset touch_var_ if in mode (more than 1 variable was modified).
912 if (touched_var_ == -2) {
913 touched_var_ = -1;
914 }
915
916 // We cache active_tuples_.
917 const uint64_t actives = active_tuples_;
918
919 // We scan all variables and check their domains.
920 for (int var_index = 0; var_index < arity_; ++var_index) {
921 // This demons runs in low priority. Thus we know all the
922 // variables that have changed since the last time it was run.
923 // In that case, if only one var was touched, as propagation is
924 // exact, we do not need to recheck that variable.
925 if (var_index == touched_var_) {
926 touched_var_ = -1; // Clean it, it is a one time flag.
927 continue;
928 }
929 const std::vector<uint64_t>& var_mask = masks_[var_index];
930 const int64_t original_min = original_min_[var_index];
931 IntVar* const var = vars_[var_index];
932 const int64_t var_size = var->Size();
933 switch (var_size) {
934 case 1: {
935 if ((var_mask[var->Min() - original_min] & actives) == 0) {
936 // The difference with the non-small version of the table
937 // is that checking the validity of the resulting active
938 // tuples is cheap. Therefore we do not delay the check
939 // code.
940 solver()->Fail();
941 }
942 break;
943 }
944 case 2: {
945 const int64_t var_min = var->Min();
946 const int64_t var_max = var->Max();
947 const bool min_support =
948 (var_mask[var_min - original_min] & actives) != 0;
949 const bool max_support =
950 (var_mask[var_max - original_min] & actives) != 0;
951 if (!min_support && !max_support) {
952 solver()->Fail();
953 } else if (!min_support) {
954 var->SetValue(var_max);
955 } else if (!max_support) {
956 var->SetValue(var_min);
957 }
958 break;
959 }
960 default: {
961 to_remove_.clear();
962 const int64_t var_min = var->Min();
963 const int64_t var_max = var->Max();
964 int64_t new_min = var_min;
965 int64_t new_max = var_max;
966 if (var_max - var_min + 1 == var_size) {
967 // Contiguous case.
968 for (; new_min <= var_max; ++new_min) {
969 if ((var_mask[new_min - original_min] & actives) != 0) {
970 break;
971 }
972 }
973 for (; new_max >= new_min; --new_max) {
974 if ((var_mask[new_max - original_min] & actives) != 0) {
975 break;
976 }
977 }
978 var->SetRange(new_min, new_max);
979 for (int64_t value = new_min + 1; value < new_max; ++value) {
980 if ((var_mask[value - original_min] & actives) == 0) {
981 to_remove_.push_back(value);
982 }
983 }
984 } else {
985 bool min_set = false;
986 int last_size = 0;
987 for (const int64_t value :
988 InitAndGetValues(iterators_[var_index])) {
989 // The iterator is not safe w.r.t. deletion. Thus we
990 // postpone all value removals.
991 if ((var_mask[value - original_min] & actives) == 0) {
992 if (min_set) {
993 to_remove_.push_back(value);
994 }
995 } else {
996 if (!min_set) {
997 new_min = value;
998 min_set = true;
999 }
1000 new_max = value;
1001 last_size = to_remove_.size();
1002 }
1003 }
1004 if (min_set) {
1005 var->SetRange(new_min, new_max);
1006 } else {
1007 solver()->Fail();
1008 }
1009 to_remove_.resize(last_size);
1010 }
1011 var->RemoveValues(to_remove_);
1012 }
1013 }
1014 }
1015 }
1016
  // Updates the set of active tuples by masking out all tuples attached
  // to values of variable `var_index` that have been removed from its
  // domain since the last propagation.
  void Update(int var_index) {
    // This method updates the set of active tuples by masking out all
    // tuples attached to values of the variables that have been removed.

    IntVar* const var = vars_[var_index];
    const int64_t original_min = original_min_[var_index];
    const int64_t var_size = var->Size();
    switch (var_size) {
      case 1: {
        // Bound variable: only the tuples supporting its single value survive.
        ApplyMask(var_index, masks_[var_index][var->Min() - original_min]);
        return;
      }
      case 2: {
        // Two values left: keep the union of both value masks.
        ApplyMask(var_index, masks_[var_index][var->Min() - original_min] |
                                 masks_[var_index][var->Max() - original_min]);
        return;
      }
      default: {
        // We first collect the complete set of tuples to blank out in
        // temp_mask.
        const std::vector<uint64_t>& var_mask = masks_[var_index];
        const int64_t old_min = var->OldMin();
        const int64_t old_max = var->OldMax();
        const int64_t var_min = var->Min();
        const int64_t var_max = var->Max();
        // contiguous: the domain is a full interval [var_min, var_max].
        const bool contiguous = var_size == var_max - var_min + 1;
        // nearly_contiguous: more than 70% of the interval is present, so a
        // linear scan with Contains() beats the domain iterator.
        const bool nearly_contiguous =
            var_size > (var_max - var_min + 1) * 7 / 10;

        // Count the number of masks to collect to compare the deduction
        // vs the construction of the new active bitset.
        // TODO(user): Implement HolesSize() on IntVar* and use it
        // to remove this code and the var_sizes in the non_small
        // version.
        uint64_t hole_mask = 0;
        if (!contiguous) {
          // Accumulate the masks of values removed from the middle of the
          // domain (the "holes" reported by the solver since the last call).
          for (const int64_t value : InitAndGetValues(holes_[var_index])) {
            hole_mask |= var_mask[value - original_min];
          }
        }
        // Cost of collecting the values removed at both ends of the domain.
        // NOTE(review): this counts only the boundary shrinkage, not the
        // holes already folded into hole_mask above.
        const int64_t hole_operations = var_min - old_min + old_max - var_max;
        // We estimate the domain iterator to be 4x slower.
        const int64_t domain_operations = contiguous ? var_size : 4 * var_size;
        if (hole_operations < domain_operations) {
          // Cheaper to enumerate the removed boundary values.
          for (int64_t value = old_min; value < var_min; ++value) {
            hole_mask |= var_mask[value - original_min];
          }
          for (int64_t value = var_max + 1; value <= old_max; ++value) {
            hole_mask |= var_mask[value - original_min];
          }
          // We reverse the mask as this was negative information.
          ApplyMask(var_index, ~hole_mask);
        } else {
          // Cheaper to rebuild the mask of still-supported tuples from the
          // remaining domain values.
          uint64_t domain_mask = 0;
          if (contiguous) {
            for (int64_t value = var_min; value <= var_max; ++value) {
              domain_mask |= var_mask[value - original_min];
            }
          } else if (nearly_contiguous) {
            for (int64_t value = var_min; value <= var_max; ++value) {
              if (var->Contains(value)) {
                domain_mask |= var_mask[value - original_min];
              }
            }
          } else {
            for (const int64_t value :
                 InitAndGetValues(iterators_[var_index])) {
              domain_mask |= var_mask[value - original_min];
            }
          }
          ApplyMask(var_index, domain_mask);
        }
      }
    }
  }
1092
1093 std::string DebugString() const override {
1094 return absl::StrFormat(
1095 "SmallCompactPositiveTableConstraint([%s], %d tuples)",
1097 }
1098
1099 private:
1100 void ApplyMask(int var_index, uint64_t mask) {
1101 if ((~mask & active_tuples_) != 0) {
1102 // Check if we need to save the active_tuples in this node.
1103 const uint64_t current_stamp = solver()->stamp();
1104 if (stamp_ < current_stamp) {
1105 stamp_ = current_stamp;
1106 solver()->SaveValue(&active_tuples_);
1107 }
1108 active_tuples_ &= mask;
1109 if (active_tuples_) {
1110 // Maintain touched_var_.
1111 if (touched_var_ == -1 || touched_var_ == var_index) {
1112 touched_var_ = var_index;
1113 } else {
1114 touched_var_ = -2; // more than one var.
1115 }
1116 EnqueueDelayedDemon(demon_);
1117 } else {
1118 // Clean it before failing.
1119 touched_var_ = -1;
1120 solver()->Fail();
1121 }
1122 }
1123 }
1124
  // Bitset of active tuples: bit t is set iff tuple t is still compatible
  // with the current variable domains.
  uint64_t active_tuples_;
  // Solver stamp at which active_tuples_ was last trailed; lets ApplyMask
  // call SaveValue at most once per search node.
  uint64_t stamp_;
  // The masks per value per variable:
  // masks_[v][value - original_min_[v]] is the bitset of tuples in which
  // variable v takes `value`.
  std::vector<std::vector<uint64_t>> masks_;
  // The min on the vars at creation time.
  std::vector<int64_t> original_min_;
  // Delayed demon running the propagation after domain updates.
  Demon* demon_;
  // Index of the unique variable modified since the last propagation:
  // -1 = none, -2 = more than one.
  int touched_var_;
};
1136
1137bool HasCompactDomains(const std::vector<IntVar*>& vars) {
1138 return true; // Always assume compact table.
1139}
1140
1141// ---------- Deterministic Finite Automaton ----------
1142
// This constraint implements a deterministic finite automaton whose
// transitions are labeled by the values of the variables in the array,
// i.e. state[i+1] = transition[var[i]][state[i]] whenever
// (state[i], var[i], state[i+1]) appears in the transition table.
// There is only one possible transition for a given state/value pair.
1148class TransitionConstraint : public Constraint {
1149 public:
1150 static const int kStatePosition;
1151 static const int kNextStatePosition;
1152 static const int kTransitionTupleSize;
1153 TransitionConstraint(Solver* const s, const std::vector<IntVar*>& vars,
1154 const IntTupleSet& transition_table,
1155 int64_t initial_state,
1156 const std::vector<int64_t>& final_states)
1157 : Constraint(s),
1158 vars_(vars),
1159 transition_table_(transition_table),
1160 initial_state_(initial_state),
1161 final_states_(final_states) {}
1162
1163 TransitionConstraint(Solver* const s, const std::vector<IntVar*>& vars,
1164 const IntTupleSet& transition_table,
1165 int64_t initial_state,
1166 const std::vector<int>& final_states)
1167 : Constraint(s),
1168 vars_(vars),
1169 transition_table_(transition_table),
1170 initial_state_(initial_state),
1171 final_states_(final_states.size()) {
1172 for (int i = 0; i < final_states.size(); ++i) {
1173 final_states_[i] = final_states[i];
1174 }
1175 }
1176
1177 ~TransitionConstraint() override {}
1178
1179 void Post() override {
1180 Solver* const s = solver();
1181 int64_t state_min = std::numeric_limits<int64_t>::max();
1182 int64_t state_max = std::numeric_limits<int64_t>::min();
1183 const int nb_vars = vars_.size();
1184 for (int i = 0; i < transition_table_.NumTuples(); ++i) {
1185 state_max =
1186 std::max(state_max, transition_table_.Value(i, kStatePosition));
1187 state_max =
1188 std::max(state_max, transition_table_.Value(i, kNextStatePosition));
1189 state_min =
1190 std::min(state_min, transition_table_.Value(i, kStatePosition));
1191 state_min =
1192 std::min(state_min, transition_table_.Value(i, kNextStatePosition));
1193 }
1194
1195 std::vector<IntVar*> states;
1196 states.push_back(s->MakeIntConst(initial_state_));
1197 for (int var_index = 1; var_index < nb_vars; ++var_index) {
1198 states.push_back(s->MakeIntVar(state_min, state_max));
1199 }
1200 states.push_back(s->MakeIntVar(final_states_));
1201 CHECK_EQ(nb_vars + 1, states.size());
1202
1203 const int num_tuples = transition_table_.NumTuples();
1204
1205 for (int var_index = 0; var_index < nb_vars; ++var_index) {
1206 std::vector<IntVar*> tmp_vars(3);
1207 tmp_vars[0] = states[var_index];
1208 tmp_vars[1] = vars_[var_index];
1209 tmp_vars[2] = states[var_index + 1];
1210 // We always build the compact versions of the tables.
1211 if (num_tuples <= kBitsInUint64) {
1212 s->AddConstraint(s->RevAlloc(new SmallCompactPositiveTableConstraint(
1213 s, tmp_vars, transition_table_)));
1214 } else {
1215 s->AddConstraint(s->RevAlloc(new CompactPositiveTableConstraint(
1216 s, tmp_vars, transition_table_)));
1217 }
1218 }
1219 }
1220
1221 void InitialPropagate() override {}
1222
1223 void Accept(ModelVisitor* const visitor) const override {
1224 visitor->BeginVisitConstraint(ModelVisitor::kTransition, this);
1225 visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kVarsArgument,
1226 vars_);
1227 visitor->VisitIntegerArgument(ModelVisitor::kInitialState, initial_state_);
1228 visitor->VisitIntegerArrayArgument(ModelVisitor::kFinalStatesArgument,
1229 final_states_);
1230 visitor->VisitIntegerMatrixArgument(ModelVisitor::kTuplesArgument,
1231 transition_table_);
1232 visitor->EndVisitConstraint(ModelVisitor::kTransition, this);
1233 }
1234
1235 std::string DebugString() const override {
1236 return absl::StrFormat(
1237 "TransitionConstraint([%s], %d transitions, initial = %d, final = "
1238 "[%s])",
1239 JoinDebugStringPtr(vars_, ", "), transition_table_.NumTuples(),
1240 initial_state_, absl::StrJoin(final_states_, ", "));
1241 }
1242
1243 private:
1244 // Variable representing transitions between states. See header file.
1245 const std::vector<IntVar*> vars_;
1246 // The transition as tuples (state, value, next_state).
1247 const IntTupleSet transition_table_;
1248 // The initial state before the first transition.
1249 const int64_t initial_state_;
1250 // Vector of final state after the last transision.
1251 std::vector<int64_t> final_states_;
1252};
1253
1257} // namespace
1258
1259// --------- API ----------
1260
1261Constraint* Solver::MakeAllowedAssignments(const std::vector<IntVar*>& vars,
1262 const IntTupleSet& tuples) {
1263 if (HasCompactDomains(vars)) {
1264 if (tuples.NumTuples() < kBitsInUint64 && parameters_.use_small_table()) {
1265 return RevAlloc(
1266 new SmallCompactPositiveTableConstraint(this, vars, tuples));
1267 } else {
1268 return RevAlloc(new CompactPositiveTableConstraint(this, vars, tuples));
1269 }
1270 }
1271 return RevAlloc(new PositiveTableConstraint(this, vars, tuples));
1272}
1273
1275 const std::vector<IntVar*>& vars, const IntTupleSet& transition_table,
1276 int64_t initial_state, const std::vector<int64_t>& final_states) {
1277 return RevAlloc(new TransitionConstraint(this, vars, transition_table,
1278 initial_state, final_states));
1279}
1280
1282 const std::vector<IntVar*>& vars, const IntTupleSet& transition_table,
1283 int64_t initial_state, const std::vector<int>& final_states) {
1284 return RevAlloc(new TransitionConstraint(this, vars, transition_table,
1285 initial_state, final_states));
1286}
1287
1288} // namespace operations_research
int64_t max
Definition: alldiff_cst.cc:140
int64_t min
Definition: alldiff_cst.cc:139
#define CHECK(condition)
Definition: base/logging.h:495
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:893
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:892
#define CHECK_EQ(val1, val2)
Definition: base/logging.h:703
#define CHECK_GE(val1, val2)
Definition: base/logging.h:707
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:895
#define CHECK_NE(val1, val2)
Definition: base/logging.h:704
#define DCHECK_LT(val1, val2)
Definition: base/logging.h:894
#define DCHECK(condition)
Definition: base/logging.h:890
#define CHECK_LE(val1, val2)
Definition: base/logging.h:705
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:891
A constraint is the main modeling object.
Constraint * MakeTransitionConstraint(const std::vector< IntVar * > &vars, const IntTupleSet &transition_table, int64_t initial_state, const std::vector< int64_t > &final_states)
This constraint create a finite automaton that will check the sequence of variables vars.
Constraint * MakeAllowedAssignments(const std::vector< IntVar * > &vars, const IntTupleSet &tuples)
This method creates a constraint where the graph of the relation between the variables is given in ex...
T * RevAlloc(T *object)
Registers the given object as being reversible.
static const int kTransitionTupleSize
const int word_length_
static const int kStatePosition
const int arity_
static const int kNextStatePosition
std::vector< ValueBitset > masks_
int64_t b
std::vector< IntVar * > vars_
std::vector< IntVarIterator * > holes_
const int tuple_count_
int64_t a
std::vector< int64_t > to_remove_
std::vector< uint64_t > temp_mask_
UnsortedNullableRevBitset active_tuples_
std::vector< IntVarIterator * > iterators_
int64_t value
IntVar * var
Definition: expr_array.cc:1874
std::vector< int > supports_
int index
Collection of objects used to extend the Constraint Solver library.
Demon * MakeDelayedConstraintDemon0(Solver *const s, T *const ct, void(T::*method)(), const std::string &name)
std::string JoinDebugStringPtr(const std::vector< T > &v, const std::string &separator)
Definition: string_array.h:45
uint64_t OneBit64(int pos)
Definition: bitset.h:38
uint64_t BitLength64(uint64_t size)
Definition: bitset.h:338
Demon * MakeConstraintDemon1(Solver *const s, T *const ct, void(T::*method)(P), const std::string &name, P param1)
void SetBit64(uint64_t *const bitset, uint64_t pos)
Definition: bitset.h:354
BeginEndReverseIteratorWrapper< Container > Reverse(const Container &c)
Definition: iterators.h:98
IntervalVar *const target_var_
const int64_t stamp_
Definition: search.cc:3105
std::optional< int64_t > end
int64_t start
const double constant