OR-Tools  9.3
resource.cc
Go to the documentation of this file.
1// Copyright 2010-2021 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// This file contains implementations of several resource constraints.
15// The implemented constraints are:
16// * Disjunctive: forces a set of intervals to be non-overlapping
17// * Cumulative: forces a set of intervals with associated demands to be such
18// that the sum of demands of the intervals containing any given integer
19// does not exceed a capacity.
20// In addition, it implements the SequenceVar that allows ranking decisions
21// on a set of interval variables.
22
23#include <algorithm>
24#include <cstdint>
25#include <limits>
26#include <queue>
27#include <string>
28#include <utility>
29#include <vector>
30
31#include "absl/container/flat_hash_map.h"
32#include "absl/strings/str_cat.h"
33#include "absl/strings/str_format.h"
34#include "absl/strings/str_join.h"
38#include "ortools/base/macros.h"
43#include "ortools/util/bitset.h"
47
48namespace operations_research {
49namespace {
50// ----- Comparison functions -----
51
52// TODO(user): Tie breaking.
53
54// Comparison methods, used by the STL sort.
// Comparator for STL sort: orders two tasks by increasing earliest start
// time (StartMin) of their underlying intervals.
template <class Task>
bool StartMinLessThan(Task* const w1, Task* const w2) {
  const auto lhs = w1->interval->StartMin();
  const auto rhs = w2->interval->StartMin();
  return lhs < rhs;
}
59
60// A comparator that sorts the tasks by their effective earliest start time when
61// using the shortest duration possible. This comparator can be used when
62// sorting the tasks before they are inserted to a Theta-tree.
// Comparator for STL sort: orders two tasks by the effective earliest start
// time obtained when each task is assumed to take its shortest possible
// duration (EndMin - DurationMin). Useful for sorting tasks before they are
// inserted into a Theta-tree.
template <class Task>
bool ShortestDurationStartMinLessThan(Task* const w1, Task* const w2) {
  const auto effective_est1 =
      w1->interval->EndMin() - w1->interval->DurationMin();
  const auto effective_est2 =
      w2->interval->EndMin() - w2->interval->DurationMin();
  return effective_est1 < effective_est2;
}
68
// Comparator for STL sort: orders two tasks by increasing latest start time
// (StartMax) of their underlying intervals.
template <class Task>
bool StartMaxLessThan(Task* const w1, Task* const w2) {
  const auto lhs = w1->interval->StartMax();
  const auto rhs = w2->interval->StartMax();
  return lhs < rhs;
}
73
// Comparator for STL sort: orders two tasks by increasing earliest end time
// (EndMin) of their underlying intervals.
template <class Task>
bool EndMinLessThan(Task* const w1, Task* const w2) {
  const auto lhs = w1->interval->EndMin();
  const auto rhs = w2->interval->EndMin();
  return lhs < rhs;
}
78
// Comparator for STL sort: orders two tasks by increasing latest end time
// (EndMax) of their underlying intervals.
template <class Task>
bool EndMaxLessThan(Task* const w1, Task* const w2) {
  const auto lhs = w1->interval->EndMax();
  const auto rhs = w2->interval->EndMax();
  return lhs < rhs;
}
83
84bool IntervalStartMinLessThan(IntervalVar* i1, IntervalVar* i2) {
85 return i1->StartMin() < i2->StartMin();
86}
87
88// ----- Wrappers around intervals -----
89
90// A DisjunctiveTask is a non-preemptive task sharing a disjunctive resource.
91// That is, it corresponds to an interval, and this interval cannot overlap with
92// any other interval of a DisjunctiveTask sharing the same resource.
93// It is indexed, that is it is aware of its position in a reference array.
struct DisjunctiveTask {
  // Wraps 'interval_'. 'index' stays -1 until a propagator assigns the
  // task's position in its reference array.
  explicit DisjunctiveTask(IntervalVar* const interval_)
      : interval(interval_), index(-1) {}

  std::string DebugString() const { return interval->DebugString(); }

  // Underlying interval variable (not owned).
  IntervalVar* interval;
  // Position of this task in the propagator's reference array; -1 if unset.
  int index;
};
103
104// A CumulativeTask is a non-preemptive task sharing a cumulative resource.
105// That is, it corresponds to an interval and a demand. The sum of demands of
// all CumulativeTasks sharing a resource of capacity c whose intervals
// contain any integer t cannot exceed c.
108// It is indexed, that is it is aware of its position in a reference array.
struct CumulativeTask {
  // Wraps 'interval_' with a fixed demand. 'index' stays -1 until a
  // propagator assigns the task's position in its reference array.
  CumulativeTask(IntervalVar* const interval_, int64_t demand_)
      : interval(interval_), demand(demand_), index(-1) {}

  // Minimal energy of the task: shortest duration times its (fixed) demand.
  int64_t EnergyMin() const { return interval->DurationMin() * demand; }

  // The demand is fixed, so its minimum is the demand itself.
  int64_t DemandMin() const { return demand; }

  // Attaches 'demon' to any modification of the underlying interval.
  void WhenAnything(Demon* const demon) { interval->WhenAnything(demon); }

  std::string DebugString() const {
    return absl::StrFormat("Task{ %s, demand: %d }", interval->DebugString(),
                           demand);
  }

  // Underlying interval variable (not owned).
  IntervalVar* interval;
  // Constant amount of resource consumed while the task runs.
  int64_t demand;
  // Position of this task in the propagator's reference array; -1 if unset.
  int index;
};
128
129// A VariableCumulativeTask is a non-preemptive task sharing a
130// cumulative resource. That is, it corresponds to an interval and a
131// demand. The sum of demands of all cumulative tasks
132// VariableCumulativeTasks sharing a resource of capacity c whose
133// intervals contain any integer t cannot exceed c. It is indexed,
134// that is it is aware of its position in a reference array.
struct VariableCumulativeTask {
  // Wraps 'interval_' with a variable demand. 'index' stays -1 until a
  // propagator assigns the task's position in its reference array.
  VariableCumulativeTask(IntervalVar* const interval_, IntVar* demand_)
      : interval(interval_), demand(demand_), index(-1) {}

  // Minimal energy of the task: shortest duration times the demand's lower
  // bound.
  int64_t EnergyMin() const { return interval->DurationMin() * demand->Min(); }

  // Lower bound of the (variable) demand.
  int64_t DemandMin() const { return demand->Min(); }

  // Attaches 'demon' to any modification of the interval or of the demand's
  // range.
  void WhenAnything(Demon* const demon) {
    interval->WhenAnything(demon);
    demand->WhenRange(demon);
  }

  std::string DebugString() const {
    return absl::StrFormat("Task{ %s, demand: %s }", interval->DebugString(),
                           demand->DebugString());
  }

  // Underlying interval variable (not owned).
  IntervalVar* const interval;
  // Variable amount of resource consumed while the task runs (not owned).
  IntVar* const demand;
  // Position of this task in the propagator's reference array; -1 if unset.
  int index;
};
157
158// ---------- Theta-Trees ----------
159
160// This is based on Petr Vilim (public) PhD work.
// All names come from his work. See http://vilim.eu/petr.
162
163// Node of a Theta-tree
164struct ThetaNode {
165 // Identity element
166 ThetaNode()
167 : total_processing(0), total_ect(std::numeric_limits<int64_t>::min()) {}
168
169 // Single interval element
170 explicit ThetaNode(const IntervalVar* const interval)
171 : total_processing(interval->DurationMin()),
172 total_ect(interval->EndMin()) {
173 // NOTE(user): Petr Vilim's thesis assumes that all tasks in the
174 // scheduling problem have fixed duration and that propagation already
175 // updated the bounds of the start/end times accordingly.
176 // The problem in this case is that the recursive formula for computing
177 // total_ect was only proved for the case where the duration is fixed; in
178 // our case, we use StartMin() + DurationMin() for the earliest completion
179 // time of a task, which should not break any assumptions, but may give
180 // bounds that are too loose.
181 }
182
183 void Compute(const ThetaNode& left, const ThetaNode& right) {
184 total_processing = CapAdd(left.total_processing, right.total_processing);
185 total_ect = std::max(CapAdd(left.total_ect, right.total_processing),
186 right.total_ect);
187 }
188
189 bool IsIdentity() const {
190 return total_processing == 0LL &&
192 }
193
194 std::string DebugString() const {
195 return absl::StrCat("ThetaNode{ p = ", total_processing,
196 ", e = ", total_ect < 0LL ? -1LL : total_ect, " }");
197 }
198
200 int64_t total_ect;
201};
202
203// A theta-tree is a container for a set of intervals supporting the following
204// operations:
205// * Insertions and deletion in O(log size_), with size_ the maximal number of
206// tasks the tree may contain;
207// * Querying the following quantity in O(1):
208// Max_{subset S of the set of contained intervals} (
209// Min_{i in S}(i.StartMin) + Sum_{i in S}(i.DurationMin) )
class ThetaTree : public MonoidOperationTree<ThetaNode> {
 public:
  // 'size' is the maximal number of tasks the tree may contain.
  explicit ThetaTree(int size) : MonoidOperationTree<ThetaNode>(size) {}

  // Earliest completion time of the set of currently inserted tasks.
  int64_t Ect() const { return result().total_ect; }

  // Inserts 'task' at its precomputed leaf position (task->index).
  void Insert(const DisjunctiveTask* const task) {
    Set(task->index, ThetaNode(task->interval));
  }

  // Removes 'task' by resetting its leaf to the identity node.
  void Remove(const DisjunctiveTask* const task) { Reset(task->index); }

  // True iff the leaf for 'task' currently holds a non-identity node.
  bool IsInserted(const DisjunctiveTask* const task) const {
    return !GetOperand(task->index).IsIdentity();
  }
};
226
227// ----------------- Lambda Theta Tree -----------------------
228
229// Lambda-theta-node
230// These nodes are cumulative lambda theta-node. This is reflected in the
231// terminology. They can also be used in the disjunctive case, and this incurs
232// no performance penalty.
233struct LambdaThetaNode {
234 // Special value for task indices meaning 'no such task'.
235 static const int kNone;
236
237 // Identity constructor
238 LambdaThetaNode()
239 : energy(0LL),
240 energetic_end_min(std::numeric_limits<int64_t>::min()),
241 energy_opt(0LL),
243 energetic_end_min_opt(std::numeric_limits<int64_t>::min()),
245
246 // Constructor for a single cumulative task in the Theta set
247 LambdaThetaNode(int64_t capacity, const CumulativeTask& task)
248 : energy(task.EnergyMin()),
249 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
254
255 // Constructor for a single cumulative task in the Lambda set
256 LambdaThetaNode(int64_t capacity, const CumulativeTask& task, int index)
257 : energy(0LL),
258 energetic_end_min(std::numeric_limits<int64_t>::min()),
259 energy_opt(task.EnergyMin()),
261 energetic_end_min_opt(capacity * task.interval->StartMin() +
262 energy_opt),
264 DCHECK_GE(index, 0);
265 }
266
267 // Constructor for a single cumulative task in the Theta set
268 LambdaThetaNode(int64_t capacity, const VariableCumulativeTask& task)
269 : energy(task.EnergyMin()),
270 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
275
276 // Constructor for a single cumulative task in the Lambda set
277 LambdaThetaNode(int64_t capacity, const VariableCumulativeTask& task,
278 int index)
279 : energy(0LL),
280 energetic_end_min(std::numeric_limits<int64_t>::min()),
281 energy_opt(task.EnergyMin()),
283 energetic_end_min_opt(capacity * task.interval->StartMin() +
284 energy_opt),
286 DCHECK_GE(index, 0);
287 }
288
289 // Constructor for a single interval in the Theta set
290 explicit LambdaThetaNode(const IntervalVar* const interval)
291 : energy(interval->DurationMin()),
292 energetic_end_min(interval->EndMin()),
293 energy_opt(interval->DurationMin()),
297
298 // Constructor for a single interval in the Lambda set
299 // 'index' is the index of the given interval in the est vector
300 LambdaThetaNode(const IntervalVar* const interval, int index)
301 : energy(0LL),
302 energetic_end_min(std::numeric_limits<int64_t>::min()),
303 energy_opt(interval->DurationMin()),
307 DCHECK_GE(index, 0);
308 }
309
310 // Sets this LambdaThetaNode to the result of the natural binary operations
311 // over the two given operands, corresponding to the following set operations:
312 // Theta = left.Theta union right.Theta
313 // Lambda = left.Lambda union right.Lambda
314 //
315 // No set operation actually occur: we only maintain the relevant quantities
316 // associated with such sets.
317 void Compute(const LambdaThetaNode& left, const LambdaThetaNode& right) {
318 energy = CapAdd(left.energy, right.energy);
319 energetic_end_min = std::max(right.energetic_end_min,
320 CapAdd(left.energetic_end_min, right.energy));
321 const int64_t energy_left_opt = CapAdd(left.energy_opt, right.energy);
322 const int64_t energy_right_opt = CapAdd(left.energy, right.energy_opt);
323 if (energy_left_opt > energy_right_opt) {
324 energy_opt = energy_left_opt;
325 argmax_energy_opt = left.argmax_energy_opt;
326 } else {
327 energy_opt = energy_right_opt;
328 argmax_energy_opt = right.argmax_energy_opt;
329 }
330 const int64_t ect1 = right.energetic_end_min_opt;
331 const int64_t ect2 = CapAdd(left.energetic_end_min, right.energy_opt);
332 const int64_t ect3 = CapAdd(left.energetic_end_min_opt, right.energy);
333 if (ect1 >= ect2 && ect1 >= ect3) { // ect1 max
335 argmax_energetic_end_min_opt = right.argmax_energetic_end_min_opt;
336 } else if (ect2 >= ect1 && ect2 >= ect3) { // ect2 max
338 argmax_energetic_end_min_opt = right.argmax_energy_opt;
339 } else { // ect3 max
341 argmax_energetic_end_min_opt = left.argmax_energetic_end_min_opt;
342 }
343 // The processing time, with one grey interval, should be no less than
344 // without any grey interval.
346 // If there is no responsible grey interval for the processing time,
347 // the processing time with a grey interval should equal the one
348 // without.
350 }
351
352 // Amount of resource consumed by the Theta set, in units of demand X time.
353 // This is energy(Theta).
354 int64_t energy;
355
356 // Max_{subset S of Theta} (capacity * start_min(S) + energy(S))
358
359 // Max_{i in Lambda} (energy(Theta union {i}))
360 int64_t energy_opt;
361
362 // The argmax in energy_opt_. It is the index of the chosen task in the Lambda
363 // set, if any, or kNone if none.
365
366 // Max_{subset S of Theta, i in Lambda}
367 // (capacity * start_min(S union {i}) + energy(S union {i}))
369
370 // The argmax in energetic_end_min_opt_. It is the index of the chosen task in
371 // the Lambda set, if any, or kNone if none.
373};
374
375const int LambdaThetaNode::kNone = -1;
376
377// Disjunctive Lambda-Theta tree
class DisjunctiveLambdaThetaTree : public MonoidOperationTree<LambdaThetaNode> {
 public:
  // 'size' is the maximal number of tasks the tree may contain.
  explicit DisjunctiveLambdaThetaTree(int size)
      : MonoidOperationTree<LambdaThetaNode>(size) {}

  // Inserts 'task' into the Theta set, at leaf task.index.
  void Insert(const DisjunctiveTask& task) {
    Set(task.index, LambdaThetaNode(task.interval));
  }

  // Moves 'task' to the Lambda ("grey") set, at leaf task.index.
  void Grey(const DisjunctiveTask& task) {
    const int index = task.index;
    Set(index, LambdaThetaNode(task.interval, index));
  }

  // Earliest completion time of the Theta set.
  int64_t Ect() const { return result().energetic_end_min; }
  // Earliest completion time of Theta extended with one grey task.
  int64_t EctOpt() const { return result().energetic_end_min_opt; }
  // Index of the grey task responsible for EctOpt(), or kNone.
  int ResponsibleOpt() const { return result().argmax_energetic_end_min_opt; }
};
396
397// A cumulative lambda-theta tree
class CumulativeLambdaThetaTree : public MonoidOperationTree<LambdaThetaNode> {
 public:
  // 'size' is the maximal number of tasks; 'capacity_max' the resource
  // capacity used to scale the energetic quantities.
  CumulativeLambdaThetaTree(int size, int64_t capacity_max)
      : MonoidOperationTree<LambdaThetaNode>(size),
        capacity_max_(capacity_max) {}

  // Clears the tree and resets the capacity.
  void Init(int64_t capacity_max) {
    Clear();
    capacity_max_ = capacity_max;
  }

  // Inserts a fixed-demand task into the Theta set, at leaf task.index.
  void Insert(const CumulativeTask& task) {
    Set(task.index, LambdaThetaNode(capacity_max_, task));
  }

  // Moves a fixed-demand task to the Lambda ("grey") set.
  void Grey(const CumulativeTask& task) {
    const int index = task.index;
    Set(index, LambdaThetaNode(capacity_max_, task, index));
  }

  // Inserts a variable-demand task into the Theta set, at leaf task.index.
  void Insert(const VariableCumulativeTask& task) {
    Set(task.index, LambdaThetaNode(capacity_max_, task));
  }

  // Moves a variable-demand task to the Lambda ("grey") set.
  void Grey(const VariableCumulativeTask& task) {
    const int index = task.index;
    Set(index, LambdaThetaNode(capacity_max_, task, index));
  }

  // Energetic quantities of the root node (units of demand X time).
  int64_t energetic_end_min() const { return result().energetic_end_min; }
  int64_t energetic_end_min_opt() const {
    return result().energetic_end_min_opt;
  }
  // Earliest completion times in time units: energy divided by capacity,
  // rounded up.
  int64_t Ect() const {
    return MathUtil::CeilOfRatio(energetic_end_min(), capacity_max_);
  }
  int64_t EctOpt() const {
    return MathUtil::CeilOfRatio(result().energetic_end_min_opt, capacity_max_);
  }
  // Index of the grey task responsible for EctOpt(), or kNone.
  int argmax_energetic_end_min_opt() const {
    return result().argmax_energetic_end_min_opt;
  }

 private:
  int64_t capacity_max_;
};
444
445// -------------- Not Last -----------------------------------------
446
447// A class that implements the 'Not-Last' propagation algorithm for the unary
448// resource constraint.
class NotLast {
 public:
  NotLast(Solver* const solver, const std::vector<IntervalVar*>& intervals,
          bool mirror, bool strict);

  // The three by_* vectors hold the same task objects, so only one of them
  // is deleted.
  ~NotLast() { gtl::STLDeleteElements(&by_start_min_); }

  // Runs one pass of the Not-Last rule; returns true iff some end max was
  // tightened.
  bool Propagate();

 private:
  // Theta-tree used to query the earliest completion time of task subsets.
  ThetaTree theta_tree_;
  // The same set of tasks under three different sort orders.
  std::vector<DisjunctiveTask*> by_start_min_;
  std::vector<DisjunctiveTask*> by_end_max_;
  std::vector<DisjunctiveTask*> by_start_max_;
  // Candidate new end max per task, indexed like by_start_min_.
  std::vector<int64_t> new_lct_;
  // If true, also propagate on intervals whose minimal duration is zero.
  const bool strict_;
};
466
// Builds the task wrappers. When 'mirror' is true, the algorithm runs on
// mirror intervals (to propagate in the other time direction); the min side
// is relaxed so this propagator only ever tightens end maxes.
NotLast::NotLast(Solver* const solver,
                 const std::vector<IntervalVar*>& intervals, bool mirror,
                 bool strict)
    : theta_tree_(intervals.size()),
      by_start_min_(intervals.size()),
      by_end_max_(intervals.size()),
      by_start_max_(intervals.size()),
      new_lct_(intervals.size(), -1LL),
      strict_(strict) {
  // Populate the different vectors. All three vectors alias the same
  // DisjunctiveTask objects.
  for (int i = 0; i < intervals.size(); ++i) {
    IntervalVar* const underlying =
        mirror ? solver->MakeMirrorInterval(intervals[i]) : intervals[i];
    IntervalVar* const relaxed = solver->MakeIntervalRelaxedMin(underlying);
    by_start_min_[i] = new DisjunctiveTask(relaxed);
    by_end_max_[i] = by_start_min_[i];
    by_start_max_[i] = by_start_min_[i];
  }
}
486
// One pass of the Not-Last rule: for each task t, if the other tasks that
// must start before t's end would complete after t's latest start, then t
// cannot be scheduled last among them and its end max is reduced.
// Returns true iff at least one interval was modified.
bool NotLast::Propagate() {
  // ---- Init ----
  std::sort(by_start_max_.begin(), by_start_max_.end(),
            StartMaxLessThan<DisjunctiveTask>);
  std::sort(by_end_max_.begin(), by_end_max_.end(),
            EndMaxLessThan<DisjunctiveTask>);
  // Update start min positions; task->index is the leaf position used by the
  // theta tree below.
  std::sort(by_start_min_.begin(), by_start_min_.end(),
            StartMinLessThan<DisjunctiveTask>);
  for (int i = 0; i < by_start_min_.size(); ++i) {
    by_start_min_[i]->index = i;
  }
  theta_tree_.Clear();
  // Start from the current end maxes; the sweep below can only lower them.
  for (int i = 0; i < by_start_min_.size(); ++i) {
    new_lct_[i] = by_start_min_[i]->interval->EndMax();
  }

  // --- Execute ----
  // Sweep tasks by increasing end max; 'j' walks tasks by increasing start
  // max and inserts into the theta tree those that must start before the
  // current task's end max.
  int j = 0;
  for (DisjunctiveTask* const twi : by_end_max_) {
    while (j < by_start_max_.size() &&
           twi->interval->EndMax() > by_start_max_[j]->interval->StartMax()) {
      if (j > 0 && theta_tree_.Ect() > by_start_max_[j]->interval->StartMax()) {
        // Task j cannot be last among the inserted tasks: cap its end max at
        // the latest start of the previously inserted task.
        const int64_t new_end_max = by_start_max_[j - 1]->interval->StartMax();
        new_lct_[by_start_max_[j]->index] =
            std::min(new_lct_[by_start_max_[j]->index], new_end_max);
      }
      theta_tree_.Insert(by_start_max_[j]);
      j++;
    }
    // Compute the ECT of the inserted tasks without twi itself (remove it
    // temporarily if present).
    const bool inserted = theta_tree_.IsInserted(twi);
    if (inserted) {
      theta_tree_.Remove(twi);
    }
    const int64_t ect_theta_less_i = theta_tree_.Ect();
    if (inserted) {
      theta_tree_.Insert(twi);
    }

    // If the other tasks end after twi's latest start, twi is not last.
    if (ect_theta_less_i > twi->interval->StartMax() && j > 0) {
      const int64_t new_end_max = by_start_max_[j - 1]->interval->StartMax();
      if (new_end_max < new_lct_[twi->index]) {
        new_lct_[twi->index] = new_end_max;
      }
    }
  }

  // Apply modifications. Zero-duration intervals are skipped unless strict_.
  bool modified = false;
  for (int i = 0; i < by_start_min_.size(); ++i) {
    IntervalVar* const var = by_start_min_[i]->interval;
    if ((strict_ || var->DurationMin() > 0) && var->EndMax() > new_lct_[i]) {
      modified = true;
      var->SetEndMax(new_lct_[i]);
    }
  }
  return modified;
}
545
546// ------ Edge finder + detectable precedences -------------
547
548// A class that implements two propagation algorithms: edge finding and
549// detectable precedences. These algorithms both push intervals to the right,
550// which is why they are grouped together.
class EdgeFinderAndDetectablePrecedences {
 public:
  EdgeFinderAndDetectablePrecedences(Solver* const solver,
                                     const std::vector<IntervalVar*>& intervals,
                                     bool mirror, bool strict);
  // All by_* vectors alias the same task objects, so only one is deleted.
  ~EdgeFinderAndDetectablePrecedences() {
    gtl::STLDeleteElements(&by_start_min_);
  }
  // Number of tasks under propagation.
  int64_t size() const { return by_start_min_.size(); }
  // Interval of the task currently at position 'index' in by_start_min_.
  IntervalVar* interval(int index) { return by_start_min_[index]->interval; }
  // Re-sorts by_start_min_ and refreshes each task's theta-tree leaf index.
  void UpdateEst();
  // Fails the solver if some subset of tasks provably overloads the resource.
  void OverloadChecking();
  // Each returns true iff at least one start min was tightened.
  bool DetectablePrecedences();
  bool EdgeFinder();

 private:
  Solver* const solver_;

  // --- All the following member variables are essentially used as local ones:
  // no invariant is maintained about them, except for the fact that the vectors
  // always contains all the considered intervals, so any function that wants to
  // use them must first sort them in the right order.

  // All of these vectors store the same set of objects. Therefore, at
  // destruction time, STLDeleteElements should be called on only one of them.
  // It does not matter which one.

  ThetaTree theta_tree_;
  std::vector<DisjunctiveTask*> by_end_min_;
  std::vector<DisjunctiveTask*> by_start_min_;
  std::vector<DisjunctiveTask*> by_end_max_;
  std::vector<DisjunctiveTask*> by_start_max_;
  // new_est_[i] is the new start min for interval est_[i]->interval.
  std::vector<int64_t> new_est_;
  // new_lct_[i] is the new end max for interval est_[i]->interval.
  std::vector<int64_t> new_lct_;
  DisjunctiveLambdaThetaTree lt_tree_;
  // If true, also propagate on intervals whose minimal duration is zero.
  const bool strict_;
};
590
// Builds the task wrappers. When 'mirror' is true, the algorithms run on
// mirror intervals (propagating the other time direction); the max side is
// relaxed so these propagators only ever tighten start mins.
EdgeFinderAndDetectablePrecedences::EdgeFinderAndDetectablePrecedences(
    Solver* const solver, const std::vector<IntervalVar*>& intervals,
    bool mirror, bool strict)
    : solver_(solver),
      theta_tree_(intervals.size()),
      lt_tree_(intervals.size()),
      strict_(strict) {
  // Populate the task vectors; all four alias the same DisjunctiveTask
  // objects.
  for (IntervalVar* const interval : intervals) {
    IntervalVar* const underlying =
        mirror ? solver->MakeMirrorInterval(interval) : interval;
    IntervalVar* const relaxed = solver->MakeIntervalRelaxedMax(underlying);
    DisjunctiveTask* const task = new DisjunctiveTask(relaxed);
    by_end_min_.push_back(task);
    by_start_min_.push_back(task);
    by_end_max_.push_back(task);
    by_start_max_.push_back(task);
    new_est_.push_back(std::numeric_limits<int64_t>::min());
  }
}
611
612void EdgeFinderAndDetectablePrecedences::UpdateEst() {
613 std::sort(by_start_min_.begin(), by_start_min_.end(),
614 ShortestDurationStartMinLessThan<DisjunctiveTask>);
615 for (int i = 0; i < size(); ++i) {
616 by_start_min_[i]->index = i;
617 }
618}
619
// Overload checking: inserts tasks by increasing end max; if at any point
// the earliest completion time of the inserted set exceeds the current
// task's end max, the resource is overloaded and the search fails.
void EdgeFinderAndDetectablePrecedences::OverloadChecking() {
  // Initialization.
  UpdateEst();
  std::sort(by_end_max_.begin(), by_end_max_.end(),
            EndMaxLessThan<DisjunctiveTask>);
  theta_tree_.Clear();

  for (DisjunctiveTask* const task : by_end_max_) {
    theta_tree_.Insert(task);
    if (theta_tree_.Ect() > task->interval->EndMax()) {
      solver_->Fail();
    }
  }
}
634
// Detectable precedences: a precedence j << i is detectable when
// EndMin(i) > StartMax(j). For each task i (by increasing end min), all such
// predecessors j are in the theta tree, and i's start min can be raised to
// the ECT of that set (computed without i itself).
// Returns true iff at least one start min was tightened.
bool EdgeFinderAndDetectablePrecedences::DetectablePrecedences() {
  // Initialization.
  UpdateEst();
  new_est_.assign(size(), std::numeric_limits<int64_t>::min());

  // Propagate in one direction
  std::sort(by_end_min_.begin(), by_end_min_.end(),
            EndMinLessThan<DisjunctiveTask>);
  std::sort(by_start_max_.begin(), by_start_max_.end(),
            StartMaxLessThan<DisjunctiveTask>);
  theta_tree_.Clear();
  int j = 0;
  for (DisjunctiveTask* const task_i : by_end_min_) {
    // Insert every task whose start max precedes task_i's end min.
    if (j < size()) {
      DisjunctiveTask* task_j = by_start_max_[j];
      while (task_i->interval->EndMin() > task_j->interval->StartMax()) {
        theta_tree_.Insert(task_j);
        j++;
        if (j == size()) break;
        task_j = by_start_max_[j];
      }
    }
    const int64_t esti = task_i->interval->StartMin();
    // ECT of the detected predecessors, excluding task_i itself.
    bool inserted = theta_tree_.IsInserted(task_i);
    if (inserted) {
      theta_tree_.Remove(task_i);
    }
    const int64_t oesti = theta_tree_.Ect();
    if (inserted) {
      theta_tree_.Insert(task_i);
    }
    if (oesti > esti) {
      new_est_[task_i->index] = oesti;
    } else {
      new_est_[task_i->index] = std::numeric_limits<int64_t>::min();
    }
  }

  // Apply modifications. Zero-duration intervals are skipped unless strict_.
  bool modified = false;
  for (int i = 0; i < size(); ++i) {
    IntervalVar* const var = by_start_min_[i]->interval;
    if (new_est_[i] != std::numeric_limits<int64_t>::min() &&
        (strict_ || var->DurationMin() > 0)) {
      modified = true;
      by_start_min_[i]->interval->SetStartMin(new_est_[i]);
    }
  }
  return modified;
}
685
// Edge finding with a Lambda-Theta tree: tasks are greyed by decreasing end
// max; whenever the optimistic ECT (with one grey task) exceeds the current
// end-max bound, the responsible grey task must be scheduled after the whole
// Theta set, so its start min is raised to the ECT of Theta.
// Returns true iff at least one start min was tightened.
bool EdgeFinderAndDetectablePrecedences::EdgeFinder() {
  // Initialization.
  UpdateEst();
  for (int i = 0; i < size(); ++i) {
    new_est_[i] = by_start_min_[i]->interval->StartMin();
  }

  // Push in one direction.
  std::sort(by_end_max_.begin(), by_end_max_.end(),
            EndMaxLessThan<DisjunctiveTask>);
  lt_tree_.Clear();
  for (int i = 0; i < size(); ++i) {
    lt_tree_.Insert(*by_start_min_[i]);
    DCHECK_EQ(i, by_start_min_[i]->index);
  }
  for (int j = size() - 2; j >= 0; --j) {
    lt_tree_.Grey(*by_end_max_[j + 1]);
    DisjunctiveTask* const twj = by_end_max_[j];
    // We should have checked for overloading earlier.
    DCHECK_LE(lt_tree_.Ect(), twj->interval->EndMax());
    while (lt_tree_.EctOpt() > twj->interval->EndMax()) {
      const int i = lt_tree_.ResponsibleOpt();
      DCHECK_GE(i, 0);
      if (lt_tree_.Ect() > new_est_[i]) {
        new_est_[i] = lt_tree_.Ect();
      }
      lt_tree_.Reset(i);
    }
  }

  // Apply modifications. Zero-duration intervals are skipped unless strict_.
  bool modified = false;
  for (int i = 0; i < size(); ++i) {
    IntervalVar* const var = by_start_min_[i]->interval;
    if (var->StartMin() < new_est_[i] && (strict_ || var->DurationMin() > 0)) {
      modified = true;
      var->SetStartMin(new_est_[i]);
    }
  }
  return modified;
}
727
728// --------- Disjunctive Constraint ----------
729
730// ----- Propagation on ranked activities -----
731
732class RankedPropagator : public Constraint {
733 public:
734 RankedPropagator(Solver* const solver, const std::vector<IntVar*>& nexts,
735 const std::vector<IntervalVar*>& intervals,
736 const std::vector<IntVar*>& slacks,
737 DisjunctiveConstraint* const disjunctive)
738 : Constraint(solver),
739 nexts_(nexts),
740 intervals_(intervals),
741 slacks_(slacks),
742 disjunctive_(disjunctive),
743 partial_sequence_(intervals.size()),
744 previous_(intervals.size() + 2, 0) {}
745
746 ~RankedPropagator() override {}
747
748 void Post() override {
749 Demon* const delayed =
750 solver()->MakeDelayedConstraintInitialPropagateCallback(this);
751 for (int i = 0; i < intervals_.size(); ++i) {
752 nexts_[i]->WhenBound(delayed);
753 intervals_[i]->WhenAnything(delayed);
754 slacks_[i]->WhenRange(delayed);
755 }
756 nexts_.back()->WhenBound(delayed);
757 }
758
759 void InitialPropagate() override {
760 PropagateNexts();
761 PropagateSequence();
762 }
763
764 void PropagateNexts() {
765 Solver* const s = solver();
766 const int ranked_first = partial_sequence_.NumFirstRanked();
767 const int ranked_last = partial_sequence_.NumLastRanked();
768 const int sentinel =
769 ranked_last == 0
770 ? nexts_.size()
771 : partial_sequence_[intervals_.size() - ranked_last] + 1;
772 int first = 0;
773 int counter = 0;
774 while (nexts_[first]->Bound()) {
775 DCHECK_NE(first, nexts_[first]->Min());
776 first = nexts_[first]->Min();
777 if (first == sentinel) {
778 return;
779 }
780 if (++counter > ranked_first) {
781 DCHECK(intervals_[first - 1]->MayBePerformed());
782 partial_sequence_.RankFirst(s, first - 1);
783 VLOG(2) << "RankFirst " << first - 1 << " -> "
784 << partial_sequence_.DebugString();
785 }
786 }
787 previous_.assign(previous_.size(), -1);
788 for (int i = 0; i < nexts_.size(); ++i) {
789 if (nexts_[i]->Bound()) {
790 previous_[nexts_[i]->Min()] = i;
791 }
792 }
793 int last = previous_.size() - 1;
794 counter = 0;
795 while (previous_[last] != -1) {
796 last = previous_[last];
797 if (++counter > ranked_last) {
798 partial_sequence_.RankLast(s, last - 1);
799 VLOG(2) << "RankLast " << last - 1 << " -> "
800 << partial_sequence_.DebugString();
801 }
802 }
803 }
804
805 void PropagateSequence() {
806 const int last_position = intervals_.size() - 1;
807 const int first_sentinel = partial_sequence_.NumFirstRanked();
808 const int last_sentinel = last_position - partial_sequence_.NumLastRanked();
809 // Propagates on ranked first from left to right.
810 for (int i = 0; i < first_sentinel - 1; ++i) {
811 IntervalVar* const interval = RankedInterval(i);
812 IntervalVar* const next_interval = RankedInterval(i + 1);
813 IntVar* const slack = RankedSlack(i);
814 const int64_t transition_time = RankedTransitionTime(i, i + 1);
815 next_interval->SetStartRange(
816 CapAdd(interval->StartMin(), CapAdd(slack->Min(), transition_time)),
817 CapAdd(interval->StartMax(), CapAdd(slack->Max(), transition_time)));
818 }
819 // Propagates on ranked last from right to left.
820 for (int i = last_position; i > last_sentinel + 1; --i) {
821 IntervalVar* const interval = RankedInterval(i - 1);
822 IntervalVar* const next_interval = RankedInterval(i);
823 IntVar* const slack = RankedSlack(i - 1);
824 const int64_t transition_time = RankedTransitionTime(i - 1, i);
825 interval->SetStartRange(CapSub(next_interval->StartMin(),
826 CapAdd(slack->Max(), transition_time)),
827 CapSub(next_interval->StartMax(),
828 CapAdd(slack->Min(), transition_time)));
829 }
830 // Propagate across.
831 IntervalVar* const first_interval =
832 first_sentinel > 0 ? RankedInterval(first_sentinel - 1) : nullptr;
833 IntVar* const first_slack =
834 first_sentinel > 0 ? RankedSlack(first_sentinel - 1) : nullptr;
835 IntervalVar* const last_interval = last_sentinel < last_position
836 ? RankedInterval(last_sentinel + 1)
837 : nullptr;
838
839 // Nothing to do afterwards, exiting.
840 if (first_interval == nullptr && last_interval == nullptr) {
841 return;
842 }
843 // Propagates to the middle part.
844 // This assumes triangular inequality in the transition times.
845 for (int i = first_sentinel; i <= last_sentinel; ++i) {
846 IntervalVar* const interval = RankedInterval(i);
847 IntVar* const slack = RankedSlack(i);
848 if (interval->MayBePerformed()) {
849 const bool performed = interval->MustBePerformed();
850 if (first_interval != nullptr) {
851 const int64_t transition_time =
852 RankedTransitionTime(first_sentinel - 1, i);
853 interval->SetStartRange(
854 CapAdd(first_interval->StartMin(),
855 CapAdd(first_slack->Min(), transition_time)),
856 CapAdd(first_interval->StartMax(),
857 CapAdd(first_slack->Max(), transition_time)));
858 if (performed) {
859 first_interval->SetStartRange(
860 CapSub(interval->StartMin(),
861 CapAdd(first_slack->Max(), transition_time)),
862 CapSub(interval->StartMax(),
863 CapAdd(first_slack->Min(), transition_time)));
864 }
865 }
866 if (last_interval != nullptr) {
867 const int64_t transition_time =
868 RankedTransitionTime(i, last_sentinel + 1);
869 interval->SetStartRange(
870 CapSub(last_interval->StartMin(),
871 CapAdd(slack->Max(), transition_time)),
872 CapSub(last_interval->StartMax(),
873 CapAdd(slack->Min(), transition_time)));
874 if (performed) {
875 last_interval->SetStartRange(
876 CapAdd(interval->StartMin(),
877 CapAdd(slack->Min(), transition_time)),
878 CapAdd(interval->StartMax(),
879 CapAdd(slack->Max(), transition_time)));
880 }
881 }
882 }
883 }
884 // TODO(user): cache transition on ranked intervals in a vector.
885 // Propagates on ranked first from right to left.
886 for (int i = std::min(first_sentinel - 2, last_position - 1); i >= 0; --i) {
887 IntervalVar* const interval = RankedInterval(i);
888 IntervalVar* const next_interval = RankedInterval(i + 1);
889 IntVar* const slack = RankedSlack(i);
890 const int64_t transition_time = RankedTransitionTime(i, i + 1);
891 interval->SetStartRange(CapSub(next_interval->StartMin(),
892 CapAdd(slack->Max(), transition_time)),
893 CapSub(next_interval->StartMax(),
894 CapAdd(slack->Min(), transition_time)));
895 }
896 // Propagates on ranked last from left to right.
897 for (int i = last_sentinel + 1; i < last_position - 1; ++i) {
898 IntervalVar* const interval = RankedInterval(i);
899 IntervalVar* const next_interval = RankedInterval(i + 1);
900 IntVar* const slack = RankedSlack(i);
901 const int64_t transition_time = RankedTransitionTime(i, i + 1);
902 next_interval->SetStartRange(
903 CapAdd(interval->StartMin(), CapAdd(slack->Min(), transition_time)),
904 CapAdd(interval->StartMax(), CapAdd(slack->Max(), transition_time)));
905 }
906 // TODO(user) : Propagate on slacks.
907 }
908
909 IntervalVar* RankedInterval(int i) const {
910 const int index = partial_sequence_[i];
911 return intervals_[index];
912 }
913
914 IntVar* RankedSlack(int i) const {
915 const int index = partial_sequence_[i];
916 return slacks_[index];
917 }
918
919 int64_t RankedTransitionTime(int before, int after) const {
920 const int before_index = partial_sequence_[before];
921 const int after_index = partial_sequence_[after];
922
923 return disjunctive_->TransitionTime(before_index, after_index);
924 }
925
926 std::string DebugString() const override {
927 return absl::StrFormat(
928 "RankedPropagator([%s], nexts = [%s], intervals = [%s])",
929 partial_sequence_.DebugString(), JoinDebugStringPtr(nexts_, ", "),
930 JoinDebugStringPtr(intervals_, ", "));
931 }
932
  // Model visitation is intentionally unsupported for this internal
  // propagator; reaching this is a programming error (hard crash by design).
  void Accept(ModelVisitor* const visitor) const override {
    LOG(FATAL) << "Not yet implemented";
    // TODO(user): IMPLEMENT ME.
  }
937
 private:
  // Successor variables of the routing-style "next" model (one per node).
  std::vector<IntVar*> nexts_;
  // The intervals being sequenced.
  std::vector<IntervalVar*> intervals_;
  // Slack variable between an interval and its successor.
  std::vector<IntVar*> slacks_;
  // Owning disjunctive constraint; provides transition times.
  DisjunctiveConstraint* const disjunctive_;
  // Reversible record of the positions ranked first/last so far.
  RevPartialSequence partial_sequence_;
  // Scratch storage of predecessor indices used during propagation.
  std::vector<int> previous_;
};
946
947// A class that stores several propagators for the sequence constraint, and
948// calls them until a fixpoint is reached.
949
class FullDisjunctiveConstraint : public DisjunctiveConstraint {
 public:
  // 'strict' is forwarded to the underlying propagators; presumably it
  // controls whether zero-duration intervals are sequenced strictly --
  // TODO(review): confirm against EdgeFinderAndDetectablePrecedences.
  FullDisjunctiveConstraint(Solver* const s,
                            const std::vector<IntervalVar*>& intervals,
                            const std::string& name, bool strict)
      : DisjunctiveConstraint(s, intervals, name),
        sequence_var_(nullptr),
        straight_(s, intervals, false, strict),
        mirror_(s, intervals, true, strict),
        straight_not_last_(s, intervals, false, strict),
        mirror_not_last_(s, intervals, true, strict),
        strict_(strict) {}

  ~FullDisjunctiveConstraint() override {}

  // Attaches a single delayed demon to every interval: the constraint is not
  // incremental, so any modification triggers a full InitialPropagate().
  void Post() override {
    Demon* const d = MakeDelayedConstraintDemon0(
        solver(), this, &FullDisjunctiveConstraint::InitialPropagate,
        "InitialPropagate");
    for (int32_t i = 0; i < straight_.size(); ++i) {
      straight_.interval(i)->WhenAnything(d);
    }
  }

  // Runs all disjunctive propagators (overload checking, detectable
  // precedences, not-last, edge finding) on the straight and mirrored
  // problems until a fixpoint is reached. If all times are fixed, a direct
  // feasibility check is performed instead.
  void InitialPropagate() override {
    bool all_optional_or_unperformed = true;
    for (const IntervalVar* const interval : intervals_) {
      if (interval->MustBePerformed()) {
        all_optional_or_unperformed = false;
        break;
      }
    }
    if (all_optional_or_unperformed) {  // Nothing to deduce
      return;
    }

    bool all_times_fixed = true;
    for (const IntervalVar* const interval : intervals_) {
      if (interval->MayBePerformed() &&
          (interval->StartMin() != interval->StartMax() ||
           interval->DurationMin() != interval->DurationMax() ||
           interval->EndMin() != interval->EndMax())) {
        all_times_fixed = false;
        break;
      }
    }

    if (all_times_fixed) {
      PropagatePerformed();
    } else {
      // Nested loops implement the propagation fixpoint: cheaper rules run
      // in the inner loops, the more expensive edge finder in the outermost.
      do {
        do {
          do {
            // OverloadChecking is symmetrical. It has the same effect on the
            // straight and the mirrored version.
            straight_.OverloadChecking();
          } while (straight_.DetectablePrecedences() ||
                   mirror_.DetectablePrecedences());
        } while (straight_not_last_.Propagate() ||
                 mirror_not_last_.Propagate());
      } while (straight_.EdgeFinder() || mirror_.EdgeFinder());
    }
  }

  // Returns true if the current domains of i1 and i2 allow them to overlap.
  bool Intersect(IntervalVar* const i1, IntervalVar* const i2) const {
    return i1->StartMin() < i2->EndMax() && i2->StartMin() < i1->EndMax();
  }

  // Handles the all-times-fixed case: checks that performed intervals do not
  // overlap, and unperforms any optional interval that would overlap them.
  void PropagatePerformed() {
    performed_.clear();
    optional_.clear();
    for (IntervalVar* const interval : intervals_) {
      if (interval->MustBePerformed()) {
        performed_.push_back(interval);
      } else if (interval->MayBePerformed()) {
        optional_.push_back(interval);
      }
    }
    // Checks feasibility of performed intervals.
    if (performed_.empty()) return;
    std::sort(performed_.begin(), performed_.end(), IntervalStartMinLessThan);
    for (int i = 0; i < performed_.size() - 1; ++i) {
      if (performed_[i]->EndMax() > performed_[i + 1]->StartMin()) {
        solver()->Fail();
      }
    }

    // Checks if optional intervals can be inserted.
    if (optional_.empty()) return;
    int index = 0;
    const int num_performed = performed_.size();
    std::sort(optional_.begin(), optional_.end(), IntervalStartMinLessThan);
    for (IntervalVar* const candidate : optional_) {
      const int64_t start = candidate->StartMin();
      // Advance to the first performed interval that could conflict.
      while (index < num_performed && start >= performed_[index]->EndMax()) {
        index++;
      }
      if (index == num_performed) return;
      if (Intersect(candidate, performed_[index]) ||
          (index < num_performed - 1 &&
           Intersect(candidate, performed_[index + 1]))) {
        candidate->SetPerformed(false);
      }
    }
  }

  void Accept(ModelVisitor* const visitor) const override {
    visitor->BeginVisitConstraint(ModelVisitor::kDisjunctive, this);
    visitor->VisitIntervalArrayArgument(ModelVisitor::kIntervalsArgument,
                                        intervals_);
    if (sequence_var_ != nullptr) {
      visitor->VisitSequenceArgument(ModelVisitor::kSequenceArgument,
                                     sequence_var_);
    }
    visitor->EndVisitConstraint(ModelVisitor::kDisjunctive, this);
  }

  // Lazily creates (and reversibly caches) the SequenceVar attached to this
  // constraint, building the underlying next-model first if needed.
  SequenceVar* MakeSequenceVar() override {
    BuildNextModelIfNeeded();
    if (sequence_var_ == nullptr) {
      solver()->SaveValue(reinterpret_cast<void**>(&sequence_var_));
      sequence_var_ = solver()->RevAlloc(
          new SequenceVar(solver(), intervals_, nexts_, name()));
    }
    return sequence_var_;
  }

  std::string DebugString() const override {
    return absl::StrFormat("FullDisjunctiveConstraint([%s], %i)",
                           JoinDebugStringPtr(intervals_, ", "), strict_);
  }

  const std::vector<IntVar*>& nexts() const override { return nexts_; }

  const std::vector<IntVar*>& actives() const override { return actives_; }

  const std::vector<IntVar*>& time_cumuls() const override {
    return time_cumuls_;
  }

  const std::vector<IntVar*>& time_slacks() const override {
    return time_slacks_;
  }

 private:
  // Transition time between two nodes of the next-model; node 0 is the start
  // sentinel and node size()+1 the end sentinel, both with zero transition.
  int64_t Distance(int64_t activity_plus_one, int64_t next_activity_plus_one) {
    return (activity_plus_one == 0 ||
            next_activity_plus_one > intervals_.size())
               ? 0
               : transition_time_(activity_plus_one - 1,
                                  next_activity_plus_one - 1);
  }

  // Builds the routing-style model (nexts, actives, cumuls, slacks) that
  // backs the SequenceVar. Idempotent: returns immediately once built.
  void BuildNextModelIfNeeded() {
    if (!nexts_.empty()) {
      return;
    }
    Solver* const s = solver();
    const std::string& ct_name = name();
    const int num_intervals = intervals_.size();
    const int num_nodes = intervals_.size() + 1;
    int64_t horizon = 0;
    for (int i = 0; i < intervals_.size(); ++i) {
      if (intervals_[i]->MayBePerformed()) {
        horizon = std::max(horizon, intervals_[i]->EndMax());
      }
    }

    // Create the next model.
    s->MakeIntVarArray(num_nodes, 1, num_nodes, ct_name + "_nexts", &nexts_);
    // Alldifferent on the nexts variable (the equivalent problem is a tsp).
    s->AddConstraint(s->MakeAllDifferent(nexts_));

    actives_.resize(num_nodes);
    for (int i = 0; i < num_intervals; ++i) {
      actives_[i + 1] = intervals_[i]->PerformedExpr()->Var();
      // A node is active iff its next pointer does not loop to itself.
      s->AddConstraint(
          s->MakeIsDifferentCstCt(nexts_[i + 1], i + 1, actives_[i + 1]));
    }
    std::vector<IntVar*> short_actives(actives_.begin() + 1, actives_.end());
    actives_[0] = s->MakeMax(short_actives)->Var();

    // No Cycle on the corresponding tsp.
    s->AddConstraint(s->MakeNoCycle(nexts_, actives_));

    // Cumul on time.
    time_cumuls_.resize(num_nodes + 1);
    // Slacks between activities.
    time_slacks_.resize(num_nodes);

    time_slacks_[0] = s->MakeIntVar(0, horizon, "initial_slack");
    // TODO(user): check this.
    time_cumuls_[0] = s->MakeIntConst(0);

    for (int64_t i = 0; i < num_intervals; ++i) {
      IntervalVar* const var = intervals_[i];
      if (var->MayBePerformed()) {
        const int64_t duration_min = var->DurationMin();
        time_slacks_[i + 1] = s->MakeIntVar(
            duration_min, horizon, absl::StrFormat("time_slacks(%d)", i + 1));
        // TODO(user): Check SafeStartExpr();
        time_cumuls_[i + 1] = var->SafeStartExpr(var->StartMin())->Var();
        if (var->DurationMax() != duration_min) {
          s->AddConstraint(s->MakeGreaterOrEqual(
              time_slacks_[i + 1], var->SafeDurationExpr(duration_min)));
        }
      } else {
        // Unperformed intervals are pushed to the horizon with free slack.
        time_slacks_[i + 1] = s->MakeIntVar(
            0, horizon, absl::StrFormat("time_slacks(%d)", i + 1));
        time_cumuls_[i + 1] = s->MakeIntConst(horizon);
      }
    }
    // TODO(user): Find a better UB for the last time cumul.
    time_cumuls_[num_nodes] = s->MakeIntVar(0, 2 * horizon, ct_name + "_ect");
    s->AddConstraint(s->MakePathCumul(
        nexts_, actives_, time_cumuls_, time_slacks_,
        [this](int64_t x, int64_t y) { return Distance(x, y); }));

    std::vector<IntVar*> short_slacks(time_slacks_.begin() + 1,
                                      time_slacks_.end());
    s->AddConstraint(s->RevAlloc(
        new RankedPropagator(s, nexts_, intervals_, short_slacks, this)));
  }

  // Lazily-created sequence variable (reversibly cached).
  SequenceVar* sequence_var_;
  // Forward and time-reversed propagators.
  EdgeFinderAndDetectablePrecedences straight_;
  EdgeFinderAndDetectablePrecedences mirror_;
  NotLast straight_not_last_;
  NotLast mirror_not_last_;
  // Next-model variables; empty until BuildNextModelIfNeeded() runs.
  std::vector<IntVar*> nexts_;
  std::vector<IntVar*> actives_;
  std::vector<IntVar*> time_cumuls_;
  std::vector<IntVar*> time_slacks_;
  // Scratch vectors used by PropagatePerformed().
  std::vector<IntervalVar*> performed_;
  std::vector<IntervalVar*> optional_;
  const bool strict_;
  DISALLOW_COPY_AND_ASSIGN(FullDisjunctiveConstraint);
};
1188
1189// =====================================================================
1190// Cumulative
1191// =====================================================================
1192
1193// A cumulative Theta node, where two energies, corresponding to 2 capacities,
1194// are stored.
1195struct DualCapacityThetaNode {
1196 // Special value for task indices meaning 'no such task'.
1197 static const int kNone;
1198
1199 // Identity constructor
1200 DualCapacityThetaNode()
1201 : energy(0LL),
1202 energetic_end_min(std::numeric_limits<int64_t>::min()),
1203 residual_energetic_end_min(std::numeric_limits<int64_t>::min()) {}
1204
1205 // Constructor for a single cumulative task in the Theta set.
1206 DualCapacityThetaNode(int64_t capacity, int64_t residual_capacity,
1207 const CumulativeTask& task)
1208 : energy(task.EnergyMin()),
1209 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
1211 CapAdd(residual_capacity * task.interval->StartMin(), energy)) {}
1212
1213 // Constructor for a single variable cumulative task in the Theta set.
1214 DualCapacityThetaNode(int64_t capacity, int64_t residual_capacity,
1215 const VariableCumulativeTask& task)
1216 : energy(task.EnergyMin()),
1217 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
1219 CapAdd(residual_capacity * task.interval->StartMin(), energy)) {}
1220
1221 // Sets this DualCapacityThetaNode to the result of the natural binary
1222 // operation over the two given operands, corresponding to the following set
1223 // operation: Theta = left.Theta union right.Theta
1224 //
1225 // No set operation actually occur: we only maintain the relevant quantities
1226 // associated with such sets.
1227 void Compute(const DualCapacityThetaNode& left,
1228 const DualCapacityThetaNode& right) {
1229 energy = CapAdd(left.energy, right.energy);
1230 energetic_end_min = std::max(CapAdd(left.energetic_end_min, right.energy),
1231 right.energetic_end_min);
1233 std::max(CapAdd(left.residual_energetic_end_min, right.energy),
1234 right.residual_energetic_end_min);
1235 }
1236
1237 // Amount of resource consumed by the Theta set, in units of demand X time.
1238 // This is energy(Theta).
1239 int64_t energy;
1240
1241 // Max_{subset S of Theta} (capacity * start_min(S) + energy(S))
1242 int64_t energetic_end_min;
1243
1244 // Max_{subset S of Theta} (residual_capacity * start_min(S) + energy(S))
1246};
1247
// Out-of-class definition of the 'no such task' sentinel.
const int DualCapacityThetaNode::kNone = -1;
1249
1250// A tree for dual capacity theta nodes
1251class DualCapacityThetaTree
1252 : public MonoidOperationTree<DualCapacityThetaNode> {
1253 public:
1254 static const int64_t kNotInitialized;
1255
1256 explicit DualCapacityThetaTree(int size)
1257 : MonoidOperationTree<DualCapacityThetaNode>(size),
1258 capacity_max_(-1),
1259 residual_capacity_(-1) {}
1260
1261 virtual ~DualCapacityThetaTree() {}
1262
1263 void Init(int64_t capacity_max, int64_t residual_capacity) {
1264 DCHECK_LE(0, residual_capacity);
1265 DCHECK_LE(residual_capacity, capacity_max);
1266 Clear();
1267 capacity_max_ = capacity_max;
1268 residual_capacity_ = residual_capacity;
1269 }
1270
1271 void Insert(const CumulativeTask* task) {
1272 Set(task->index,
1273 DualCapacityThetaNode(capacity_max_, residual_capacity_, *task));
1274 }
1275
1276 void Insert(const VariableCumulativeTask* task) {
1277 Set(task->index,
1278 DualCapacityThetaNode(capacity_max_, residual_capacity_, *task));
1279 }
1280
1281 private:
1282 int64_t capacity_max_;
1283 int64_t residual_capacity_;
1284 DISALLOW_COPY_AND_ASSIGN(DualCapacityThetaTree);
1285};
1286
// Out-of-class definition of the 'not yet initialized' sentinel.
const int64_t DualCapacityThetaTree::kNotInitialized = -1LL;
1288
1289// An object that can dive down a branch of a DualCapacityThetaTree to compute
1290// Env(j, c) in Petr Vilim's notations.
1291//
1292// In 'Edge finding filtering algorithm for discrete cumulative resources in
1293// O(kn log n)' by Petr Vilim, this corresponds to line 6--8 in algorithm 1.3,
1294// plus all of algorithm 1.2.
1295//
1296// http://vilim.eu/petr/cp2009.pdf
1297// Note: use the version pointed to by this pointer, not the version from the
1298// conference proceedings, which has a few errors.
1299class EnvJCComputeDiver {
1300 public:
1301 static const int64_t kNotAvailable;
1302 explicit EnvJCComputeDiver(int energy_threshold)
1303 : energy_threshold_(energy_threshold),
1304 energy_alpha_(kNotAvailable),
1305 energetic_end_min_alpha_(kNotAvailable) {}
1306 void OnArgumentReached(int index, const DualCapacityThetaNode& argument) {
1307 energy_alpha_ = argument.energy;
1308 energetic_end_min_alpha_ = argument.energetic_end_min;
1309 // We should reach a leaf that is not the identity
1310 // DCHECK_GT(energetic_end_min_alpha_, kint64min);
1311 // TODO(user): Check me.
1312 }
1313 bool ChooseGoLeft(const DualCapacityThetaNode& current,
1314 const DualCapacityThetaNode& left_child,
1315 const DualCapacityThetaNode& right_child) {
1316 if (right_child.residual_energetic_end_min > energy_threshold_) {
1317 return false; // enough energy on right
1318 } else {
1319 energy_threshold_ -= right_child.energy;
1320 return true;
1321 }
1322 }
1323 void OnComeBackFromLeft(const DualCapacityThetaNode& current,
1324 const DualCapacityThetaNode& left_child,
1325 const DualCapacityThetaNode& right_child) {
1326 // The left subtree intersects the alpha set.
1327 // The right subtree does not intersect the alpha set.
1328 // The energy_alpha_ and energetic_end_min_alpha_ previously
1329 // computed are valid for this node too: there's nothing to do.
1330 }
1331 void OnComeBackFromRight(const DualCapacityThetaNode& current,
1332 const DualCapacityThetaNode& left_child,
1333 const DualCapacityThetaNode& right_child) {
1334 // The left subtree is included in the alpha set.
1335 // The right subtree intersects the alpha set.
1336 energetic_end_min_alpha_ =
1337 std::max(energetic_end_min_alpha_,
1338 CapAdd(left_child.energetic_end_min, energy_alpha_));
1339 energy_alpha_ += left_child.energy;
1340 }
1341 int64_t GetEnvJC(const DualCapacityThetaNode& root) const {
1342 const int64_t energy = root.energy;
1343 const int64_t energy_beta = CapSub(energy, energy_alpha_);
1344 return CapAdd(energetic_end_min_alpha_, energy_beta);
1345 }
1346
1347 private:
1348 // Energy threshold such that if a set has an energetic_end_min greater than
1349 // the threshold, then it can push tasks that must end at or after the
1350 // currently considered end max.
1351 //
1352 // Used when diving down only.
1353 int64_t energy_threshold_;
1354
1355 // Energy of the alpha set, that is, the set of tasks whose start min does not
1356 // exceed the max start min of a set with excess residual energy.
1357 //
1358 // Used when swimming up only.
1359 int64_t energy_alpha_;
1360
1361 // Energetic end min of the alpha set.
1362 //
1363 // Used when swimming up only.
1364 int64_t energetic_end_min_alpha_;
1365};
1366
// Out-of-class definition of the 'value not computed yet' sentinel.
const int64_t EnvJCComputeDiver::kNotAvailable = -1LL;
1368
1369// In all the following, the term 'update' means 'a potential new start min for
1370// a task'. The edge-finding algorithm is in two phase: one compute potential
1371// new start mins, the other detects whether they are applicable or not for each
1372// task.
1373
1374// Collection of all updates (i.e., potential new start mins) for a given value
1375// of the demand.
1376class UpdatesForADemand {
1377 public:
1378 explicit UpdatesForADemand(int size)
1379 : updates_(size, 0), up_to_date_(false) {}
1380
1381 const int64_t Update(int index) { return updates_[index]; }
1382 void Reset() { up_to_date_ = false; }
1383 void SetUpdate(int index, int64_t update) {
1384 DCHECK(!up_to_date_);
1385 DCHECK_LT(index, updates_.size());
1386 updates_[index] = update;
1387 }
1388 bool up_to_date() const { return up_to_date_; }
1389 void set_up_to_date() { up_to_date_ = true; }
1390
1391 private:
1392 std::vector<int64_t> updates_;
1393 bool up_to_date_;
1394 DISALLOW_COPY_AND_ASSIGN(UpdatesForADemand);
1395};
1396
1397// One-sided cumulative edge finder.
template <class Task>
class EdgeFinder : public Constraint {
 public:
  // Takes ownership of the tasks (deleted in the destructor). 'capacity' is
  // the (possibly variable) capacity of the cumulative resource.
  EdgeFinder(Solver* const solver, const std::vector<Task*>& tasks,
             IntVar* const capacity)
      : Constraint(solver),
        capacity_(capacity),
        tasks_(tasks),
        by_start_min_(tasks.size()),
        by_end_max_(tasks.size()),
        by_end_min_(tasks.size()),
        lt_tree_(tasks.size(), capacity_->Max()),
        dual_capacity_tree_(tasks.size()),
        has_zero_demand_tasks_(true) {}

  ~EdgeFinder() override {
    gtl::STLDeleteElements(&tasks_);
    gtl::STLDeleteValues(&update_map_);
  }

  void Post() override {
    // Add the demons
    Demon* const demon = MakeDelayedConstraintDemon0(
        solver(), this, &EdgeFinder::InitialPropagate, "RangeChanged");
    for (Task* const task : tasks_) {
      // Delay propagation, as this constraint is not incremental: we pay
      // O(n log n) each time the constraint is awakened.
      task->WhenAnything(demon);
    }
    capacity_->WhenRange(demon);
  }

  // The propagation algorithms: checks for overloading, computes new start mins
  // according to the edge-finding rules, and applies them.
  void InitialPropagate() override {
    InitPropagation();
    PropagateBasedOnEndMinGreaterThanEndMax();
    FillInTree();
    PropagateBasedOnEnergy();
    ApplyNewBounds();
  }

  void Accept(ModelVisitor* const visitor) const override {
    LOG(FATAL) << "Should Not Be Visited";
  }

  std::string DebugString() const override { return "EdgeFinder"; }

 private:
  // Returns (creating it on first use) the update cache for the given
  // demand minimum. Owned by update_map_, deleted in the destructor.
  UpdatesForADemand* GetOrMakeUpdate(int64_t demand_min) {
    UpdatesForADemand* update = gtl::FindPtrOrNull(update_map_, demand_min);
    if (update == nullptr) {
      update = new UpdatesForADemand(tasks_.size());
      update_map_[demand_min] = update;
    }
    return update;
  }

  // Sets the fields in a proper state to run the propagation algorithm.
  void InitPropagation() {
    // Clear the update stack
    start_min_update_.clear();
    // Re-init vectors if has_zero_demand_tasks_ is true
    if (has_zero_demand_tasks_.Value()) {
      by_start_min_.clear();
      by_end_min_.clear();
      by_end_max_.clear();
      // Only populate tasks with demand_min > 0.
      bool zero_demand = false;
      for (Task* const task : tasks_) {
        if (task->DemandMin() > 0) {
          by_start_min_.push_back(task);
          by_end_min_.push_back(task);
          by_end_max_.push_back(task);
        } else {
          zero_demand = true;
        }
      }
      // Once no task can have zero demand any more, stop re-filtering
      // (reversible flag: restored on backtrack).
      if (!zero_demand) {
        has_zero_demand_tasks_.SetValue(solver(), false);
      }
    }

    // sort by start min.
    std::sort(by_start_min_.begin(), by_start_min_.end(),
              StartMinLessThan<Task>);
    for (int i = 0; i < by_start_min_.size(); ++i) {
      by_start_min_[i]->index = i;
    }
    // Sort by end max.
    std::sort(by_end_max_.begin(), by_end_max_.end(), EndMaxLessThan<Task>);
    // Sort by end min.
    std::sort(by_end_min_.begin(), by_end_min_.end(), EndMinLessThan<Task>);
    // Initialize the tree with the new capacity.
    lt_tree_.Init(capacity_->Max());
    // Clear updates
    for (const auto& entry : update_map_) {
      entry.second->Reset();
    }
  }

  // Computes all possible update values for tasks of given demand, and stores
  // these values in update_map_[demand].
  // Runs in O(n log n).
  // This corresponds to lines 2--13 in algorithm 1.3 in Petr Vilim's paper.
  void ComputeConditionalStartMins(UpdatesForADemand* updates,
                                   int64_t demand_min) {
    DCHECK_GT(demand_min, 0);
    DCHECK(updates != nullptr);
    const int64_t capacity_max = capacity_->Max();
    const int64_t residual_capacity = CapSub(capacity_max, demand_min);
    dual_capacity_tree_.Init(capacity_max, residual_capacity);
    // It's important to initialize the update at IntervalVar::kMinValidValue
    // rather than at kInt64min, because its opposite may be used if it's a
    // mirror variable, and
    // -kInt64min = -(-kInt64max - 1) = kInt64max + 1 = -kInt64min
    int64_t update = IntervalVar::kMinValidValue;
    for (int i = 0; i < by_end_max_.size(); ++i) {
      Task* const task = by_end_max_[i];
      if (task->EnergyMin() == 0) continue;
      const int64_t current_end_max = task->interval->EndMax();
      dual_capacity_tree_.Insert(task);
      const int64_t energy_threshold = residual_capacity * current_end_max;
      const DualCapacityThetaNode& root = dual_capacity_tree_.result();
      const int64_t res_energetic_end_min = root.residual_energetic_end_min;
      if (res_energetic_end_min > energy_threshold) {
        EnvJCComputeDiver diver(energy_threshold);
        dual_capacity_tree_.DiveInTree(&diver);
        const int64_t enjv = diver.GetEnvJC(dual_capacity_tree_.result());
        const int64_t numerator = CapSub(enjv, energy_threshold);
        const int64_t diff = MathUtil::CeilOfRatio(numerator, demand_min);
        update = std::max(update, diff);
      }
      // Updates are monotone in i: 'update' only grows along the loop.
      updates->SetUpdate(i, update);
    }
    updates->set_up_to_date();
  }

  // Returns the new start min that can be inferred for task_to_push if it is
  // proved that it cannot end before by_end_max[end_max_index] does.
  int64_t ConditionalStartMin(const Task& task_to_push, int end_max_index) {
    if (task_to_push.EnergyMin() == 0) {
      return task_to_push.interval->StartMin();
    }
    const int64_t demand_min = task_to_push.DemandMin();
    UpdatesForADemand* const updates = GetOrMakeUpdate(demand_min);
    if (!updates->up_to_date()) {
      ComputeConditionalStartMins(updates, demand_min);
    }
    DCHECK(updates->up_to_date());
    return updates->Update(end_max_index);
  }

  // Propagates by discovering all end-after-end relationships purely based on
  // comparisons between end mins and end maxes: there is no energetic reasoning
  // here, but this allows updates that the standard edge-finding detection rule
  // misses.
  // See paragraph 6.2 in http://vilim.eu/petr/cp2009.pdf.
  void PropagateBasedOnEndMinGreaterThanEndMax() {
    int end_max_index = 0;
    int64_t max_start_min = std::numeric_limits<int64_t>::min();
    for (Task* const task : by_end_min_) {
      const int64_t end_min = task->interval->EndMin();
      while (end_max_index < by_start_min_.size() &&
             by_end_max_[end_max_index]->interval->EndMax() <= end_min) {
        max_start_min = std::max(
            max_start_min, by_end_max_[end_max_index]->interval->StartMin());
        ++end_max_index;
      }
      if (end_max_index > 0 && task->interval->StartMin() <= max_start_min &&
          task->interval->EndMax() > task->interval->EndMin()) {
        DCHECK_LE(by_end_max_[end_max_index - 1]->interval->EndMax(), end_min);
        // The update is valid and may be interesting:
        // * If task->StartMin() > max_start_min, then all tasks whose end_max
        //     is less than or equal to end_min have a start min that is less
        //     than task->StartMin(). In this case, any update we could
        //     compute would also be computed by the standard edge-finding
        //     rule. It's better not to compute it, then: it may not be
        //     needed.
        // * If task->EndMax() <= task->EndMin(), that means the end max is
        //     bound. In that case, 'task' itself belong to the set of tasks
        //     that must end before end_min, which may cause the result of
        //     ConditionalStartMin(task, end_max_index - 1) not to be a valid
        //     update.
        const int64_t update = ConditionalStartMin(*task, end_max_index - 1);
        start_min_update_.push_back(std::make_pair(task->interval, update));
      }
    }
  }

  // Fill the theta-lambda-tree, and check for overloading.
  void FillInTree() {
    for (Task* const task : by_end_max_) {
      lt_tree_.Insert(*task);
      // Maximum energetic end min without overload.
      const int64_t max_feasible =
          CapProd(capacity_->Max(), task->interval->EndMax());
      if (lt_tree_.energetic_end_min() > max_feasible) {
        solver()->Fail();
      }
    }
  }

  // The heart of the propagation algorithm. Should be called with all tasks
  // being in the Theta set. It detects tasks that need to be pushed.
  void PropagateBasedOnEnergy() {
    for (int j = by_start_min_.size() - 2; j >= 0; --j) {
      // Move task j+1 (in end-max order) from Theta to Lambda (grey).
      lt_tree_.Grey(*by_end_max_[j + 1]);
      Task* const twj = by_end_max_[j];
      // We should have checked for overload earlier.
      const int64_t max_feasible =
          CapProd(capacity_->Max(), twj->interval->EndMax());
      DCHECK_LE(lt_tree_.energetic_end_min(), max_feasible);
      while (lt_tree_.energetic_end_min_opt() > max_feasible) {
        const int i = lt_tree_.argmax_energetic_end_min_opt();
        DCHECK_GE(i, 0);
        PropagateTaskCannotEndBefore(i, j);
        lt_tree_.Reset(i);
      }
    }
  }

  // Takes into account the fact that the task of given index cannot end before
  // the given new end min.
  void PropagateTaskCannotEndBefore(int index, int end_max_index) {
    Task* const task_to_push = by_start_min_[index];
    const int64_t update = ConditionalStartMin(*task_to_push, end_max_index);
    start_min_update_.push_back(std::make_pair(task_to_push->interval, update));
  }

  // Applies the previously computed updates.
  void ApplyNewBounds() {
    for (const std::pair<IntervalVar*, int64_t>& update : start_min_update_) {
      update.first->SetStartMin(update.second);
    }
  }

  // Capacity of the cumulative resource.
  IntVar* const capacity_;

  // Initial vector of tasks
  std::vector<Task*> tasks_;

  // Cumulative tasks, ordered by non-decreasing start min.
  std::vector<Task*> by_start_min_;

  // Cumulative tasks, ordered by non-decreasing end max.
  std::vector<Task*> by_end_max_;

  // Cumulative tasks, ordered by non-decreasing end min.
  std::vector<Task*> by_end_min_;

  // Cumulative theta-lamba tree.
  CumulativeLambdaThetaTree lt_tree_;

  // Needed by ComputeConditionalStartMins.
  DualCapacityThetaTree dual_capacity_tree_;

  // Stack of updates to the new start min to do.
  std::vector<std::pair<IntervalVar*, int64_t>> start_min_update_;

  // update_map_[d][i] is an integer such that if a task
  // whose demand is d cannot end before by_end_max_[i], then it cannot start
  // before update_map_[d][i].
  absl::flat_hash_map<int64_t, UpdatesForADemand*> update_map_;

  // Has one task a demand min == 0
  Rev<bool> has_zero_demand_tasks_;

  DISALLOW_COPY_AND_ASSIGN(EdgeFinder);
};
1669
1670// A point in time where the usage profile changes.
1671// Starting from time (included), the usage is what it was immediately before
1672// time, plus the delta.
1673//
1674// Example:
1675// Consider the following vector of ProfileDelta's:
1676// { t=1, d=+3}, { t=4, d=+1 }, { t=5, d=-2}, { t=8, d=-1}
1677// This represents the following usage profile:
1678//
1679// usage
1680// 4 | ****.
1681// 3 | ************. .
1682// 2 | . . ************.
1683// 1 | . . . .
1684// 0 |*******----------------------------*******************-> time
1685// 0 1 2 3 4 5 6 7 8 9
1686//
1687// Note that the usage profile is right-continuous (see
1688// http://en.wikipedia.org/wiki/Left-continuous#Directional_continuity).
1689// This is because intervals for tasks are always closed on the start side
1690// and open on the end side.
// One step of the usage profile: at 'time' (inclusive), the usage changes by
// 'delta' relative to the usage immediately before 'time'.
struct ProfileDelta {
  ProfileDelta(int64_t t, int64_t d) : time(t), delta(d) {}
  int64_t time;
  int64_t delta;
};
1696
1697bool TimeLessThan(const ProfileDelta& delta1, const ProfileDelta& delta2) {
1698 return delta1.time < delta2.time;
1699}
1700
1701// Cumulative time-table.
1702//
1703// This class implements a propagator for the CumulativeConstraint which is not
1704// incremental, and where a call to InitialPropagate() takes time which is
1705// O(n^2) and Omega(n log n) with n the number of cumulative tasks.
1706//
// Despite the high complexity, this propagator is needed because, of those
// implemented, it is the only one guaranteeing that when all variables are
// instantiated, no contradiction is detected if and only if the constraint is
// satisfied.
1710//
1711// The implementation is quite naive, and could certainly be improved, for
1712// example by maintaining the profile incrementally.
1713template <class Task>
1714class CumulativeTimeTable : public Constraint {
1715 public:
1716 CumulativeTimeTable(Solver* const solver, const std::vector<Task*>& tasks,
1717 IntVar* const capacity)
1718 : Constraint(solver), by_start_min_(tasks), capacity_(capacity) {
1719 // There may be up to 2 delta's per interval (one on each side),
1720 // plus two sentinels
1721 const int profile_max_size = 2 * by_start_min_.size() + 2;
1722 profile_non_unique_time_.reserve(profile_max_size);
1723 profile_unique_time_.reserve(profile_max_size);
1724 }
1725
1726 ~CumulativeTimeTable() override { gtl::STLDeleteElements(&by_start_min_); }
1727
1728 void InitialPropagate() override {
1729 BuildProfile();
1730 PushTasks();
1731 // TODO(user): When a task has a fixed part, we could propagate
1732 // max_demand from its current location.
1733 }
1734
1735 void Post() override {
1736 Demon* demon = MakeDelayedConstraintDemon0(
1737 solver(), this, &CumulativeTimeTable::InitialPropagate,
1738 "InitialPropagate");
1739 for (Task* const task : by_start_min_) {
1740 task->WhenAnything(demon);
1741 }
1742 capacity_->WhenRange(demon);
1743 }
1744
1745 void Accept(ModelVisitor* const visitor) const override {
1746 LOG(FATAL) << "Should not be visited";
1747 }
1748
1749 std::string DebugString() const override { return "CumulativeTimeTable"; }
1750
1751 private:
1752 // Build the usage profile. Runs in O(n log n).
 // The profile is the sum, over all must-be-performed tasks, of each task's
 // compulsory part: the interval [StartMax, EndMin) that the task occupies
 // in every feasible schedule. It is represented as a sorted list of
 // (time, delta) events bracketed by int64 min/max sentinels.
1753 void BuildProfile() {
1754 // Build profile with non unique time
1755 profile_non_unique_time_.clear();
1756 for (const Task* const task : by_start_min_) {
1757 const IntervalVar* const interval = task->interval;
1758 const int64_t start_max = interval->StartMax();
1759 const int64_t end_min = interval->EndMin();
 // A task contributes only if it surely runs and has a non-empty
 // compulsory part (start_max < end_min).
1760 if (interval->MustBePerformed() && start_max < end_min) {
1761 const int64_t demand_min = task->DemandMin();
1762 if (demand_min > 0) {
1763 profile_non_unique_time_.emplace_back(start_max, +demand_min);
1764 profile_non_unique_time_.emplace_back(end_min, -demand_min);
1765 }
1766 }
1767 }
1768 // Sort events chronologically.
1769 std::sort(profile_non_unique_time_.begin(), profile_non_unique_time_.end(),
1770 TimeLessThan);
1771 // Build profile with unique times by merging deltas at equal times.
1772 profile_unique_time_.clear();
 // Leading sentinel so back() is always valid during the merge.
1773 profile_unique_time_.emplace_back(std::numeric_limits<int64_t>::min(), 0);
1774 int64_t usage = 0;
1775 for (const ProfileDelta& step : profile_non_unique_time_) {
1776 if (step.time == profile_unique_time_.back().time) {
1777 profile_unique_time_.back().delta += step.delta;
1778 } else {
1779 profile_unique_time_.push_back(step);
1780 }
1781 // Update usage.
1782 usage += step.delta;
1783 }
1784 // Every +delta has a matching -delta, so the final usage is 0.
1785 DCHECK_EQ(0, usage);
1786 // Scan to find max usage.
1787 int64_t max_usage = 0;
1788 for (const ProfileDelta& step : profile_unique_time_) {
1789 usage += step.delta;
1790 if (usage > max_usage) {
1791 max_usage = usage;
1792 }
1793 }
1794 DCHECK_EQ(0, usage);
 // The compulsory parts alone need max_usage capacity; fails the solver
 // if the capacity's upper bound is below it.
1795 capacity_->SetMin(max_usage);
1796 // Add a trailing sentinel so scans in PushTask never run off the end.
1797 profile_unique_time_.emplace_back(std::numeric_limits<int64_t>::max(), 0);
1798 }
1799
1800 // Update the start min for all tasks. Runs in O(n^2) and Omega(n).
 // Sweeps tasks in increasing start-min order while advancing a cursor
 // (profile_index, usage) through the profile, then delegates the actual
 // filtering of each task to PushTask().
1801 void PushTasks() {
1802 std::sort(by_start_min_.begin(), by_start_min_.end(),
1803 StartMinLessThan<Task>);
1804 int64_t usage = 0;
1805 int profile_index = 0;
1806 for (const Task* const task : by_start_min_) {
1807 const IntervalVar* const interval = task->interval;
 // Fully fixed tasks cannot be pushed; skip them.
1808 if (interval->StartMin() == interval->StartMax() &&
1809 interval->EndMin() == interval->EndMax()) {
1810 continue;
1811 }
 // Advance the cursor to the first profile event at or after the
 // task's start min; 'usage' tracks the profile height at that event.
1812 while (interval->StartMin() > profile_unique_time_[profile_index].time) {
1813 DCHECK(profile_index < profile_unique_time_.size());
1814 ++profile_index;
1815 usage += profile_unique_time_[profile_index].delta;
1816 }
1817 PushTask(task, profile_index, usage);
1818 }
1819 }
1820
1821 // Push the given task to new_start_min, defined as the smallest integer such
1822 // that the profile usage for all tasks, excluding the current one, does not
1823 // exceed capacity_ - task->demand on the interval
1824 // [new_start_min, new_start_min + task->interval->DurationMin() ).
 // 'profile_index' / 'usage' are the sweep cursor computed by PushTasks():
 // the first profile event at or after the task's start min, and the
 // profile height right after that event.
1825 void PushTask(const Task* const task, int profile_index, int64_t usage) {
1826 // Init
1827 const IntervalVar* const interval = task->interval;
1828 const int64_t demand_min = task->DemandMin();
1829 if (demand_min == 0) { // Demand can be null, nothing to propagate.
1830 return;
1831 }
 // Capacity left for the other tasks once this one is scheduled.
1832 const int64_t residual_capacity = CapSub(capacity_->Max(), demand_min);
1833 const int64_t duration = task->interval->DurationMin();
1834 const ProfileDelta& first_prof_delta = profile_unique_time_[profile_index];
1835
1836 int64_t new_start_min = interval->StartMin();
1837
1838 DCHECK_GE(first_prof_delta.time, interval->StartMin());
1839 // The check above is with a '>='. Let's first treat the '>' case
1840 if (first_prof_delta.time > interval->StartMin()) {
1841 // There was no profile delta at a time between interval->StartMin()
1842 // (included) and the current one.
1843 // As we don't delete delta's of 0 value, this means the current task
1844 // does not contribute to the usage before:
1845 DCHECK((interval->StartMax() >= first_prof_delta.time) ||
1846 (interval->StartMax() >= interval->EndMin()));
1847 // The 'usage' given in argument is valid at first_prof_delta.time. To
1848 // compute the usage at the start min, we need to remove the last delta.
1849 const int64_t usage_at_start_min = CapSub(usage, first_prof_delta.delta);
1850 if (usage_at_start_min > residual_capacity) {
1851 new_start_min = profile_unique_time_[profile_index].time;
1852 }
1853 }
1854
 // The profile includes this task's own compulsory part; build the two
 // deltas to subtract so we measure the usage of the OTHER tasks only.
1855 // Influence of current task
1856 const int64_t start_max = interval->StartMax();
1857 const int64_t end_min = interval->EndMin();
1858 ProfileDelta delta_start(start_max, 0);
1859 ProfileDelta delta_end(end_min, 0);
1860 if (interval->MustBePerformed() && start_max < end_min) {
1861 delta_start.delta = +demand_min;
1862 delta_end.delta = -demand_min;
1863 }
 // Scan the profile over [new_start_min, new_start_min + duration);
 // each overload pushes new_start_min to the next event time.
1864 while (profile_unique_time_[profile_index].time <
1865 CapAdd(duration, new_start_min)) {
1866 const ProfileDelta& profile_delta = profile_unique_time_[profile_index];
1867 DCHECK(profile_index < profile_unique_time_.size());
1868 // Compensate for current task
1869 if (profile_delta.time == delta_start.time) {
1870 usage -= delta_start.delta;
1871 }
1872 if (profile_delta.time == delta_end.time) {
1873 usage -= delta_end.delta;
1874 }
1875 // Increment time
1876 ++profile_index;
1877 DCHECK(profile_index < profile_unique_time_.size());
1878 // Does it fit?
1879 if (usage > residual_capacity) {
1880 new_start_min = profile_unique_time_[profile_index].time;
1881 }
1882 usage += profile_unique_time_[profile_index].delta;
1883 }
 // May fail the solver if new_start_min exceeds the task's start max.
1884 task->interval->SetStartMin(new_start_min);
1885 }
1886
 // A profile is a time-sorted list of (time, delta) usage events.
1887 typedef std::vector<ProfileDelta> Profile;
1888
 // Merged profile with strictly increasing times, plus sentinels.
1889 Profile profile_unique_time_;
 // Raw per-task events before merging; may repeat times.
1890 Profile profile_non_unique_time_;
 // Owned task wrappers, kept sorted by start min by PushTasks().
1891 std::vector<Task*> by_start_min_;
 // Capacity of the cumulative resource.
1892 IntVar* const capacity_;
1893
1894 DISALLOW_COPY_AND_ASSIGN(CumulativeTimeTable);
1895};
1896
1897// Cumulative idempotent Time-Table.
1898//
1899// This propagator is based on Letort et al. 2012 and Gay et al. 2015.
1900//
1901// TODO(user): fill the description once the incremental aspects are
1902// implemented.
1903//
1904// Worst case: O(n^2 log n) -- really unlikely in practice.
1905// Best case: Omega(1).
1906// Practical: Almost linear in the number of unfixed tasks.
 //
 // Sweep-line propagator: 'pos_' is the current sweep time, 'gap_' the
 // remaining capacity at pos_ (capacity max minus compulsory-part usage).
 // Events (scp = start of compulsory part, ecp = end of compulsory part,
 // pr = pruning candidate) are processed from min-heaps keyed by time.
1907template <class Task>
1908class TimeTableSync : public Constraint {
1909 public:
1910 TimeTableSync(Solver* const solver, const std::vector<Task*>& tasks,
1911 IntVar* const capacity)
1912 : Constraint(solver), tasks_(tasks), capacity_(capacity) {
1913 num_tasks_ = tasks_.size();
1914 gap_ = 0;
1915 prev_gap_ = 0;
 // NOTE(review): source lines 1916-1917 were dropped by this extraction;
 // they presumably initialize pos_ and next_pos_ -- confirm upstream.
1918 // Allocate vectors to contain no more than n_tasks.
1919 start_min_.reserve(num_tasks_);
1920 start_max_.reserve(num_tasks_);
1921 end_min_.reserve(num_tasks_);
1922 durations_.reserve(num_tasks_);
1923 demands_.reserve(num_tasks_);
1924 }
1925
 // Frees the owned Task wrappers.
1926 ~TimeTableSync() override { gtl::STLDeleteElements(&tasks_); }
1927
1928 void InitialPropagate() override {
1929 // Reset data structures.
1930 BuildEvents();
1931 while (!events_scp_.empty() && !events_ecp_.empty()) {
1932 // Move the sweep line.
1933 pos_ = NextEventTime();
1934 // Update the profile with compulsory part events.
1935 ProcessEventsScp();
1936 ProcessEventsEcp();
1937 // Update minimum capacity (may fail)
1938 capacity_->SetMin(capacity_->Max() - gap_);
1939 // Time to the next possible profile increase.
1940 next_pos_ = NextScpTime();
1941 // Consider new task to schedule.
1942 ProcessEventsPr();
1943 // Filter.
1944 FilterMin();
1945 }
1946 }
1947
 // One delayed demon re-triggers full propagation on any change.
1948 void Post() override {
1949 Demon* demon = MakeDelayedConstraintDemon0(
1950 solver(), this, &TimeTableSync::InitialPropagate, "InitialPropagate");
1951 for (Task* const task : tasks_) {
1952 task->WhenAnything(demon);
1953 }
1954 capacity_->WhenRange(demon);
1955 }
1956
1957 void Accept(ModelVisitor* const visitor) const override {
1958 LOG(FATAL) << "Should not be visited";
1959 }
1960
1961 std::string DebugString() const override { return "TimeTableSync"; }
1962
1963 private:
1964 // Task state.
1965 enum State { NONE, READY, CHECK, CONFLICT };
1966
 // Time of the next start-of-compulsory-part event, if any.
1967 inline int64_t NextScpTime() {
1968 return !events_scp_.empty() ? events_scp_.top().first
 // NOTE(review): source line 1969 (the ':' branch -- presumably
 // std::numeric_limits<int64_t>::max()) is missing from this extraction.
1970 }
1971
 // Earliest time over all pending pr/scp/ecp events.
1972 inline int64_t NextEventTime() {
 // NOTE(review): source line 1973 (the declaration of 'time', presumably
 // initialized to std::numeric_limits<int64_t>::max()) is missing here.
1974 if (!events_pr_.empty()) {
1975 time = events_pr_.top().first;
1976 }
1977 if (!events_scp_.empty()) {
1978 int64_t t = events_scp_.top().first;
1979 time = t < time ? t : time;
1980 }
1981 if (!events_ecp_.empty()) {
1982 int64_t t = events_ecp_.top().first;
1983 time = t < time ? t : time;
1984 }
1985 return time;
1986 }
1987
 // Handles start-of-compulsory-part events at the sweep position.
1988 void ProcessEventsScp() {
1989 while (!events_scp_.empty() && events_scp_.top().first == pos_) {
1990 const int64_t task_id = events_scp_.top().second;
1991 events_scp_.pop();
1992 const int64_t old_end_min = end_min_[task_id];
1993 if (states_[task_id] == State::CONFLICT) {
1994 // Update cached values.
1995 const int64_t new_end_min = pos_ + durations_[task_id];
1996 start_min_[task_id] = pos_;
1997 end_min_[task_id] = new_end_min;
1998 // Filter the domain
1999 tasks_[task_id]->interval->SetStartMin(pos_);
2000 }
2001 // The task is scheduled.
2002 states_[task_id] = State::READY;
2003 // Update the profile if the task has a compulsory part.
2004 if (pos_ < end_min_[task_id]) {
2005 gap_ -= demands_[task_id];
2006 if (old_end_min <= pos_) {
2007 events_ecp_.push(kv(end_min_[task_id], task_id));
2008 }
2009 }
2010 }
2011 }
2012
 // Handles end-of-compulsory-part events at the sweep position.
2013 void ProcessEventsEcp() {
2014 while (!events_ecp_.empty() && events_ecp_.top().first == pos_) {
2015 const int64_t task_id = events_ecp_.top().second;
2016 events_ecp_.pop();
2017 // Update the event if it is not up to date.
2018 if (pos_ < end_min_[task_id]) {
2019 events_ecp_.push(kv(end_min_[task_id], task_id));
2020 } else {
2021 gap_ += demands_[task_id];
2022 }
2023 }
2024 }
2025
 // Classifies tasks whose start min reaches the sweep position.
2026 void ProcessEventsPr() {
2027 while (!events_pr_.empty() && events_pr_.top().first == pos_) {
2028 const int64_t task_id = events_pr_.top().second;
2029 events_pr_.pop();
2030 // The task is in conflict with the current profile.
2031 if (demands_[task_id] > gap_) {
2032 states_[task_id] = State::CONFLICT;
2033 conflict_.push(kv(demands_[task_id], task_id));
2034 continue;
2035 }
2036 // The task is not in conflict for the moment.
2037 if (next_pos_ < end_min_[task_id]) {
2038 states_[task_id] = State::CHECK;
2039 check_.push(kv(demands_[task_id], task_id));
2040 continue;
2041 }
2042 // The task is not in conflict and can be scheduled.
2043 states_[task_id] = State::READY;
2044 }
2045 }
2046
 // Re-classifies CHECK/CONFLICT tasks after the profile changed, and
 // pushes the start min of tasks leaving the CONFLICT state.
2047 void FilterMin() {
2048 // The profile exceeds the capacity.
2049 capacity_->SetMin(capacity_->Max() - gap_);
2050 // The profile has increased.
2051 if (gap_ < prev_gap_) {
2052 // Reconsider the task in check state.
2053 while (!check_.empty() && demands_[check_.top().second] > gap_) {
2054 const int64_t task_id = check_.top().second;
2055 check_.pop();
2056 if (states_[task_id] == State::CHECK && pos_ < end_min_[task_id]) {
2057 states_[task_id] = State::CONFLICT;
2058 conflict_.push(kv(demands_[task_id], task_id));
2059 continue;
2060 }
2061 states_[task_id] = State::READY;
2062 }
2063 prev_gap_ = gap_;
2064 }
2065 // The profile has decreased.
2066 if (gap_ > prev_gap_) {
2067 // Reconsider the tasks in conflict.
2068 while (!conflict_.empty() && demands_[conflict_.top().second] <= gap_) {
2069 const int64_t task_id = conflict_.top().second;
2070 conflict_.pop();
2071 if (states_[task_id] != State::CONFLICT) {
2072 continue;
2073 }
2074 const int64_t old_end_min = end_min_[task_id];
2075 // Update the cache.
2076 start_min_[task_id] = pos_;
2077 end_min_[task_id] = pos_ + durations_[task_id];
2078 // Filter the domain.
2079 tasks_[task_id]->interval->SetStartMin(pos_); // should not fail.
2080 // The task still has to be checked.
2081 if (next_pos_ < end_min_[task_id]) {
2082 states_[task_id] = State::CHECK;
2083 check_.push(kv(demands_[task_id], task_id));
2084 } else {
2085 states_[task_id] = State::READY;
2086 }
2087 // Update possible compulsory part.
2088 const int64_t start_max = start_max_[task_id];
2089 if (start_max >= old_end_min && start_max < end_min_[task_id]) {
2090 events_ecp_.push(kv(end_min_[task_id], task_id));
2091 }
2092 }
2093 }
2094 prev_gap_ = gap_;
2095 }
2096
 // Resets the sweep state and (re)builds all event heaps and caches.
2097 void BuildEvents() {
2098 // Reset the sweep line.
 // NOTE(review): source lines 2099-2100 were dropped by this extraction;
 // they presumably reset pos_ and next_pos_ -- confirm upstream.
2101 gap_ = capacity_->Max();
2102 prev_gap_ = capacity_->Max();
2103 // Reset dynamic states.
2104 conflict_ = min_heap();
2105 check_ = max_heap();
2106 // Reset profile events.
2107 events_pr_ = min_heap();
2108 events_scp_ = min_heap();
2109 events_ecp_ = min_heap();
2110 // Reset cache.
2111 start_min_.clear();
2112 start_max_.clear();
2113 end_min_.clear();
2114 durations_.clear();
2115 demands_.clear();
2116 states_.clear();
2117 // Build events.
2118 for (int i = 0; i < num_tasks_; i++) {
2119 const int64_t s_min = tasks_[i]->interval->StartMin();
2120 const int64_t s_max = tasks_[i]->interval->StartMax();
2121 const int64_t e_min = tasks_[i]->interval->EndMin();
2122 // Cache the values.
2123 start_min_.push_back(s_min);
2124 start_max_.push_back(s_max);
2125 end_min_.push_back(e_min);
2126 durations_.push_back(tasks_[i]->interval->DurationMin());
2127 demands_.push_back(tasks_[i]->DemandMin());
2128 // Reset task state.
2129 states_.push_back(State::NONE);
2130 // Start compulsory part event.
2131 events_scp_.push(kv(s_max, i));
2132 // Pruning event only if the start time of the task is not fixed.
2133 if (s_min != s_max) {
2134 events_pr_.push(kv(s_min, i));
2135 }
2136 // End of compulsory part only if the task has a compulsory part.
2137 if (s_max < e_min) {
2138 events_ecp_.push(kv(e_min, i));
2139 }
2140 }
2141 }
2142
2143 int64_t num_tasks_;
 // Owned task wrappers.
2144 std::vector<Task*> tasks_;
2145 IntVar* const capacity_;
2146
 // Per-task cached bounds, indexed by task id.
2147 std::vector<int64_t> start_min_;
2148 std::vector<int64_t> start_max_;
2149 std::vector<int64_t> end_min_;
 // NOTE(review): end_max_ appears unused by the visible methods.
2150 std::vector<int64_t> end_max_;
2151 std::vector<int64_t> durations_;
2152 std::vector<int64_t> demands_;
2153
2154 // Pair key value.
2155 typedef std::pair<int64_t, int64_t> kv;
2156 typedef std::priority_queue<kv, std::vector<kv>, std::greater<kv>> min_heap;
2157 typedef std::priority_queue<kv, std::vector<kv>, std::less<kv>> max_heap;
2158
2159 // Profile events.
2160 min_heap events_pr_;
2161 min_heap events_scp_;
2162 min_heap events_ecp_;
2163
2164 // Task state.
2165 std::vector<State> states_;
2166 min_heap conflict_;
2167 max_heap check_;
2168
2169 // Sweep line state.
2170 int64_t pos_;
2171 int64_t next_pos_;
2172 int64_t gap_;
2173 int64_t prev_gap_;
2174};
2175
2176class CumulativeConstraint : public Constraint {
2177 public:
2178 CumulativeConstraint(Solver* const s,
2179 const std::vector<IntervalVar*>& intervals,
2180 const std::vector<int64_t>& demands,
2181 IntVar* const capacity, const std::string& name)
2182 : Constraint(s),
2183 capacity_(capacity),
2184 intervals_(intervals),
2185 demands_(demands) {
2186 tasks_.reserve(intervals.size());
2187 for (int i = 0; i < intervals.size(); ++i) {
2188 tasks_.push_back(CumulativeTask(intervals[i], demands[i]));
2189 }
2190 }
2191
2192 void Post() override {
2193 // For the cumulative constraint, there are many propagators, and they
2194 // don't dominate each other. So the strongest propagation is obtained
2195 // by posting a bunch of different propagators.
2196 const ConstraintSolverParameters& params = solver()->parameters();
2197 if (params.use_cumulative_time_table()) {
2198 if (params.use_cumulative_time_table_sync()) {
2199 PostOneSidedConstraint(false, false, true);
2200 PostOneSidedConstraint(true, false, true);
2201 } else {
2202 PostOneSidedConstraint(false, false, false);
2203 PostOneSidedConstraint(true, false, false);
2204 }
2205 }
2206 if (params.use_cumulative_edge_finder()) {
2207 PostOneSidedConstraint(false, true, false);
2208 PostOneSidedConstraint(true, true, false);
2209 }
2210 if (params.use_sequence_high_demand_tasks()) {
2211 PostHighDemandSequenceConstraint();
2212 }
2213 if (params.use_all_possible_disjunctions()) {
2214 PostAllDisjunctions();
2215 }
2216 }
2217
2218 void InitialPropagate() override {
2219 // Nothing to do: this constraint delegates all the work to other classes
2220 }
2221
2222 void Accept(ModelVisitor* const visitor) const override {
2223 // TODO(user): Build arrays on demand?
2224 visitor->BeginVisitConstraint(ModelVisitor::kCumulative, this);
2225 visitor->VisitIntervalArrayArgument(ModelVisitor::kIntervalsArgument,
2226 intervals_);
2227 visitor->VisitIntegerArrayArgument(ModelVisitor::kDemandsArgument,
2228 demands_);
2229 visitor->VisitIntegerExpressionArgument(ModelVisitor::kCapacityArgument,
2230 capacity_);
2231 visitor->EndVisitConstraint(ModelVisitor::kCumulative, this);
2232 }
2233
2234 std::string DebugString() const override {
2235 return absl::StrFormat("CumulativeConstraint([%s], %s)",
2236 JoinDebugString(tasks_, ", "),
2237 capacity_->DebugString());
2238 }
2239
2240 private:
2241 // Post temporal disjunctions for tasks that cannot overlap.
2242 void PostAllDisjunctions() {
2243 for (int i = 0; i < intervals_.size(); ++i) {
2244 IntervalVar* const interval_i = intervals_[i];
2245 if (interval_i->MayBePerformed()) {
2246 for (int j = i + 1; j < intervals_.size(); ++j) {
2247 IntervalVar* const interval_j = intervals_[j];
2248 if (interval_j->MayBePerformed()) {
2249 if (CapAdd(tasks_[i].demand, tasks_[j].demand) > capacity_->Max()) {
2250 Constraint* const constraint =
2251 solver()->MakeTemporalDisjunction(interval_i, interval_j);
2252 solver()->AddConstraint(constraint);
2253 }
2254 }
2255 }
2256 }
2257 }
2258 }
2259
2260 // Post a Sequence constraint for tasks that requires strictly more than half
2261 // of the resource
2262 void PostHighDemandSequenceConstraint() {
2263 Constraint* constraint = nullptr;
2264 { // Need a block to avoid memory leaks in case the AddConstraint fails
2265 std::vector<IntervalVar*> high_demand_intervals;
2266 high_demand_intervals.reserve(intervals_.size());
2267 for (int i = 0; i < demands_.size(); ++i) {
2268 const int64_t demand = tasks_[i].demand;
2269 // Consider two tasks with demand d1 and d2 such that
2270 // d1 * 2 > capacity_ and d2 * 2 > capacity_.
2271 // Then d1 + d2 = 1/2 (d1 * 2 + d2 * 2)
2272 // > 1/2 (capacity_ + capacity_)
2273 // > capacity_.
2274 // Therefore these two tasks cannot overlap.
2275 if (demand * 2 > capacity_->Max() &&
2276 tasks_[i].interval->MayBePerformed()) {
2277 high_demand_intervals.push_back(tasks_[i].interval);
2278 }
2279 }
2280 if (high_demand_intervals.size() >= 2) {
2281 // If there are less than 2 such intervals, the constraint would do
2282 // nothing
2283 std::string seq_name = absl::StrCat(name(), "-HighDemandSequence");
2284 constraint = solver()->MakeDisjunctiveConstraint(high_demand_intervals,
2285 seq_name);
2286 }
2287 }
2288 if (constraint != nullptr) {
2289 solver()->AddConstraint(constraint);
2290 }
2291 }
2292
2293 // Populate the given vector with useful tasks, meaning the ones on which
2294 // some propagation can be done
2295 void PopulateVectorUsefulTasks(
2296 bool mirror, std::vector<CumulativeTask*>* const useful_tasks) {
2297 DCHECK(useful_tasks->empty());
2298 for (int i = 0; i < tasks_.size(); ++i) {
2299 const CumulativeTask& original_task = tasks_[i];
2300 IntervalVar* const interval = original_task.interval;
2301 // Check if exceed capacity
2302 if (original_task.demand > capacity_->Max()) {
2303 interval->SetPerformed(false);
2304 }
2305 // Add to the useful_task vector if it may be performed and that it
2306 // actually consumes some of the resource.
2307 if (interval->MayBePerformed() && original_task.demand > 0) {
2308 Solver* const s = solver();
2309 IntervalVar* const original_interval = original_task.interval;
2310 IntervalVar* const interval =
2311 mirror ? s->MakeMirrorInterval(original_interval)
2312 : original_interval;
2313 IntervalVar* const relaxed_max = s->MakeIntervalRelaxedMax(interval);
2314 useful_tasks->push_back(
2315 new CumulativeTask(relaxed_max, original_task.demand));
2316 }
2317 }
2318 }
2319
2320 // Makes and return an edge-finder or a time table, or nullptr if it is not
2321 // necessary.
2322 Constraint* MakeOneSidedConstraint(bool mirror, bool edge_finder,
2323 bool tt_sync) {
2324 std::vector<CumulativeTask*> useful_tasks;
2325 PopulateVectorUsefulTasks(mirror, &useful_tasks);
2326 if (useful_tasks.empty()) {
2327 return nullptr;
2328 } else {
2329 Solver* const s = solver();
2330 if (edge_finder) {
2331 const ConstraintSolverParameters& params = solver()->parameters();
2332 return useful_tasks.size() < params.max_edge_finder_size()
2333 ? s->RevAlloc(new EdgeFinder<CumulativeTask>(s, useful_tasks,
2334 capacity_))
2335 : nullptr;
2336 }
2337 if (tt_sync) {
2338 return s->RevAlloc(
2339 new TimeTableSync<CumulativeTask>(s, useful_tasks, capacity_));
2340 }
2341 return s->RevAlloc(
2342 new CumulativeTimeTable<CumulativeTask>(s, useful_tasks, capacity_));
2343 }
2344 }
2345
2346 // Post a straight or mirrored edge-finder, if needed
2347 void PostOneSidedConstraint(bool mirror, bool edge_finder, bool tt_sync) {
2348 Constraint* const constraint =
2349 MakeOneSidedConstraint(mirror, edge_finder, tt_sync);
2350 if (constraint != nullptr) {
2351 solver()->AddConstraint(constraint);
2352 }
2353 }
2354
2355 // Capacity of the cumulative resource
2356 IntVar* const capacity_;
2357
2358 // The tasks that share the cumulative resource
2359 std::vector<CumulativeTask> tasks_;
2360
2361 // Array of intervals for the visitor.
2362 const std::vector<IntervalVar*> intervals_;
2363 // Array of demands for the visitor.
2364 const std::vector<int64_t> demands_;
2365
2366 DISALLOW_COPY_AND_ASSIGN(CumulativeConstraint);
2367};
2368
2369class VariableDemandCumulativeConstraint : public Constraint {
2370 public:
2371 VariableDemandCumulativeConstraint(Solver* const s,
2372 const std::vector<IntervalVar*>& intervals,
2373 const std::vector<IntVar*>& demands,
2374 IntVar* const capacity,
2375 const std::string& name)
2376 : Constraint(s),
2377 capacity_(capacity),
2378 intervals_(intervals),
2379 demands_(demands) {
2380 tasks_.reserve(intervals.size());
2381 for (int i = 0; i < intervals.size(); ++i) {
2382 tasks_.push_back(VariableCumulativeTask(intervals[i], demands[i]));
2383 }
2384 }
2385
2386 void Post() override {
2387 // For the cumulative constraint, there are many propagators, and they
2388 // don't dominate each other. So the strongest propagation is obtained
2389 // by posting a bunch of different propagators.
2390 const ConstraintSolverParameters& params = solver()->parameters();
2391 if (params.use_cumulative_time_table()) {
2392 PostOneSidedConstraint(false, false, false);
2393 PostOneSidedConstraint(true, false, false);
2394 }
2395 if (params.use_cumulative_edge_finder()) {
2396 PostOneSidedConstraint(false, true, false);
2397 PostOneSidedConstraint(true, true, false);
2398 }
2399 if (params.use_sequence_high_demand_tasks()) {
2400 PostHighDemandSequenceConstraint();
2401 }
2402 if (params.use_all_possible_disjunctions()) {
2403 PostAllDisjunctions();
2404 }
2405 }
2406
2407 void InitialPropagate() override {
2408 // Nothing to do: this constraint delegates all the work to other classes
2409 }
2410
2411 void Accept(ModelVisitor* const visitor) const override {
2412 // TODO(user): Build arrays on demand?
2413 visitor->BeginVisitConstraint(ModelVisitor::kCumulative, this);
2414 visitor->VisitIntervalArrayArgument(ModelVisitor::kIntervalsArgument,
2415 intervals_);
2416 visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kDemandsArgument,
2417 demands_);
2418 visitor->VisitIntegerExpressionArgument(ModelVisitor::kCapacityArgument,
2419 capacity_);
2420 visitor->EndVisitConstraint(ModelVisitor::kCumulative, this);
2421 }
2422
2423 std::string DebugString() const override {
2424 return absl::StrFormat("VariableDemandCumulativeConstraint([%s], %s)",
2425 JoinDebugString(tasks_, ", "),
2426 capacity_->DebugString());
2427 }
2428
2429 private:
2430 // Post temporal disjunctions for tasks that cannot overlap.
2431 void PostAllDisjunctions() {
2432 for (int i = 0; i < intervals_.size(); ++i) {
2433 IntervalVar* const interval_i = intervals_[i];
2434 if (interval_i->MayBePerformed()) {
2435 for (int j = i + 1; j < intervals_.size(); ++j) {
2436 IntervalVar* const interval_j = intervals_[j];
2437 if (interval_j->MayBePerformed()) {
2438 if (CapAdd(tasks_[i].demand->Min(), tasks_[j].demand->Min()) >
2439 capacity_->Max()) {
2440 Constraint* const constraint =
2441 solver()->MakeTemporalDisjunction(interval_i, interval_j);
2442 solver()->AddConstraint(constraint);
2443 }
2444 }
2445 }
2446 }
2447 }
2448 }
2449
2450 // Post a Sequence constraint for tasks that requires strictly more than half
2451 // of the resource
2452 void PostHighDemandSequenceConstraint() {
2453 Constraint* constraint = nullptr;
2454 { // Need a block to avoid memory leaks in case the AddConstraint fails
2455 std::vector<IntervalVar*> high_demand_intervals;
2456 high_demand_intervals.reserve(intervals_.size());
2457 for (int i = 0; i < demands_.size(); ++i) {
2458 const int64_t demand = tasks_[i].demand->Min();
2459 // Consider two tasks with demand d1 and d2 such that
2460 // d1 * 2 > capacity_ and d2 * 2 > capacity_.
2461 // Then d1 + d2 = 1/2 (d1 * 2 + d2 * 2)
2462 // > 1/2 (capacity_ + capacity_)
2463 // > capacity_.
2464 // Therefore these two tasks cannot overlap.
2465 if (demand * 2 > capacity_->Max() &&
2466 tasks_[i].interval->MayBePerformed()) {
2467 high_demand_intervals.push_back(tasks_[i].interval);
2468 }
2469 }
2470 if (high_demand_intervals.size() >= 2) {
2471 // If there are less than 2 such intervals, the constraint would do
2472 // nothing
2473 const std::string seq_name =
2474 absl::StrCat(name(), "-HighDemandSequence");
2475 constraint = solver()->MakeStrictDisjunctiveConstraint(
2476 high_demand_intervals, seq_name);
2477 }
2478 }
2479 if (constraint != nullptr) {
2480 solver()->AddConstraint(constraint);
2481 }
2482 }
2483
2484 // Populates the given vector with useful tasks, meaning the ones on which
2485 // some propagation can be done
2486 void PopulateVectorUsefulTasks(
2487 bool mirror, std::vector<VariableCumulativeTask*>* const useful_tasks) {
2488 DCHECK(useful_tasks->empty());
2489 for (int i = 0; i < tasks_.size(); ++i) {
2490 const VariableCumulativeTask& original_task = tasks_[i];
2491 IntervalVar* const interval = original_task.interval;
2492 // Check if exceed capacity
2493 if (original_task.demand->Min() > capacity_->Max()) {
2494 interval->SetPerformed(false);
2495 }
2496 // Add to the useful_task vector if it may be performed and that it
2497 // may actually consume some of the resource.
2498 if (interval->MayBePerformed() && original_task.demand->Max() > 0) {
2499 Solver* const s = solver();
2500 IntervalVar* const original_interval = original_task.interval;
2501 IntervalVar* const interval =
2502 mirror ? s->MakeMirrorInterval(original_interval)
2503 : original_interval;
2504 IntervalVar* const relaxed_max = s->MakeIntervalRelaxedMax(interval);
2505 useful_tasks->push_back(
2506 new VariableCumulativeTask(relaxed_max, original_task.demand));
2507 }
2508 }
2509 }
2510
2511 // Makes and returns an edge-finder or a time table, or nullptr if it is not
2512 // necessary.
2513 Constraint* MakeOneSidedConstraint(bool mirror, bool edge_finder,
2514 bool tt_sync) {
2515 std::vector<VariableCumulativeTask*> useful_tasks;
2516 PopulateVectorUsefulTasks(mirror, &useful_tasks);
2517 if (useful_tasks.empty()) {
2518 return nullptr;
2519 } else {
2520 Solver* const s = solver();
2521 if (edge_finder) {
2522 return s->RevAlloc(
2523 new EdgeFinder<VariableCumulativeTask>(s, useful_tasks, capacity_));
2524 }
2525 if (tt_sync) {
2526 return s->RevAlloc(new TimeTableSync<VariableCumulativeTask>(
2527 s, useful_tasks, capacity_));
2528 }
2529 return s->RevAlloc(new CumulativeTimeTable<VariableCumulativeTask>(
2530 s, useful_tasks, capacity_));
2531 }
2532 }
2533
2534 // Post a straight or mirrored edge-finder, if needed
2535 void PostOneSidedConstraint(bool mirror, bool edge_finder, bool tt_sync) {
2536 Constraint* const constraint =
2537 MakeOneSidedConstraint(mirror, edge_finder, tt_sync);
2538 if (constraint != nullptr) {
2539 solver()->AddConstraint(constraint);
2540 }
2541 }
2542
2543 // Capacity of the cumulative resource
2544 IntVar* const capacity_;
2545
2546 // The tasks that share the cumulative resource
2547 std::vector<VariableCumulativeTask> tasks_;
2548
2549 // Array of intervals for the visitor.
2550 const std::vector<IntervalVar*> intervals_;
2551 // Array of demands for the visitor.
2552 const std::vector<IntVar*> demands_;
2553
2554 DISALLOW_COPY_AND_ASSIGN(VariableDemandCumulativeConstraint);
2555};
2556} // namespace
2557
2558// Sequence Constraint
2559
2560// ----- Public class -----
2561
2562DisjunctiveConstraint::DisjunctiveConstraint(
2563 Solver* const s, const std::vector<IntervalVar*>& intervals,
2564 const std::string& name)
2565 : Constraint(s), intervals_(intervals) {
2566 if (!name.empty()) {
2567 set_name(name);
2568 }
2569 transition_time_ = [](int64_t x, int64_t y) { return 0; };
2570}
2571
2573
 // Sets the transition-time evaluator, falling back to a zero transition
 // time when a null function is passed.
 // NOTE(review): the extraction dropped source lines 2572 and 2574; per the
 // cross-reference residue at the end of this dump, line 2574 is the
 // signature `void DisjunctiveConstraint::SetTransitionTime(` -- confirm.
2575 std::function<int64_t(int64_t, int64_t)> transition_time) {
2576 if (transition_time != nullptr) {
2577 transition_time_ = transition_time;
2578 } else {
2579 transition_time_ = [](int64_t x, int64_t y) { return 0; };
2580 }
2581}
2582
2583// ---------- Factory methods ----------
2584
 // Factory: posts a non-strict disjunctive (no-overlap) constraint over the
 // given intervals.
 // NOTE(review): source line 2585 -- the signature, per the residue index
 // `DisjunctiveConstraint* Solver::MakeDisjunctiveConstraint(` -- was
 // dropped by the extraction; confirm upstream.
2586 const std::vector<IntervalVar*>& intervals, const std::string& name) {
2587 return RevAlloc(new FullDisjunctiveConstraint(this, intervals, name, false));
2588}
2589
 // Factory: same as MakeDisjunctiveConstraint but strict (last argument
 // true).
 // NOTE(review): source line 2590 -- the signature, per the residue index
 // `DisjunctiveConstraint* Solver::MakeStrictDisjunctiveConstraint(` -- was
 // dropped by the extraction; confirm upstream.
2591 const std::vector<IntervalVar*>& intervals, const std::string& name) {
2592 return RevAlloc(new FullDisjunctiveConstraint(this, intervals, name, true));
2593}
2594
2595// Demands are constant
2596
 // Cumulative with fixed int64 demands and a fixed capacity. Demands must be
 // non-negative. Degenerates to a disjunctive constraint for unit capacity
 // and all-unit demands.
2597Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2598 const std::vector<int64_t>& demands,
2599 int64_t capacity, const std::string& name) {
2600 CHECK_EQ(intervals.size(), demands.size());
2601 for (int i = 0; i < intervals.size(); ++i) {
2602 CHECK_GE(demands[i], 0);
2603 }
2604 if (capacity == 1 && AreAllOnes(demands)) {
2605 return MakeDisjunctiveConstraint(intervals, name);
2606 }
2607 return RevAlloc(new CumulativeConstraint(this, intervals, demands,
 // NOTE(review): source line 2608 was dropped by the extraction; it
 // presumably reads `MakeIntConst(capacity), name));` since
 // CumulativeConstraint takes an IntVar* capacity -- confirm upstream.
2609}
2610
2611Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2612 const std::vector<int>& demands,
2613 int64_t capacity, const std::string& name) {
2614 return MakeCumulative(intervals, ToInt64Vector(demands), capacity, name);
2615}
2616
2617Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2618 const std::vector<int64_t>& demands,
2619 IntVar* const capacity,
2620 const std::string& name) {
2621 CHECK_EQ(intervals.size(), demands.size());
2622 for (int i = 0; i < intervals.size(); ++i) {
2623 CHECK_GE(demands[i], 0);
2624 }
2625 return RevAlloc(
2626 new CumulativeConstraint(this, intervals, demands, capacity, name));
2627}
2628
2629Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2630 const std::vector<int>& demands,
2631 IntVar* const capacity,
2632 const std::string& name) {
2633 return MakeCumulative(intervals, ToInt64Vector(demands), capacity, name);
2634}
2635
2636// Demands are variable
2637
2638Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2639 const std::vector<IntVar*>& demands,
2640 int64_t capacity, const std::string& name) {
2641 CHECK_EQ(intervals.size(), demands.size());
2642 for (int i = 0; i < intervals.size(); ++i) {
2643 CHECK_GE(demands[i]->Min(), 0);
2644 }
2645 if (AreAllBound(demands)) {
2646 std::vector<int64_t> fixed_demands(demands.size());
2647 for (int i = 0; i < demands.size(); ++i) {
2648 fixed_demands[i] = demands[i]->Value();
2649 }
2650 return MakeCumulative(intervals, fixed_demands, capacity, name);
2651 }
2652 return RevAlloc(new VariableDemandCumulativeConstraint(
2653 this, intervals, demands, MakeIntConst(capacity), name));
2654}
2655
2656Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2657 const std::vector<IntVar*>& demands,
2658 IntVar* const capacity,
2659 const std::string& name) {
2660 CHECK_EQ(intervals.size(), demands.size());
2661 for (int i = 0; i < intervals.size(); ++i) {
2662 CHECK_GE(demands[i]->Min(), 0);
2663 }
2664 if (AreAllBound(demands)) {
2665 std::vector<int64_t> fixed_demands(demands.size());
2666 for (int i = 0; i < demands.size(); ++i) {
2667 fixed_demands[i] = demands[i]->Value();
2668 }
2669 return MakeCumulative(intervals, fixed_demands, capacity, name);
2670 }
2671 return RevAlloc(new VariableDemandCumulativeConstraint(
2672 this, intervals, demands, capacity, name));
2673}
2674} // namespace operations_research
int64_t max
Definition: alldiff_cst.cc:140
int64_t min
Definition: alldiff_cst.cc:139
int right_child
#define CHECK(condition)
Definition: base/logging.h:495
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:893
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:892
#define CHECK_EQ(val1, val2)
Definition: base/logging.h:703
#define CHECK_GE(val1, val2)
Definition: base/logging.h:707
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:895
#define DCHECK_GT(val1, val2)
Definition: base/logging.h:896
#define DCHECK_LT(val1, val2)
Definition: base/logging.h:894
#define LOG(severity)
Definition: base/logging.h:420
#define DCHECK(condition)
Definition: base/logging.h:890
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:891
#define VLOG(verboselevel)
Definition: base/logging.h:984
A constraint is the main modeling object.
void SetTransitionTime(Solver::IndexEvaluator2 transition_time)
Add a transition time between intervals.
Definition: resource.cc:2574
The class IntVar is a subset of IntExpr.
static IntegralType CeilOfRatio(IntegralType numerator, IntegralType denominator)
Definition: mathutil.h:39
virtual std::string name() const
Object naming.
DisjunctiveConstraint * MakeStrictDisjunctiveConstraint(const std::vector< IntervalVar * > &intervals, const std::string &name)
This constraint forces all interval vars into an non-overlapping sequence.
Definition: resource.cc:2590
DisjunctiveConstraint * MakeDisjunctiveConstraint(const std::vector< IntervalVar * > &intervals, const std::string &name)
This constraint forces all interval vars into an non-overlapping sequence.
Definition: resource.cc:2585
Constraint * MakeCumulative(const std::vector< IntervalVar * > &intervals, const std::vector< int64_t > &demands, int64_t capacity, const std::string &name)
This constraint forces that, for any integer t, the sum of the demands corresponding to an interval c...
Definition: resource.cc:2597
IntVar * MakeIntConst(int64_t val, const std::string &name)
IntConst will create a constant expression.
T * RevAlloc(T *object)
Registers the given object as being reversible.
const std::string name
IntVar * var
Definition: expr_array.cc:1874
const int FATAL
Definition: log_severity.h:32
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition: macros.h:29
const Collection::value_type::second_type FindPtrOrNull(const Collection &collection, const typename Collection::value_type::first_type &key)
Definition: map_util.h:89
void STLDeleteValues(T *v)
Definition: stl_util.h:382
void STLDeleteElements(T *container)
Definition: stl_util.h:372
double Distance(const VectorXd &vector1, const VectorXd &vector2, const Sharder &sharder)
Definition: sharder.cc:231
Collection of objects used to extend the Constraint Solver library.
int64_t CapAdd(int64_t x, int64_t y)
int64_t CapSub(int64_t x, int64_t y)
Demon * MakeDelayedConstraintDemon0(Solver *const s, T *const ct, void(T::*method)(), const std::string &name)
std::string JoinDebugStringPtr(const std::vector< T > &v, const std::string &separator)
Definition: string_array.h:45
int64_t CapProd(int64_t x, int64_t y)
std::vector< int64_t > ToInt64Vector(const std::vector< int > &input)
Definition: utilities.cc:828
bool AreAllOnes(const std::vector< T > &values)
bool AreAllBound(const std::vector< IntVar * > &vars)
std::string JoinDebugString(const std::vector< T > &v, const std::string &separator)
Definition: string_array.h:38
STL namespace.
static const int kNone
Definition: resource.cc:235
int64_t demand
Definition: resource.cc:125
int64_t residual_energetic_end_min
Definition: resource.cc:1245
int argmax_energy_opt
Definition: resource.cc:364
int64_t energetic_end_min
Definition: resource.cc:357
int64_t energy
Definition: resource.cc:354
int64_t energetic_end_min_opt
Definition: resource.cc:368
int64_t total_processing
Definition: resource.cc:199
int index
Definition: resource.cc:101
int64_t total_ect
Definition: resource.cc:200
static const int64_t kNotInitialized
Definition: resource.cc:1254
int64_t energy_opt
Definition: resource.cc:360
int argmax_energetic_end_min_opt
Definition: resource.cc:372
static const int64_t kNotAvailable
Definition: resource.cc:1301
int64_t time
Definition: resource.cc:1693
int64_t delta
Definition: resource.cc:1694
IntervalVar * interval
Definition: resource.cc:100
int64_t capacity
Rev< int64_t > start_max
Rev< int > performed
Rev< int64_t > end_min
int64_t start