// OR-Tools 9.3 — cumulative.cc
// (source text recovered from the generated documentation page for this file)
// Copyright 2010-2021 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
13
#include "ortools/sat/cumulative.h"

#include <algorithm>
#include <functional>
#include <vector>

// NOTE(review): the extraction elided several project includes between the
// visible ones; the list below keeps every include that survived and restores
// the headers this translation unit visibly depends on — confirm upstream.
#include "ortools/sat/disjunctive.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/integer_expr.h"
#include "ortools/sat/intervals.h"
#include "ortools/sat/linear_constraint.h"
#include "ortools/sat/model.h"
#include "ortools/sat/pb_constraint.h"
#include "ortools/sat/precedences.h"
#include "ortools/sat/sat_parameters.pb.h"
#include "ortools/sat/sat_solver.h"
#include "ortools/sat/timetable.h"
#include "ortools/sat/timetable_edgefinding.h"
37namespace operations_research {
38namespace sat {
39
40std::function<void(Model*)> Cumulative(
41 const std::vector<IntervalVariable>& vars,
42 const std::vector<AffineExpression>& demands, AffineExpression capacity,
44 return [=](Model* model) mutable {
45 if (vars.empty()) return;
46
47 auto* intervals = model->GetOrCreate<IntervalsRepository>();
48 auto* encoder = model->GetOrCreate<IntegerEncoder>();
49 auto* integer_trail = model->GetOrCreate<IntegerTrail>();
50 auto* watcher = model->GetOrCreate<GenericLiteralWatcher>();
51
52 // Redundant constraints to ensure that the resource capacity is high enough
53 // for each task. Also ensure that no task consumes more resource than what
54 // is available. This is useful because the subsequent propagators do not
55 // filter the capacity variable very well.
56 for (int i = 0; i < demands.size(); ++i) {
57 if (intervals->MaxSize(vars[i]) == 0) continue;
58
59 LinearConstraintBuilder builder(model, kMinIntegerValue, IntegerValue(0));
60 builder.AddTerm(demands[i], IntegerValue(1));
61 builder.AddTerm(capacity, IntegerValue(-1));
62 LinearConstraint ct = builder.Build();
63
64 std::vector<Literal> enforcement_literals;
65 if (intervals->IsOptional(vars[i])) {
66 enforcement_literals.push_back(intervals->PresenceLiteral(vars[i]));
67 }
68
69 // If the interval can be of size zero, it currently do not count towards
70 // the capacity. TODO(user): Change that since we have optional interval
71 // for this.
72 if (intervals->MinSize(vars[i]) == 0) {
73 enforcement_literals.push_back(encoder->GetOrCreateAssociatedLiteral(
74 intervals->Size(vars[i]).GreaterOrEqual(IntegerValue(1))));
75 }
76
77 if (enforcement_literals.empty()) {
79 } else {
80 LoadConditionalLinearConstraint(enforcement_literals, ct, model);
81 }
82 }
83
84 if (vars.size() == 1) return;
85
86 const SatParameters& parameters = *(model->GetOrCreate<SatParameters>());
87
88 // Detect a subset of intervals that needs to be in disjunction and add a
89 // Disjunctive() constraint over them.
90 if (parameters.use_disjunctive_constraint_in_cumulative_constraint()) {
91 // TODO(user): We need to exclude intervals that can be of size zero
92 // because the disjunctive do not "ignore" them like the cumulative
93 // does. That is, the interval [2,2) will be assumed to be in
94 // disjunction with [1, 3) for instance. We need to uniformize the
95 // handling of interval with size zero.
96 //
97 // TODO(user): improve the condition (see CL147454185).
98 std::vector<IntervalVariable> in_disjunction;
99 for (int i = 0; i < vars.size(); ++i) {
100 if (intervals->MinSize(vars[i]) > 0 &&
101 2 * integer_trail->LowerBound(demands[i]) >
102 integer_trail->UpperBound(capacity)) {
103 in_disjunction.push_back(vars[i]);
104 }
105 }
106
107 // Add a disjunctive constraint on the intervals in in_disjunction. Do not
108 // create the cumulative at all when all intervals must be in disjunction.
109 //
110 // TODO(user): Do proper experiments to see how beneficial this is, the
111 // disjunctive will propagate more but is also using slower algorithms.
112 // That said, this is more a question of optimizing the disjunctive
113 // propagation code.
114 //
115 // TODO(user): Another "known" idea is to detect pair of tasks that must
116 // be in disjunction and to create a Boolean to indicate which one is
117 // before the other. It shouldn't change the propagation, but may result
118 // in a faster one with smaller explanations, and the solver can also take
119 // decision on such Boolean.
120 //
121 // TODO(user): A better place for stuff like this could be in the
122 // presolver so that it is easier to disable and play with alternatives.
123 if (in_disjunction.size() > 1) model->Add(Disjunctive(in_disjunction));
124 if (in_disjunction.size() == vars.size()) return;
125 }
126
127 if (helper == nullptr) {
128 helper = new SchedulingConstraintHelper(vars, model);
129 model->TakeOwnership(helper);
130 }
131
132 // For each variables that is after a subset of task ends (i.e. like a
133 // makespan objective), we detect it and add a special constraint to
134 // propagate it.
135 //
136 // TODO(user): Models that include the makespan as a special interval might
137 // be better, but then not everyone does that. In particular this code
138 // allows to have decent lower bound on the large cumulative minizinc
139 // instances.
140 //
141 // TODO(user): this require the precedence constraints to be already loaded,
142 // and there is no guarantee of that currently. Find a more robust way.
143 //
144 // TODO(user): There is a bit of code duplication with the disjunctive
145 // precedence propagator. Abstract more?
146 {
147 std::vector<IntegerVariable> index_to_end_vars;
148 std::vector<int> index_to_task;
149 std::vector<PrecedencesPropagator::IntegerPrecedences> before;
150 index_to_end_vars.clear();
151 for (int t = 0; t < helper->NumTasks(); ++t) {
152 const AffineExpression& end_exp = helper->Ends()[t];
153
154 // TODO(user): Handle generic affine relation?
155 if (end_exp.var == kNoIntegerVariable || end_exp.coeff != 1) continue;
156 index_to_end_vars.push_back(end_exp.var);
157 index_to_task.push_back(t);
158 }
159 model->GetOrCreate<PrecedencesPropagator>()->ComputePrecedences(
160 index_to_end_vars, &before);
161 const int size = before.size();
162 for (int i = 0; i < size;) {
163 const IntegerVariable var = before[i].var;
165
166 IntegerValue min_offset = kMaxIntegerValue;
167 std::vector<int> subtasks;
168 for (; i < size && before[i].var == var; ++i) {
169 const int t = index_to_task[before[i].index];
170 subtasks.push_back(t);
171
172 // We have var >= end_exp.var + offset, so
173 // var >= (end_exp.var + end_exp.cte) + (offset - end_exp.cte)
174 // var >= task end + new_offset.
175 const AffineExpression& end_exp = helper->Ends()[t];
176 min_offset =
177 std::min(min_offset, before[i].offset - end_exp.constant);
178 }
179
180 if (subtasks.size() > 1) {
183 demands, subtasks,
184 integer_trail, helper);
185 constraint->RegisterWith(watcher);
186 model->TakeOwnership(constraint);
187 }
188 }
189 }
190
191 // Propagator responsible for applying Timetabling filtering rule. It
192 // increases the minimum of the start variables, decrease the maximum of the
193 // end variables, and increase the minimum of the capacity variable.
194 TimeTablingPerTask* time_tabling =
195 new TimeTablingPerTask(demands, capacity, integer_trail, helper);
196 time_tabling->RegisterWith(watcher);
197 model->TakeOwnership(time_tabling);
198
199 // Propagator responsible for applying the Overload Checking filtering rule.
200 // It increases the minimum of the capacity variable.
201 if (parameters.use_overload_checker_in_cumulative_constraint()) {
203 }
204
205 // Propagator responsible for applying the Timetable Edge finding filtering
206 // rule. It increases the minimum of the start variables and decreases the
207 // maximum of the end variables,
208 if (parameters.use_timetable_edge_finding_in_cumulative_constraint()) {
209 TimeTableEdgeFinding* time_table_edge_finding =
210 new TimeTableEdgeFinding(demands, capacity, helper, integer_trail);
211 time_table_edge_finding->RegisterWith(watcher);
212 model->TakeOwnership(time_table_edge_finding);
213 }
214 };
215}
216
217std::function<void(Model*)> CumulativeTimeDecomposition(
218 const std::vector<IntervalVariable>& vars,
219 const std::vector<AffineExpression>& demands, AffineExpression capacity,
221 return [=](Model* model) {
222 if (vars.empty()) return;
223
224 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
225 CHECK(integer_trail->IsFixed(capacity));
226 const Coefficient fixed_capacity(
227 integer_trail->UpperBound(capacity).value());
228
229 const int num_tasks = vars.size();
230 SatSolver* sat_solver = model->GetOrCreate<SatSolver>();
231 IntegerEncoder* encoder = model->GetOrCreate<IntegerEncoder>();
232 IntervalsRepository* intervals = model->GetOrCreate<IntervalsRepository>();
233
234 std::vector<IntegerVariable> start_vars;
235 std::vector<IntegerVariable> end_vars;
236 std::vector<IntegerValue> fixed_demands;
237
238 for (int t = 0; t < num_tasks; ++t) {
239 start_vars.push_back(intervals->StartVar(vars[t]));
240 end_vars.push_back(intervals->EndVar(vars[t]));
241 CHECK(integer_trail->IsFixed(demands[t]));
242 fixed_demands.push_back(integer_trail->LowerBound(demands[t]));
243 }
244
245 // Compute time range.
246 IntegerValue min_start = kMaxIntegerValue;
247 IntegerValue max_end = kMinIntegerValue;
248 for (int t = 0; t < num_tasks; ++t) {
249 min_start = std::min(min_start, integer_trail->LowerBound(start_vars[t]));
250 max_end = std::max(max_end, integer_trail->UpperBound(end_vars[t]));
251 }
252
253 for (IntegerValue time = min_start; time < max_end; ++time) {
254 std::vector<LiteralWithCoeff> literals_with_coeff;
255 for (int t = 0; t < num_tasks; ++t) {
256 sat_solver->Propagate();
257 const IntegerValue start_min = integer_trail->LowerBound(start_vars[t]);
258 const IntegerValue end_max = integer_trail->UpperBound(end_vars[t]);
259 if (end_max <= time || time < start_min || fixed_demands[t] == 0) {
260 continue;
261 }
262
263 // Task t consumes the resource at time if consume_condition is true.
264 std::vector<Literal> consume_condition;
265 const Literal consume = Literal(model->Add(NewBooleanVariable()), true);
266
267 // Task t consumes the resource at time if it is present.
268 if (intervals->IsOptional(vars[t])) {
269 consume_condition.push_back(intervals->PresenceLiteral(vars[t]));
270 }
271
272 // Task t overlaps time.
273 consume_condition.push_back(encoder->GetOrCreateAssociatedLiteral(
274 IntegerLiteral::LowerOrEqual(start_vars[t], IntegerValue(time))));
275 consume_condition.push_back(encoder->GetOrCreateAssociatedLiteral(
277 IntegerValue(time + 1))));
278
279 model->Add(ReifiedBoolAnd(consume_condition, consume));
280
281 // TODO(user): this is needed because we currently can't create a
282 // boolean variable if the model is unsat.
283 if (sat_solver->IsModelUnsat()) return;
284
285 literals_with_coeff.push_back(
286 LiteralWithCoeff(consume, Coefficient(fixed_demands[t].value())));
287 }
288 // The profile cannot exceed the capacity at time.
289 sat_solver->AddLinearConstraint(false, Coefficient(0), true,
290 fixed_capacity, &literals_with_coeff);
291
292 // Abort if UNSAT.
293 if (sat_solver->IsModelUnsat()) return;
294 }
295 };
296}
297
298std::function<void(Model*)> CumulativeUsingReservoir(
299 const std::vector<IntervalVariable>& vars,
300 const std::vector<AffineExpression>& demands, AffineExpression capacity,
302 return [=](Model* model) {
303 if (vars.empty()) return;
304
305 auto* integer_trail = model->GetOrCreate<IntegerTrail>();
306 auto* encoder = model->GetOrCreate<IntegerEncoder>();
307 auto* intervals = model->GetOrCreate<IntervalsRepository>();
308
309 CHECK(integer_trail->IsFixed(capacity));
310 const IntegerValue fixed_capacity(
311 integer_trail->UpperBound(capacity).value());
312
313 std::vector<AffineExpression> times;
314 std::vector<IntegerValue> deltas;
315 std::vector<Literal> presences;
316
317 const int num_tasks = vars.size();
318 for (int t = 0; t < num_tasks; ++t) {
319 CHECK(integer_trail->IsFixed(demands[t]));
320 times.push_back(intervals->StartVar(vars[t]));
321 deltas.push_back(integer_trail->LowerBound(demands[t]));
322 times.push_back(intervals->EndVar(vars[t]));
323 deltas.push_back(-integer_trail->LowerBound(demands[t]));
324 if (intervals->IsOptional(vars[t])) {
325 presences.push_back(intervals->PresenceLiteral(vars[t]));
326 presences.push_back(intervals->PresenceLiteral(vars[t]));
327 } else {
328 presences.push_back(encoder->GetTrueLiteral());
329 presences.push_back(encoder->GetTrueLiteral());
330 }
331 }
332 AddReservoirConstraint(times, deltas, presences, 0, fixed_capacity.value(),
333 model);
334 };
335}
336
337} // namespace sat
338} // namespace operations_research
int64_t max
Definition: alldiff_cst.cc:140
int64_t min
Definition: alldiff_cst.cc:139
#define CHECK(condition)
Definition: base/logging.h:495
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:892
bool IsFixed(IntegerVariable i) const
Definition: integer.h:1453
IntegerValue UpperBound(IntegerVariable i) const
Definition: integer.h:1449
IntegerValue LowerBound(IntegerVariable i) const
Definition: integer.h:1445
void AddTerm(IntegerVariable var, IntegerValue coeff)
Class that owns everything related to a particular optimization model.
Definition: sat/model.h:42
bool AddLinearConstraint(bool use_lower_bound, Coefficient lower_bound, bool use_upper_bound, Coefficient upper_bound, std::vector< LiteralWithCoeff > *cst)
Definition: sat_solver.cc:343
const std::vector< AffineExpression > & Ends() const
Definition: intervals.h:340
void RegisterWith(GenericLiteralWatcher *watcher)
void RegisterWith(GenericLiteralWatcher *watcher)
Definition: timetable.cc:318
SatParameters parameters
const Constraint * ct
int64_t value
IntVar * var
Definition: expr_array.cc:1874
GRBmodel * model
std::tuple< int64_t, int64_t, const double > Coefficient
void AddReservoirConstraint(std::vector< AffineExpression > times, std::vector< IntegerValue > deltas, std::vector< Literal > presences, int64_t min_level, int64_t max_level, Model *model)
Definition: timetable.cc:32
void AddCumulativeOverloadChecker(const std::vector< AffineExpression > &demands, AffineExpression capacity, SchedulingConstraintHelper *helper, Model *model)
constexpr IntegerValue kMaxIntegerValue(std::numeric_limits< IntegerValue::ValueType >::max() - 1)
std::function< BooleanVariable(Model *)> NewBooleanVariable()
Definition: integer.h:1608
void LoadConditionalLinearConstraint(const absl::Span< const Literal > enforcement_literals, const LinearConstraint &cst, Model *model)
Definition: integer_expr.h:643
constexpr IntegerValue kMinIntegerValue(-kMaxIntegerValue.value())
const IntegerVariable kNoIntegerVariable(-1)
std::function< void(Model *)> Cumulative(const std::vector< IntervalVariable > &vars, const std::vector< AffineExpression > &demands, AffineExpression capacity, SchedulingConstraintHelper *helper)
Definition: cumulative.cc:40
void LoadLinearConstraint(const ConstraintProto &ct, Model *m)
std::function< void(Model *)> Disjunctive(const std::vector< IntervalVariable > &vars)
Definition: disjunctive.cc:38
std::function< void(Model *)> ReifiedBoolAnd(const std::vector< Literal > &literals, Literal r)
Definition: sat_solver.h:999
std::function< void(Model *)> CumulativeTimeDecomposition(const std::vector< IntervalVariable > &vars, const std::vector< AffineExpression > &demands, AffineExpression capacity, SchedulingConstraintHelper *helper)
Definition: cumulative.cc:217
std::function< void(Model *)> CumulativeUsingReservoir(const std::vector< IntervalVariable > &vars, const std::vector< AffineExpression > &demands, AffineExpression capacity, SchedulingConstraintHelper *helper)
Definition: cumulative.cc:298
Collection of objects used to extend the Constraint Solver library.
int64_t time
Definition: resource.cc:1693
int64_t capacity
Rev< int64_t > end_max
Rev< int64_t > start_min
static IntegerLiteral LowerOrEqual(IntegerVariable i, IntegerValue bound)
Definition: integer.h:1393
static IntegerLiteral GreaterOrEqual(IntegerVariable i, IntegerValue bound)
Definition: integer.h:1387