// ortools/sat/linear_relaxation.cc — OR-Tools 9.2 (CP-SAT linear relaxation).
1 // Copyright 2010-2021 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
15 
16 #include <algorithm>
17 #include <cstdint>
18 #include <limits>
19 #include <vector>
20 
21 #include "absl/container/flat_hash_set.h"
23 #include "ortools/base/stl_util.h"
24 #include "ortools/sat/circuit.h" // for ReindexArcs.
27 #include "ortools/sat/cuts.h"
29 #include "ortools/sat/integer.h"
31 #include "ortools/sat/intervals.h"
34 #include "ortools/sat/sat_base.h"
38 
39 namespace operations_research {
40 namespace sat {
41 
42 bool AppendFullEncodingRelaxation(IntegerVariable var, const Model& model,
43  LinearRelaxation* relaxation) {
44  const auto* encoder = model.Get<IntegerEncoder>();
45  if (encoder == nullptr) return false;
46  if (!encoder->VariableIsFullyEncoded(var)) return false;
47 
48  const auto& encoding = encoder->FullDomainEncoding(var);
49  const IntegerValue var_min = model.Get<IntegerTrail>()->LowerBound(var);
50 
51  LinearConstraintBuilder at_least_one(&model, IntegerValue(1),
53  LinearConstraintBuilder encoding_ct(&model, var_min, var_min);
54  encoding_ct.AddTerm(var, IntegerValue(1));
55 
56  // Create the constraint if all literal have a view.
57  std::vector<Literal> at_most_one;
58 
59  for (const auto value_literal : encoding) {
60  const Literal lit = value_literal.literal;
61  const IntegerValue delta = value_literal.value - var_min;
62  DCHECK_GE(delta, IntegerValue(0));
63  at_most_one.push_back(lit);
64  if (!at_least_one.AddLiteralTerm(lit, IntegerValue(1))) return false;
65  if (delta != IntegerValue(0)) {
66  if (!encoding_ct.AddLiteralTerm(lit, -delta)) return false;
67  }
68  }
69 
70  relaxation->linear_constraints.push_back(at_least_one.Build());
71  relaxation->linear_constraints.push_back(encoding_ct.Build());
72  relaxation->at_most_ones.push_back(at_most_one);
73  return true;
74 }
75 
76 namespace {
77 
78 std::pair<IntegerValue, IntegerValue> GetMinAndMaxNotEncoded(
79  IntegerVariable var,
80  const absl::flat_hash_set<IntegerValue>& encoded_values,
81  const Model& model) {
82  const auto* domains = model.Get<IntegerDomains>();
83  if (domains == nullptr || var >= domains->size()) {
85  }
86 
87  // The domain can be large, but the list of values shouldn't, so this
88  // runs in O(encoded_values.size());
89  IntegerValue min = kMaxIntegerValue;
90  for (const int64_t v : (*domains)[var].Values()) {
91  if (!encoded_values.contains(IntegerValue(v))) {
92  min = IntegerValue(v);
93  break;
94  }
95  }
96 
97  IntegerValue max = kMinIntegerValue;
98  for (const int64_t v : (*domains)[NegationOf(var)].Values()) {
99  if (!encoded_values.contains(IntegerValue(-v))) {
100  max = IntegerValue(-v);
101  break;
102  }
103  }
104 
105  return {min, max};
106 }
107 
108 bool LinMaxContainsOnlyOneVarInExpressions(const ConstraintProto& ct) {
109  CHECK_EQ(ct.constraint_case(), ConstraintProto::ConstraintCase::kLinMax);
110  int current_var = -1;
111  for (const LinearExpressionProto& expr : ct.lin_max().exprs()) {
112  if (expr.vars().empty()) continue;
113  if (expr.vars().size() > 1) return false;
114  const int var = PositiveRef(expr.vars(0));
115  if (current_var == -1) {
116  current_var = var;
117  } else if (var != current_var) {
118  return false;
119  }
120  }
121  return true;
122 }
123 
124 // Collect all the affines expressions in a LinMax constraint.
125 // It checks that these are indeed affine expressions, and that they all share
126 // the same variable.
127 // It returns the shared variable, as well as a vector of pairs
128 // (coefficient, offset) when each affine is coefficient * shared_var + offset.
129 void CollectAffineExpressionWithSingleVariable(
130  const ConstraintProto& ct, CpModelMapping* mapping, IntegerVariable* var,
131  std::vector<std::pair<IntegerValue, IntegerValue>>* affines) {
132  DCHECK(LinMaxContainsOnlyOneVarInExpressions(ct));
133  CHECK_EQ(ct.constraint_case(), ConstraintProto::ConstraintCase::kLinMax);
135  affines->clear();
136  for (const LinearExpressionProto& expr : ct.lin_max().exprs()) {
137  if (expr.vars().empty()) {
138  affines->push_back({IntegerValue(0), IntegerValue(expr.offset())});
139  } else {
140  CHECK_EQ(expr.vars().size(), 1);
141  const IntegerVariable affine_var = mapping->Integer(expr.vars(0));
142  if (*var == kNoIntegerVariable) {
143  *var = PositiveVariable(affine_var);
144  }
145  if (VariableIsPositive(affine_var)) {
146  CHECK_EQ(affine_var, *var);
147  affines->push_back(
148  {IntegerValue(expr.coeffs(0)), IntegerValue(expr.offset())});
149  } else {
150  CHECK_EQ(NegationOf(affine_var), *var);
151  affines->push_back(
152  {IntegerValue(-expr.coeffs(0)), IntegerValue(expr.offset())});
153  }
154  }
155  }
156 }
157 
158 } // namespace
159 
161  const Model& model,
162  LinearRelaxation* relaxation,
163  int* num_tight, int* num_loose) {
164  const auto* encoder = model.Get<IntegerEncoder>();
165  const auto* integer_trail = model.Get<IntegerTrail>();
166  if (encoder == nullptr || integer_trail == nullptr) return;
167 
168  std::vector<Literal> at_most_one_ct;
169  absl::flat_hash_set<IntegerValue> encoded_values;
170  std::vector<ValueLiteralPair> encoding;
171  {
172  const std::vector<ValueLiteralPair>& initial_encoding =
173  encoder->PartialDomainEncoding(var);
174  if (initial_encoding.empty()) return;
175  for (const auto value_literal : initial_encoding) {
176  const Literal literal = value_literal.literal;
177 
178  // Note that we skip pairs that do not have an Integer view.
179  if (encoder->GetLiteralView(literal) == kNoIntegerVariable &&
180  encoder->GetLiteralView(literal.Negated()) == kNoIntegerVariable) {
181  continue;
182  }
183 
184  encoding.push_back(value_literal);
185  at_most_one_ct.push_back(literal);
186  encoded_values.insert(value_literal.value);
187  }
188  }
189  if (encoded_values.empty()) return;
190 
191  // TODO(user): PartialDomainEncoding() filter pair corresponding to literal
192  // set to false, however the initial variable Domain is not always updated. As
193  // a result, these min/max can be larger than in reality. Try to fix this even
194  // if in practice this is a rare occurence, as the presolve should have
195  // propagated most of what we can.
196  const auto [min_not_encoded, max_not_encoded] =
197  GetMinAndMaxNotEncoded(var, encoded_values, model);
198 
199  // This means that there are no non-encoded value and we have a full encoding.
200  // We substract the minimum value to reduce its size.
201  if (min_not_encoded == kMaxIntegerValue) {
202  const IntegerValue rhs = encoding[0].value;
203  LinearConstraintBuilder at_least_one(&model, IntegerValue(1),
205  LinearConstraintBuilder encoding_ct(&model, rhs, rhs);
206  encoding_ct.AddTerm(var, IntegerValue(1));
207  for (const auto value_literal : encoding) {
208  const Literal lit = value_literal.literal;
209  CHECK(at_least_one.AddLiteralTerm(lit, IntegerValue(1)));
210 
211  const IntegerValue delta = value_literal.value - rhs;
212  if (delta != IntegerValue(0)) {
213  CHECK_GE(delta, IntegerValue(0));
214  CHECK(encoding_ct.AddLiteralTerm(lit, -delta));
215  }
216  }
217 
218  relaxation->linear_constraints.push_back(at_least_one.Build());
219  relaxation->linear_constraints.push_back(encoding_ct.Build());
220  relaxation->at_most_ones.push_back(at_most_one_ct);
221  ++*num_tight;
222  return;
223  }
224 
225  // In this special case, the two constraints below can be merged into an
226  // equality: var = rhs + sum l_i * (value_i - rhs).
227  if (min_not_encoded == max_not_encoded) {
228  const IntegerValue rhs = min_not_encoded;
229  LinearConstraintBuilder encoding_ct(&model, rhs, rhs);
230  encoding_ct.AddTerm(var, IntegerValue(1));
231  for (const auto value_literal : encoding) {
232  CHECK(encoding_ct.AddLiteralTerm(value_literal.literal,
233  rhs - value_literal.value));
234  }
235  relaxation->at_most_ones.push_back(at_most_one_ct);
236  relaxation->linear_constraints.push_back(encoding_ct.Build());
237  ++*num_tight;
238  return;
239  }
240 
241  // min + sum l_i * (value_i - min) <= var.
242  const IntegerValue d_min = min_not_encoded;
243  LinearConstraintBuilder lower_bound_ct(&model, d_min, kMaxIntegerValue);
244  lower_bound_ct.AddTerm(var, IntegerValue(1));
245  for (const auto value_literal : encoding) {
246  CHECK(lower_bound_ct.AddLiteralTerm(value_literal.literal,
247  d_min - value_literal.value));
248  }
249 
250  // var <= max + sum l_i * (value_i - max).
251  const IntegerValue d_max = max_not_encoded;
252  LinearConstraintBuilder upper_bound_ct(&model, kMinIntegerValue, d_max);
253  upper_bound_ct.AddTerm(var, IntegerValue(1));
254  for (const auto value_literal : encoding) {
255  CHECK(upper_bound_ct.AddLiteralTerm(value_literal.literal,
256  d_max - value_literal.value));
257  }
258 
259  // Note that empty/trivial constraints will be filtered later.
260  relaxation->at_most_ones.push_back(at_most_one_ct);
261  relaxation->linear_constraints.push_back(lower_bound_ct.Build());
262  relaxation->linear_constraints.push_back(upper_bound_ct.Build());
263  ++*num_loose;
264 }
265 
267  const Model& model,
268  LinearRelaxation* relaxation) {
269  const auto* integer_trail = model.Get<IntegerTrail>();
270  const auto* encoder = model.Get<IntegerEncoder>();
271  if (integer_trail == nullptr || encoder == nullptr) return;
272 
273  const std::map<IntegerValue, Literal>& greater_than_encoding =
274  encoder->PartialGreaterThanEncoding(var);
275  if (greater_than_encoding.empty()) return;
276 
277  // Start by the var >= side.
278  // And also add the implications between used literals.
279  {
280  IntegerValue prev_used_bound = integer_trail->LowerBound(var);
281  LinearConstraintBuilder lb_constraint(&model, prev_used_bound,
283  lb_constraint.AddTerm(var, IntegerValue(1));
284  LiteralIndex prev_literal_index = kNoLiteralIndex;
285  for (const auto entry : greater_than_encoding) {
286  if (entry.first <= prev_used_bound) continue;
287 
288  const LiteralIndex literal_index = entry.second.Index();
289  const IntegerValue diff = prev_used_bound - entry.first;
290 
291  // Skip the entry if the literal doesn't have a view.
292  if (!lb_constraint.AddLiteralTerm(entry.second, diff)) continue;
293  if (prev_literal_index != kNoLiteralIndex) {
294  // Add var <= prev_var, which is the same as var + not(prev_var) <= 1
295  relaxation->at_most_ones.push_back(
296  {Literal(literal_index), Literal(prev_literal_index).Negated()});
297  }
298  prev_used_bound = entry.first;
299  prev_literal_index = literal_index;
300  }
301  relaxation->linear_constraints.push_back(lb_constraint.Build());
302  }
303 
304  // Do the same for the var <= side by using NegationOfVar().
305  // Note that we do not need to add the implications between literals again.
306  {
307  IntegerValue prev_used_bound = integer_trail->LowerBound(NegationOf(var));
308  LinearConstraintBuilder lb_constraint(&model, prev_used_bound,
310  lb_constraint.AddTerm(var, IntegerValue(-1));
311  for (const auto entry :
312  encoder->PartialGreaterThanEncoding(NegationOf(var))) {
313  if (entry.first <= prev_used_bound) continue;
314  const IntegerValue diff = prev_used_bound - entry.first;
315 
316  // Skip the entry if the literal doesn't have a view.
317  if (!lb_constraint.AddLiteralTerm(entry.second, diff)) continue;
318  prev_used_bound = entry.first;
319  }
320  relaxation->linear_constraints.push_back(lb_constraint.Build());
321  }
322 }
323 
324 namespace {
325 // Adds enforcing_lit => target <= bounding_var to relaxation.
326 void AppendEnforcedUpperBound(const Literal enforcing_lit,
327  const IntegerVariable target,
328  const IntegerVariable bounding_var, Model* model,
329  LinearRelaxation* relaxation) {
330  IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
331  const IntegerValue max_target_value = integer_trail->UpperBound(target);
332  const IntegerValue min_var_value = integer_trail->LowerBound(bounding_var);
333  const IntegerValue max_term_value = max_target_value - min_var_value;
334  LinearConstraintBuilder lc(model, kMinIntegerValue, max_term_value);
335  lc.AddTerm(target, IntegerValue(1));
336  lc.AddTerm(bounding_var, IntegerValue(-1));
337  CHECK(lc.AddLiteralTerm(enforcing_lit, max_term_value));
338  relaxation->linear_constraints.push_back(lc.Build());
339 }
340 
341 // Adds {enforcing_lits} => rhs_domain_min <= expr <= rhs_domain_max.
342 // Requires expr offset to be 0.
343 void AppendEnforcedLinearExpression(
344  const std::vector<Literal>& enforcing_literals,
345  const LinearExpression& expr, const IntegerValue rhs_domain_min,
346  const IntegerValue rhs_domain_max, const Model& model,
347  LinearRelaxation* relaxation) {
348  CHECK_EQ(expr.offset, IntegerValue(0));
349  const LinearExpression canonical_expr = CanonicalizeExpr(expr);
350  const IntegerTrail* integer_trail = model.Get<IntegerTrail>();
351  const IntegerValue min_expr_value =
352  LinExprLowerBound(canonical_expr, *integer_trail);
353 
354  if (rhs_domain_min > min_expr_value) {
355  // And(ei) => terms >= rhs_domain_min
356  // <=> Sum_i (~ei * (rhs_domain_min - min_expr_value)) + terms >=
357  // rhs_domain_min
358  LinearConstraintBuilder lc(&model, rhs_domain_min, kMaxIntegerValue);
359  for (const Literal& literal : enforcing_literals) {
360  CHECK(lc.AddLiteralTerm(literal.Negated(),
361  rhs_domain_min - min_expr_value));
362  }
363  for (int i = 0; i < canonical_expr.vars.size(); i++) {
364  lc.AddTerm(canonical_expr.vars[i], canonical_expr.coeffs[i]);
365  }
366  relaxation->linear_constraints.push_back(lc.Build());
367  }
368  const IntegerValue max_expr_value =
369  LinExprUpperBound(canonical_expr, *integer_trail);
370  if (rhs_domain_max < max_expr_value) {
371  // And(ei) => terms <= rhs_domain_max
372  // <=> Sum_i (~ei * (rhs_domain_max - max_expr_value)) + terms <=
373  // rhs_domain_max
374  LinearConstraintBuilder lc(&model, kMinIntegerValue, rhs_domain_max);
375  for (const Literal& literal : enforcing_literals) {
376  CHECK(lc.AddLiteralTerm(literal.Negated(),
377  rhs_domain_max - max_expr_value));
378  }
379  for (int i = 0; i < canonical_expr.vars.size(); i++) {
380  lc.AddTerm(canonical_expr.vars[i], canonical_expr.coeffs[i]);
381  }
382  relaxation->linear_constraints.push_back(lc.Build());
383  }
384 }
385 
386 bool AllLiteralsHaveViews(const IntegerEncoder& encoder,
387  const std::vector<Literal>& literals) {
388  for (const Literal lit : literals) {
389  if (!encoder.LiteralOrNegationHasView(lit)) return false;
390  }
391  return true;
392 }
393 
394 } // namespace
395 
397  LinearRelaxation* relaxation) {
398  auto* mapping = model->GetOrCreate<CpModelMapping>();
399  LinearConstraintBuilder lc(model, IntegerValue(1), kMaxIntegerValue);
400  for (const int enforcement_ref : ct.enforcement_literal()) {
401  CHECK(lc.AddLiteralTerm(mapping->Literal(NegatedRef(enforcement_ref)),
402  IntegerValue(1)));
403  }
404  for (const int ref : ct.bool_or().literals()) {
405  CHECK(lc.AddLiteralTerm(mapping->Literal(ref), IntegerValue(1)));
406  }
407  relaxation->linear_constraints.push_back(lc.Build());
408 }
409 
411  LinearRelaxation* relaxation) {
412  // TODO(user): These constraints can be many, and if they are not regrouped
413  // in big at most ones, then they should probably only added lazily as cuts.
414  // Regroup this with future clique-cut separation logic.
415  if (!HasEnforcementLiteral(ct)) return;
416 
417  auto* mapping = model->GetOrCreate<CpModelMapping>();
418  if (ct.enforcement_literal().size() == 1) {
419  const Literal enforcement = mapping->Literal(ct.enforcement_literal(0));
420  for (const int ref : ct.bool_and().literals()) {
421  relaxation->at_most_ones.push_back(
422  {enforcement, mapping->Literal(ref).Negated()});
423  }
424  return;
425  }
426 
427  // Andi(e_i) => Andj(x_j)
428  // <=> num_rhs_terms <= Sum_j(x_j) + num_rhs_terms * Sum_i(~e_i)
429  int num_literals = ct.bool_and().literals_size();
430  LinearConstraintBuilder lc(model, IntegerValue(num_literals),
432  for (const int ref : ct.bool_and().literals()) {
433  CHECK(lc.AddLiteralTerm(mapping->Literal(ref), IntegerValue(1)));
434  }
435  for (const int enforcement_ref : ct.enforcement_literal()) {
436  CHECK(lc.AddLiteralTerm(mapping->Literal(NegatedRef(enforcement_ref)),
437  IntegerValue(num_literals)));
438  }
439  relaxation->linear_constraints.push_back(lc.Build());
440 }
441 
443  LinearRelaxation* relaxation) {
444  if (HasEnforcementLiteral(ct)) return;
445 
446  auto* mapping = model->GetOrCreate<CpModelMapping>();
447  relaxation->at_most_ones.push_back(
448  mapping->Literals(ct.at_most_one().literals()));
449 }
450 
452  LinearRelaxation* relaxation) {
453  if (HasEnforcementLiteral(ct)) return;
454  auto* mapping = model->GetOrCreate<CpModelMapping>();
455  auto* encoder = model->GetOrCreate<IntegerEncoder>();
456 
457  const std::vector<Literal> literals =
458  mapping->Literals(ct.exactly_one().literals());
459  if (AllLiteralsHaveViews(*encoder, literals)) {
460  LinearConstraintBuilder lc(model, IntegerValue(1), IntegerValue(1));
461  for (const Literal lit : literals) {
462  CHECK(lc.AddLiteralTerm(lit, IntegerValue(1)));
463  }
464  relaxation->linear_constraints.push_back(lc.Build());
465  } else {
466  // We just encode the at most one part that might be partially linearized
467  // later.
468  relaxation->at_most_ones.push_back(literals);
469  }
470 }
471 
473  int num_literals, Model* model, LinearRelaxation* relaxation) {
474  auto* encoder = model->GetOrCreate<IntegerEncoder>();
475 
476  if (num_literals == 1) {
477  // This is not supposed to happen, but it is easy enough to cover, just
478  // in case. We might however want to use encoder->GetTrueLiteral().
479  const IntegerVariable var = model->Add(NewIntegerVariable(1, 1));
480  const Literal lit =
481  encoder->GetOrCreateLiteralAssociatedToEquality(var, IntegerValue(1));
482  return {lit};
483  }
484 
485  if (num_literals == 2) {
486  const IntegerVariable var = model->Add(NewIntegerVariable(0, 1));
487  const Literal lit =
488  encoder->GetOrCreateLiteralAssociatedToEquality(var, IntegerValue(1));
489 
490  // TODO(user): We shouldn't need to create this view ideally. Even better,
491  // we should be able to handle Literal natively in the linear relaxation,
492  // but that is a lot of work.
493  const IntegerVariable var2 = model->Add(NewIntegerVariable(0, 1));
494  encoder->AssociateToIntegerEqualValue(lit.Negated(), var2, IntegerValue(1));
495 
496  return {lit, lit.Negated()};
497  }
498 
499  std::vector<Literal> literals;
500  LinearConstraintBuilder lc_builder(model, IntegerValue(1), IntegerValue(1));
501  for (int i = 0; i < num_literals; ++i) {
502  const IntegerVariable var = model->Add(NewIntegerVariable(0, 1));
503  const Literal lit =
504  encoder->GetOrCreateLiteralAssociatedToEquality(var, IntegerValue(1));
505  literals.push_back(lit);
506  CHECK(lc_builder.AddLiteralTerm(lit, IntegerValue(1)));
507  }
508  model->Add(ExactlyOneConstraint(literals));
509  relaxation->linear_constraints.push_back(lc_builder.Build());
510  return literals;
511 }
512 
514  LinearRelaxation* relaxation) {
515  if (HasEnforcementLiteral(ct)) return;
516  auto* mapping = model->GetOrCreate<CpModelMapping>();
517  const int num_arcs = ct.circuit().literals_size();
518  CHECK_EQ(num_arcs, ct.circuit().tails_size());
519  CHECK_EQ(num_arcs, ct.circuit().heads_size());
520 
521  // Each node must have exactly one incoming and one outgoing arc (note
522  // that it can be the unique self-arc of this node too).
523  std::map<int, std::vector<Literal>> incoming_arc_constraints;
524  std::map<int, std::vector<Literal>> outgoing_arc_constraints;
525  for (int i = 0; i < num_arcs; i++) {
526  const Literal arc = mapping->Literal(ct.circuit().literals(i));
527  const int tail = ct.circuit().tails(i);
528  const int head = ct.circuit().heads(i);
529 
530  // Make sure this literal has a view.
532  outgoing_arc_constraints[tail].push_back(arc);
533  incoming_arc_constraints[head].push_back(arc);
534  }
535  for (const auto* node_map :
536  {&outgoing_arc_constraints, &incoming_arc_constraints}) {
537  for (const auto& entry : *node_map) {
538  const std::vector<Literal>& exactly_one = entry.second;
539  if (exactly_one.size() > 1) {
540  LinearConstraintBuilder at_least_one_lc(model, IntegerValue(1),
542  for (const Literal l : exactly_one) {
543  CHECK(at_least_one_lc.AddLiteralTerm(l, IntegerValue(1)));
544  }
545 
546  // We separate the two constraints.
547  relaxation->at_most_ones.push_back(exactly_one);
548  relaxation->linear_constraints.push_back(at_least_one_lc.Build());
549  }
550  }
551  }
552 }
553 
555  LinearRelaxation* relaxation) {
556  if (HasEnforcementLiteral(ct)) return;
557  auto* mapping = model->GetOrCreate<CpModelMapping>();
558  const int num_arcs = ct.routes().literals_size();
559  CHECK_EQ(num_arcs, ct.routes().tails_size());
560  CHECK_EQ(num_arcs, ct.routes().heads_size());
561 
562  // Each node except node zero must have exactly one incoming and one outgoing
563  // arc (note that it can be the unique self-arc of this node too). For node
564  // zero, the number of incoming arcs should be the same as the number of
565  // outgoing arcs.
566  std::map<int, std::vector<Literal>> incoming_arc_constraints;
567  std::map<int, std::vector<Literal>> outgoing_arc_constraints;
568  for (int i = 0; i < num_arcs; i++) {
569  const Literal arc = mapping->Literal(ct.routes().literals(i));
570  const int tail = ct.routes().tails(i);
571  const int head = ct.routes().heads(i);
572 
573  // Make sure this literal has a view.
575  outgoing_arc_constraints[tail].push_back(arc);
576  incoming_arc_constraints[head].push_back(arc);
577  }
578  for (const auto* node_map :
579  {&outgoing_arc_constraints, &incoming_arc_constraints}) {
580  for (const auto& entry : *node_map) {
581  if (entry.first == 0) continue;
582  const std::vector<Literal>& exactly_one = entry.second;
583  if (exactly_one.size() > 1) {
584  LinearConstraintBuilder at_least_one_lc(model, IntegerValue(1),
586  for (const Literal l : exactly_one) {
587  CHECK(at_least_one_lc.AddLiteralTerm(l, IntegerValue(1)));
588  }
589 
590  // We separate the two constraints.
591  relaxation->at_most_ones.push_back(exactly_one);
592  relaxation->linear_constraints.push_back(at_least_one_lc.Build());
593  }
594  }
595  }
596  LinearConstraintBuilder zero_node_balance_lc(model, IntegerValue(0),
597  IntegerValue(0));
598  for (const Literal& incoming_arc : incoming_arc_constraints[0]) {
599  CHECK(zero_node_balance_lc.AddLiteralTerm(incoming_arc, IntegerValue(1)));
600  }
601  for (const Literal& outgoing_arc : outgoing_arc_constraints[0]) {
602  CHECK(zero_node_balance_lc.AddLiteralTerm(outgoing_arc, IntegerValue(-1)));
603  }
604  relaxation->linear_constraints.push_back(zero_node_balance_lc.Build());
605 }
606 
// Adds an energetic relaxation of a cumulative-like constraint:
//   sum_i energy_i <= capacity_upper_bound * span_size
// where span is a new interval spanning all the tasks. Only added when at
// least one interval is optional or has a variable size/demand; otherwise the
// function adds nothing.
// An empty `demands` vector means implicit unit demands (the no_overlap
// case); `energies[i]`, when non-trivial, is a precomputed linearization of
// demand_i * size_i.
void AddCumulativeRelaxation(const std::vector<IntervalVariable>& intervals,
                             const std::vector<AffineExpression>& demands,
                             const std::vector<LinearExpression>& energies,
                             IntegerValue capacity_upper_bound, Model* model,
                             LinearRelaxation* relaxation) {
  // TODO(user): Keep a map intervals -> helper, or ct_index->helper to avoid
  // creating many helpers for the same constraint.
  auto* helper = new SchedulingConstraintHelper(intervals, model);
  model->TakeOwnership(helper);
  const int num_intervals = helper->NumTasks();

  IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();

  IntegerValue min_of_starts = kMaxIntegerValue;
  IntegerValue max_of_ends = kMinIntegerValue;

  int num_variable_sizes = 0;
  int num_optionals = 0;

  // Single pass: compute the enclosing horizon and count the tasks that make
  // this relaxation worthwhile.
  for (int index = 0; index < num_intervals; ++index) {
    min_of_starts = std::min(min_of_starts, helper->StartMin(index));
    max_of_ends = std::max(max_of_ends, helper->EndMax(index));

    if (helper->IsOptional(index)) {
      num_optionals++;
    }

    if (!helper->SizeIsFixed(index) ||
        (!demands.empty() && !integer_trail->IsFixed(demands[index]))) {
      num_variable_sizes++;
    }
  }

  VLOG(2) << "Span [" << min_of_starts << ".." << max_of_ends << "] with "
          << num_optionals << " optional intervals, and " << num_variable_sizes
          << " variable size intervals out of " << num_intervals
          << " intervals";

  // Everything fixed and present: nothing useful to relax here.
  if (num_variable_sizes + num_optionals == 0) return;

  const IntegerVariable span_start =
      integer_trail->AddIntegerVariable(min_of_starts, max_of_ends);
  const IntegerVariable span_size = integer_trail->AddIntegerVariable(
      IntegerValue(0), max_of_ends - min_of_starts);
  const IntegerVariable span_end =
      integer_trail->AddIntegerVariable(min_of_starts, max_of_ends);

  IntervalVariable span_var;
  if (num_optionals < num_intervals) {
    // At least one interval is always present, so the span is too.
    span_var = model->Add(NewInterval(span_start, span_end, span_size));
  } else {
    // All intervals are optional, so the span itself may be absent.
    const Literal span_lit = Literal(model->Add(NewBooleanVariable()), true);
    span_var = model->Add(
        NewOptionalInterval(span_start, span_end, span_size, span_lit));
  }

  model->Add(SpanOfIntervals(span_var, intervals));

  // Builds sum_i energy_i - capacity_upper_bound * span_size <= 0.
  LinearConstraintBuilder lc(model, kMinIntegerValue, IntegerValue(0));
  lc.AddTerm(span_size, -capacity_upper_bound);
  for (int i = 0; i < num_intervals; ++i) {
    const IntegerValue demand_lower_bound =
        demands.empty() ? IntegerValue(1)
                        : integer_trail->LowerBound(demands[i]);
    const bool demand_is_fixed =
        demands.empty() || integer_trail->IsFixed(demands[i]);
    if (!helper->IsOptional(i)) {
      if (demand_is_fixed) {
        lc.AddTerm(helper->Sizes()[i], demand_lower_bound);
      } else if (!helper->SizeIsFixed(i) &&
                 (!energies[i].vars.empty() || energies[i].offset != -1)) {
        // We prefer the energy additional info instead of the McCormick
        // relaxation.
        lc.AddLinearExpression(energies[i]);
      } else {
        // Fall back to the McCormick lower bound of size * demand.
        lc.AddQuadraticLowerBound(helper->Sizes()[i], demands[i],
                                  integer_trail);
      }
    } else {
      // Optional task: a conservative min-energy term guarded by the
      // presence literal; abort if that literal has no LP view.
      if (!lc.AddLiteralTerm(helper->PresenceLiteral(i),
                             helper->SizeMin(i) * demand_lower_bound)) {
        return;
      }
    }
  }
  relaxation->linear_constraints.push_back(lc.Build());
}
694 
696  const ConstraintProto& ct, Model* model,
697  LinearRelaxation* relaxation) {
698  CHECK(ct.has_cumulative());
699  if (HasEnforcementLiteral(ct)) return;
700 
701  auto* mapping = model->GetOrCreate<CpModelMapping>();
702  std::vector<IntervalVariable> intervals =
703  mapping->Intervals(ct.cumulative().intervals());
704  const IntegerValue capacity_upper_bound =
705  model->GetOrCreate<IntegerTrail>()->UpperBound(
706  mapping->Affine(ct.cumulative().capacity()));
707 
708  // Scan energies.
709  IntervalsRepository* intervals_repository =
710  model->GetOrCreate<IntervalsRepository>();
711 
712  std::vector<LinearExpression> energies;
713  std::vector<AffineExpression> demands;
714  std::vector<AffineExpression> sizes;
715  for (int i = 0; i < ct.cumulative().demands_size(); ++i) {
716  demands.push_back(mapping->Affine(ct.cumulative().demands(i)));
717  sizes.push_back(intervals_repository->Size(intervals[i]));
718  }
719  LinearizeInnerProduct(demands, sizes, model, &energies);
720  AddCumulativeRelaxation(intervals, demands, energies, capacity_upper_bound,
721  model, relaxation);
722 }
723 
725  const ConstraintProto& ct, Model* model,
726  LinearRelaxation* relaxation) {
727  CHECK(ct.has_no_overlap());
728  if (HasEnforcementLiteral(ct)) return;
729 
730  auto* mapping = model->GetOrCreate<CpModelMapping>();
731  std::vector<IntervalVariable> intervals =
732  mapping->Intervals(ct.no_overlap().intervals());
733  AddCumulativeRelaxation(intervals, /*demands=*/{}, /*energies=*/{},
734  /*capacity_upper_bound=*/IntegerValue(1), model,
735  relaxation);
736 }
737 
738 // Adds the energetic relaxation sum(areas) <= bounding box area.
740  LinearRelaxation* relaxation) {
741  CHECK(ct.has_no_overlap_2d());
742  if (HasEnforcementLiteral(ct)) return;
743 
744  auto* mapping = model->GetOrCreate<CpModelMapping>();
745  std::vector<IntervalVariable> x_intervals =
746  mapping->Intervals(ct.no_overlap_2d().x_intervals());
747  std::vector<IntervalVariable> y_intervals =
748  mapping->Intervals(ct.no_overlap_2d().y_intervals());
749 
750  auto* integer_trail = model->GetOrCreate<IntegerTrail>();
751  auto* intervals_repository = model->GetOrCreate<IntervalsRepository>();
752 
753  IntegerValue x_min = kMaxIntegerValue;
754  IntegerValue x_max = kMinIntegerValue;
755  IntegerValue y_min = kMaxIntegerValue;
756  IntegerValue y_max = kMinIntegerValue;
757  std::vector<AffineExpression> x_sizes;
758  std::vector<AffineExpression> y_sizes;
759  for (int i = 0; i < ct.no_overlap_2d().x_intervals_size(); ++i) {
760  x_sizes.push_back(intervals_repository->Size(x_intervals[i]));
761  y_sizes.push_back(intervals_repository->Size(y_intervals[i]));
762  x_min = std::min(x_min, integer_trail->LevelZeroLowerBound(
763  intervals_repository->Start(x_intervals[i])));
764  x_max = std::max(x_max, integer_trail->LevelZeroUpperBound(
765  intervals_repository->End(x_intervals[i])));
766  y_min = std::min(y_min, integer_trail->LevelZeroLowerBound(
767  intervals_repository->Start(y_intervals[i])));
768  y_max = std::max(y_max, integer_trail->LevelZeroUpperBound(
769  intervals_repository->End(y_intervals[i])));
770  }
771 
772  const IntegerValue max_area =
773  IntegerValue(CapProd(CapSub(x_max.value(), x_min.value()),
774  CapSub(y_max.value(), y_min.value())));
775  if (max_area == kMaxIntegerValue) return;
776 
777  LinearConstraintBuilder lc(model, IntegerValue(0), max_area);
778  for (int i = 0; i < ct.no_overlap_2d().x_intervals_size(); ++i) {
779  if (intervals_repository->IsPresent(x_intervals[i]) &&
780  intervals_repository->IsPresent(y_intervals[i])) {
781  LinearConstraintBuilder linear_energy(model);
782  if (DetectLinearEncodingOfProducts(x_sizes[i], y_sizes[i], model,
783  &linear_energy)) {
784  lc.AddLinearExpression(linear_energy.BuildExpression());
785  } else {
786  lc.AddQuadraticLowerBound(x_sizes[i], y_sizes[i], integer_trail);
787  }
788  } else if (intervals_repository->IsPresent(x_intervals[i]) ||
789  intervals_repository->IsPresent(y_intervals[i]) ||
790  (intervals_repository->PresenceLiteral(x_intervals[i]) ==
791  intervals_repository->PresenceLiteral(y_intervals[i]))) {
792  // We have only one active literal.
793  const Literal presence_literal =
794  intervals_repository->IsPresent(x_intervals[i])
795  ? intervals_repository->PresenceLiteral(y_intervals[i])
796  : intervals_repository->PresenceLiteral(x_intervals[i]);
797  const IntegerValue area_min =
798  integer_trail->LevelZeroLowerBound(x_sizes[i]) *
799  integer_trail->LevelZeroLowerBound(y_sizes[i]);
800  if (area_min != 0) {
801  // Not including the term if we don't have a view is ok.
802  (void)lc.AddLiteralTerm(presence_literal, area_min);
803  }
804  }
805  }
806  relaxation->linear_constraints.push_back(lc.Build());
807 }
808 
810  LinearRelaxation* relaxation) {
811  auto* mapping = model->GetOrCreate<CpModelMapping>();
812 
813  // We want to linearize target = max(exprs[1], exprs[2], ..., exprs[d]).
814  // Part 1: Encode target >= max(exprs[1], exprs[2], ..., exprs[d])
815  const LinearExpression negated_target =
816  NegationOf(mapping->GetExprFromProto(ct.lin_max().target()));
817  for (int i = 0; i < ct.lin_max().exprs_size(); ++i) {
818  const LinearExpression expr =
819  mapping->GetExprFromProto(ct.lin_max().exprs(i));
820  LinearConstraintBuilder lc(model, kMinIntegerValue, IntegerValue(0));
821  lc.AddLinearExpression(negated_target);
822  lc.AddLinearExpression(expr);
823  relaxation->linear_constraints.push_back(lc.Build());
824  }
825 }
826 
827 // TODO(user): experiment with:
828 // 1) remove this code
829 // 2) keep this code
830 // 3) remove this code and create the cut generator at level 1.
832  LinearRelaxation* relaxation) {
833  IntegerVariable var;
834  std::vector<std::pair<IntegerValue, IntegerValue>> affines;
835  auto* mapping = model->GetOrCreate<CpModelMapping>();
836  CollectAffineExpressionWithSingleVariable(ct, mapping, &var, &affines);
837  if (var == kNoIntegerVariable ||
838  model->GetOrCreate<IntegerTrail>()->IsFixed(var)) {
839  return;
840  }
841 
843  const LinearExpression target_expr =
844  PositiveVarExpr(mapping->GetExprFromProto(ct.lin_max().target()));
845  relaxation->linear_constraints.push_back(
846  BuildMaxAffineUpConstraint(target_expr, var, affines, model));
847 }
848 
850  LinearRelaxation* relaxation) {
851  IntegerVariable var;
852  std::vector<std::pair<IntegerValue, IntegerValue>> affines;
853  auto* mapping = model->GetOrCreate<CpModelMapping>();
854  CollectAffineExpressionWithSingleVariable(ct, mapping, &var, &affines);
855  if (var == kNoIntegerVariable ||
856  model->GetOrCreate<IntegerTrail>()->IsFixed(var)) {
857  return;
858  }
859 
860  // If the target is constant, propagation is enough.
861  if (ct.lin_max().target().vars().empty()) return;
862 
863  const LinearExpression target_expr =
864  PositiveVarExpr(mapping->GetExprFromProto(ct.lin_max().target()));
865  relaxation->cut_generators.push_back(CreateMaxAffineCutGenerator(
866  target_expr, var, affines, "AffineMax", model));
867 }
868 
869 // Part 2: Encode upper bound on X.
870 //
871 // Add linking constraint to the CP solver
872 // sum zi = 1 and for all i, zi => max = expr_i.
874  IntegerVariable target, const std::vector<Literal>& alternative_literals,
875  const std::vector<LinearExpression>& exprs, Model* model,
876  LinearRelaxation* relaxation) {
877  const int num_exprs = exprs.size();
878  GenericLiteralWatcher* watcher = model->GetOrCreate<GenericLiteralWatcher>();
879 
880  // First add the CP constraints.
881  for (int i = 0; i < num_exprs; ++i) {
882  LinearExpression local_expr;
883  local_expr.vars = NegationOf(exprs[i].vars);
884  local_expr.vars.push_back(target);
885  local_expr.coeffs = exprs[i].coeffs;
886  local_expr.coeffs.push_back(IntegerValue(1));
888  new IntegerSumLE({alternative_literals[i]}, local_expr.vars,
889  local_expr.coeffs, exprs[i].offset, model);
890  upper_bound->RegisterWith(watcher);
891  model->TakeOwnership(upper_bound);
892  }
893 
894  // For the relaxation, we use different constraints with a stronger linear
895  // relaxation as explained in the .h
896  //
897  // TODO(user): Consider passing the x_vars to this method instead of
898  // computing it here.
899  std::vector<IntegerVariable> x_vars;
900  for (int i = 0; i < num_exprs; ++i) {
901  x_vars.insert(x_vars.end(), exprs[i].vars.begin(), exprs[i].vars.end());
902  }
904 
905  // All expressions should only contain positive variables.
906  DCHECK(std::all_of(x_vars.begin(), x_vars.end(), [](IntegerVariable var) {
907  return VariableIsPositive(var);
908  }));
909 
910  std::vector<std::vector<IntegerValue>> sum_of_max_corner_diff(
911  num_exprs, std::vector<IntegerValue>(num_exprs, IntegerValue(0)));
912 
913  IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
914  for (int i = 0; i < num_exprs; ++i) {
915  for (int j = 0; j < num_exprs; ++j) {
916  if (i == j) continue;
917  for (const IntegerVariable x_var : x_vars) {
918  const IntegerValue lb = integer_trail->LevelZeroLowerBound(x_var);
919  const IntegerValue ub = integer_trail->LevelZeroUpperBound(x_var);
920  const IntegerValue diff =
921  GetCoefficient(x_var, exprs[j]) - GetCoefficient(x_var, exprs[i]);
922  sum_of_max_corner_diff[i][j] += std::max(diff * lb, diff * ub);
923  }
924  }
925  }
926  for (int i = 0; i < num_exprs; ++i) {
927  LinearConstraintBuilder lc(model, kMinIntegerValue, IntegerValue(0));
928  lc.AddTerm(target, IntegerValue(1));
929  for (int j = 0; j < exprs[i].vars.size(); ++j) {
930  lc.AddTerm(exprs[i].vars[j], -exprs[i].coeffs[j]);
931  }
932  for (int j = 0; j < num_exprs; ++j) {
933  CHECK(lc.AddLiteralTerm(alternative_literals[j],
934  -exprs[j].offset - sum_of_max_corner_diff[i][j]));
935  }
936  relaxation->linear_constraints.push_back(lc.Build());
937  }
938 }
939 
941  bool linearize_enforced_constraints,
942  Model* model,
943  LinearRelaxation* relaxation) {
944  auto* mapping = model->Get<CpModelMapping>();
945 
946  // Note that we ignore the holes in the domain.
947  //
948  // TODO(user): In LoadLinearConstraint() we already created intermediate
949  // Booleans for each disjoint interval, we should reuse them here if
950  // possible.
951  //
952  // TODO(user): process the "at most one" part of a == 1 separately?
953  const IntegerValue rhs_domain_min = IntegerValue(ct.linear().domain(0));
954  const IntegerValue rhs_domain_max =
955  IntegerValue(ct.linear().domain(ct.linear().domain_size() - 1));
956  if (rhs_domain_min == std::numeric_limits<int64_t>::min() &&
957  rhs_domain_max == std::numeric_limits<int64_t>::max())
958  return;
959 
960  if (!HasEnforcementLiteral(ct)) {
961  LinearConstraintBuilder lc(model, rhs_domain_min, rhs_domain_max);
962  for (int i = 0; i < ct.linear().vars_size(); i++) {
963  const int ref = ct.linear().vars(i);
964  const int64_t coeff = ct.linear().coeffs(i);
965  lc.AddTerm(mapping->Integer(ref), IntegerValue(coeff));
966  }
967  relaxation->linear_constraints.push_back(lc.Build());
968  return;
969  }
970 
971  // Reified version.
972  if (!linearize_enforced_constraints) return;
973 
974  // We linearize fully reified constraints of size 1 all together for a given
975  // variable. But we need to process half-reified ones.
976  if (!mapping->IsHalfEncodingConstraint(&ct) && ct.linear().vars_size() <= 1) {
977  return;
978  }
979 
980  std::vector<Literal> enforcing_literals;
981  enforcing_literals.reserve(ct.enforcement_literal_size());
982  for (const int enforcement_ref : ct.enforcement_literal()) {
983  enforcing_literals.push_back(mapping->Literal(enforcement_ref));
984  }
985  LinearExpression expr;
986  expr.vars.reserve(ct.linear().vars_size());
987  expr.coeffs.reserve(ct.linear().vars_size());
988  for (int i = 0; i < ct.linear().vars_size(); i++) {
989  int ref = ct.linear().vars(i);
990  IntegerValue coeff(ct.linear().coeffs(i));
991  if (!RefIsPositive(ref)) {
992  ref = PositiveRef(ref);
993  coeff = -coeff;
994  }
995  const IntegerVariable int_var = mapping->Integer(ref);
996  expr.vars.push_back(int_var);
997  expr.coeffs.push_back(coeff);
998  }
999  AppendEnforcedLinearExpression(enforcing_literals, expr, rhs_domain_min,
1000  rhs_domain_max, *model, relaxation);
1001 }
1002 
1003 // Add a static and a dynamic linear relaxation of the CP constraint to the set
1004 // of linear constraints. The highest linearization_level is, the more types of
1005 // constraint we encode. This method should be called only for
1006 // linearization_level > 0. The static part is just called a relaxation and is
1007 // called at the root node of the search. The dynamic part is implemented
1008 // through a set of linear cut generators that will be called throughout the
1009 // search.
1010 //
1011 // TODO(user): In full generality, we could encode all the constraint as an LP.
1012 // TODO(user): Add unit tests for this method.
1013 // TODO(user): Remove and merge with model loading.
1015  const ConstraintProto& ct,
1016  int linearization_level, Model* model,
1017  LinearRelaxation* relaxation) {
1018  CHECK_EQ(model->GetOrCreate<SatSolver>()->CurrentDecisionLevel(), 0);
1019  DCHECK_GT(linearization_level, 0);
1020 
1021  switch (ct.constraint_case()) {
1022  case ConstraintProto::ConstraintCase::kBoolOr: {
1023  if (linearization_level > 1) {
1024  AppendBoolOrRelaxation(ct, model, relaxation);
1025  }
1026  break;
1027  }
1028  case ConstraintProto::ConstraintCase::kBoolAnd: {
1029  if (linearization_level > 1) {
1030  AppendBoolAndRelaxation(ct, model, relaxation);
1031  }
1032  break;
1033  }
1034  case ConstraintProto::ConstraintCase::kAtMostOne: {
1035  AppendAtMostOneRelaxation(ct, model, relaxation);
1036  break;
1037  }
1038  case ConstraintProto::ConstraintCase::kExactlyOne: {
1039  AppendExactlyOneRelaxation(ct, model, relaxation);
1040  break;
1041  }
1042  case ConstraintProto::ConstraintCase::kIntProd: {
1043  // No relaxation, just a cut generator .
1044  AddIntProdCutGenerator(ct, linearization_level, model, relaxation);
1045  break;
1046  }
1047  case ConstraintProto::ConstraintCase::kLinMax: {
1048  AppendLinMaxRelaxationPart1(ct, model, relaxation);
1049  const bool is_affine_max = LinMaxContainsOnlyOneVarInExpressions(ct);
1050  if (is_affine_max) {
1051  AppendMaxAffineRelaxation(ct, model, relaxation);
1052  }
1053 
1054  // Add cut generators.
1055  if (linearization_level > 1) {
1056  if (is_affine_max) {
1057  AddMaxAffineCutGenerator(ct, model, relaxation);
1058  } else {
1059  AddLinMaxCutGenerator(ct, model, relaxation);
1060  }
1061  }
1062  break;
1063  }
1064  case ConstraintProto::ConstraintCase::kAllDiff: {
1065  if (linearization_level > 1) {
1066  AddAllDiffCutGenerator(ct, model, relaxation);
1067  }
1068  break;
1069  }
1070  case ConstraintProto::ConstraintCase::kLinear: {
1072  ct, /*linearize_enforced_constraints=*/linearization_level > 1, model,
1073  relaxation);
1074  break;
1075  }
1076  case ConstraintProto::ConstraintCase::kCircuit: {
1077  AppendCircuitRelaxation(ct, model, relaxation);
1078  if (linearization_level > 1) {
1079  AddCircuitCutGenerator(ct, model, relaxation);
1080  }
1081  break;
1082  }
1083  case ConstraintProto::ConstraintCase::kRoutes: {
1084  AppendRoutesRelaxation(ct, model, relaxation);
1085  if (linearization_level > 1) {
1086  AddRoutesCutGenerator(ct, model, relaxation);
1087  }
1088  break;
1089  }
1090  case ConstraintProto::ConstraintCase::kNoOverlap: {
1091  if (linearization_level > 1) {
1093  AddNoOverlapCutGenerator(ct, model, relaxation);
1094  }
1095  break;
1096  }
1097  case ConstraintProto::ConstraintCase::kCumulative: {
1098  if (linearization_level > 1) {
1100  AddCumulativeCutGenerator(ct, model, relaxation);
1101  }
1102  break;
1103  }
1104  case ConstraintProto::ConstraintCase::kNoOverlap2D: {
1105  // Adds an energetic relaxation (sum of areas fits in bounding box).
1106  AppendNoOverlap2dRelaxation(ct, model, relaxation);
1107  if (linearization_level > 1) {
1108  // Adds a completion time cut generator and an energetic cut generator.
1109  AddNoOverlap2dCutGenerator(ct, model, relaxation);
1110  }
1111  break;
1112  }
1113  default: {
1114  }
1115  }
1116 }
1117 
1118 // Cut generators.
1119 
1121  LinearRelaxation* relaxation) {
1122  std::vector<int> tails(ct.circuit().tails().begin(),
1123  ct.circuit().tails().end());
1124  std::vector<int> heads(ct.circuit().heads().begin(),
1125  ct.circuit().heads().end());
1126  auto* mapping = m->GetOrCreate<CpModelMapping>();
1127  std::vector<Literal> literals = mapping->Literals(ct.circuit().literals());
1128  const int num_nodes = ReindexArcs(&tails, &heads);
1129 
1131  num_nodes, tails, heads, literals, m));
1132 }
1133 
1135  LinearRelaxation* relaxation) {
1136  std::vector<int> tails(ct.routes().tails().begin(),
1137  ct.routes().tails().end());
1138  std::vector<int> heads(ct.routes().heads().begin(),
1139  ct.routes().heads().end());
1140  auto* mapping = m->GetOrCreate<CpModelMapping>();
1141  std::vector<Literal> literals = mapping->Literals(ct.routes().literals());
1142 
1143  int num_nodes = 0;
1144  for (int i = 0; i < ct.routes().tails_size(); ++i) {
1145  num_nodes = std::max(num_nodes, 1 + ct.routes().tails(i));
1146  num_nodes = std::max(num_nodes, 1 + ct.routes().heads(i));
1147  }
1148  if (ct.routes().demands().empty() || ct.routes().capacity() == 0) {
1149  relaxation->cut_generators.push_back(
1150  CreateStronglyConnectedGraphCutGenerator(num_nodes, tails, heads,
1151  literals, m));
1152  } else {
1153  const std::vector<int64_t> demands(ct.routes().demands().begin(),
1154  ct.routes().demands().end());
1155  relaxation->cut_generators.push_back(CreateCVRPCutGenerator(
1156  num_nodes, tails, heads, literals, demands, ct.routes().capacity(), m));
1157  }
1158 }
1159 
1160 void AddIntProdCutGenerator(const ConstraintProto& ct, int linearization_level,
1161  Model* m, LinearRelaxation* relaxation) {
1162  if (HasEnforcementLiteral(ct)) return;
1163  if (ct.int_prod().exprs_size() != 2) return;
1164  auto* mapping = m->GetOrCreate<CpModelMapping>();
1165 
1166  // Constraint is z == x * y.
1167 
1168  AffineExpression z = mapping->Affine(ct.int_prod().target());
1169  AffineExpression x = mapping->Affine(ct.int_prod().exprs(0));
1170  AffineExpression y = mapping->Affine(ct.int_prod().exprs(1));
1171 
1172  IntegerTrail* const integer_trail = m->GetOrCreate<IntegerTrail>();
1173  IntegerValue x_lb = integer_trail->LowerBound(x);
1174  IntegerValue x_ub = integer_trail->UpperBound(x);
1175  IntegerValue y_lb = integer_trail->LowerBound(y);
1176  IntegerValue y_ub = integer_trail->UpperBound(y);
1177 
1178  if (x == y) {
1179  // We currently only support variables with non-negative domains.
1180  if (x_lb < 0 && x_ub > 0) return;
1181 
1182  // Change the sigh of x if its domain is non-positive.
1183  if (x_ub <= 0) {
1184  x = x.Negated();
1185  }
1186 
1187  relaxation->cut_generators.push_back(
1188  CreateSquareCutGenerator(z, x, linearization_level, m));
1189  } else {
1190  // We currently only support variables with non-negative domains.
1191  if (x_lb < 0 && x_ub > 0) return;
1192  if (y_lb < 0 && y_ub > 0) return;
1193 
1194  // Change signs to return to the case where all variables are a domain
1195  // with non negative values only.
1196  if (x_ub <= 0) {
1197  x = x.Negated();
1198  z = z.Negated();
1199  }
1200  if (y_ub <= 0) {
1201  y = y.Negated();
1202  z = z.Negated();
1203  }
1204 
1205  relaxation->cut_generators.push_back(
1206  CreatePositiveMultiplicationCutGenerator(z, x, y, linearization_level,
1207  m));
1208  }
1209 }
1210 
1212  LinearRelaxation* relaxation) {
1213  if (HasEnforcementLiteral(ct)) return;
1214  auto* mapping = m->GetOrCreate<CpModelMapping>();
1215  const int num_exprs = ct.all_diff().exprs_size();
1216 
1217  if (num_exprs <= m->GetOrCreate<SatParameters>()->max_all_diff_cut_size()) {
1218  std::vector<AffineExpression> exprs(num_exprs);
1219  for (const LinearExpressionProto& expr : ct.all_diff().exprs()) {
1220  exprs.push_back(mapping->Affine(expr));
1221  }
1222  relaxation->cut_generators.push_back(
1223  CreateAllDifferentCutGenerator(exprs, m));
1224  }
1225 }
1226 
1227 bool IntervalIsVariable(const IntervalVariable interval,
1228  IntervalsRepository* intervals_repository) {
1229  // Ignore absent rectangles.
1230  if (intervals_repository->IsAbsent(interval)) {
1231  return false;
1232  }
1233 
1234  // Checks non-present intervals.
1235  if (!intervals_repository->IsPresent(interval)) {
1236  return true;
1237  }
1238 
1239  // Checks variable sized intervals.
1240  if (intervals_repository->MinSize(interval) !=
1241  intervals_repository->MaxSize(interval)) {
1242  return true;
1243  }
1244 
1245  return false;
1246 }
1247 
1249  LinearRelaxation* relaxation) {
1250  if (HasEnforcementLiteral(ct)) return;
1251  auto* mapping = m->GetOrCreate<CpModelMapping>();
1252 
1253  const std::vector<IntervalVariable> intervals =
1254  mapping->Intervals(ct.cumulative().intervals());
1255  const AffineExpression capacity = mapping->Affine(ct.cumulative().capacity());
1256 
1257  // Scan energies.
1258  IntervalsRepository* intervals_repository =
1260 
1261  std::vector<LinearExpression> energies;
1262  std::vector<AffineExpression> demands;
1263  std::vector<AffineExpression> sizes;
1264  for (int i = 0; i < intervals.size(); ++i) {
1265  demands.push_back(mapping->Affine(ct.cumulative().demands(i)));
1266  sizes.push_back(intervals_repository->Size(intervals[i]));
1267  }
1268  LinearizeInnerProduct(demands, sizes, m, &energies);
1269 
1270  relaxation->cut_generators.push_back(
1271  CreateCumulativeTimeTableCutGenerator(intervals, capacity, demands, m));
1272  relaxation->cut_generators.push_back(
1274  energies, m));
1275  relaxation->cut_generators.push_back(
1276  CreateCumulativePrecedenceCutGenerator(intervals, capacity, demands, m));
1277 
1278  // Checks if at least one rectangle has a variable size, is optional, or if
1279  // the demand if variable.
1280  bool has_variable_part = false;
1281  IntegerTrail* integer_trail = m->GetOrCreate<IntegerTrail>();
1282  for (int i = 0; i < intervals.size(); ++i) {
1283  if (IntervalIsVariable(intervals[i], intervals_repository)) {
1284  has_variable_part = true;
1285  break;
1286  }
1287  // Checks variable demand.
1288  if (!integer_trail->IsFixed(demands[i])) {
1289  has_variable_part = true;
1290  break;
1291  }
1292  }
1293  if (has_variable_part) {
1294  relaxation->cut_generators.push_back(CreateCumulativeEnergyCutGenerator(
1295  intervals, capacity, demands, energies, m));
1296  }
1297 }
1298 
1300  LinearRelaxation* relaxation) {
1301  if (HasEnforcementLiteral(ct)) return;
1302 
1303  auto* mapping = m->GetOrCreate<CpModelMapping>();
1304  std::vector<IntervalVariable> intervals =
1305  mapping->Intervals(ct.no_overlap().intervals());
1306  relaxation->cut_generators.push_back(
1308  relaxation->cut_generators.push_back(
1310 
1311  // Checks if at least one rectangle has a variable size or is optional.
1312  IntervalsRepository* intervals_repository =
1314  bool has_variable_part = false;
1315  for (int i = 0; i < intervals.size(); ++i) {
1316  if (IntervalIsVariable(intervals[i], intervals_repository)) {
1317  has_variable_part = true;
1318  break;
1319  }
1320  }
1321  if (has_variable_part) {
1322  relaxation->cut_generators.push_back(
1323  CreateNoOverlapEnergyCutGenerator(intervals, m));
1324  }
1325 }
1326 
1328  LinearRelaxation* relaxation) {
1329  if (HasEnforcementLiteral(ct)) return;
1330 
1331  auto* mapping = m->GetOrCreate<CpModelMapping>();
1332  std::vector<IntervalVariable> x_intervals =
1333  mapping->Intervals(ct.no_overlap_2d().x_intervals());
1334  std::vector<IntervalVariable> y_intervals =
1335  mapping->Intervals(ct.no_overlap_2d().y_intervals());
1336  relaxation->cut_generators.push_back(
1337  CreateNoOverlap2dCompletionTimeCutGenerator(x_intervals, y_intervals, m));
1338 
1339  // Checks if at least one rectangle has a variable dimension or is optional.
1340  IntervalsRepository* intervals_repository =
1342  bool has_variable_part = false;
1343  for (int i = 0; i < x_intervals.size(); ++i) {
1344  // Ignore absent rectangles.
1345  if (intervals_repository->IsAbsent(x_intervals[i]) ||
1346  intervals_repository->IsAbsent(y_intervals[i])) {
1347  continue;
1348  }
1349 
1350  // Checks non-present intervals.
1351  if (!intervals_repository->IsPresent(x_intervals[i]) ||
1352  !intervals_repository->IsPresent(y_intervals[i])) {
1353  has_variable_part = true;
1354  break;
1355  }
1356 
1357  // Checks variable sized intervals.
1358  if (intervals_repository->MinSize(x_intervals[i]) !=
1359  intervals_repository->MaxSize(x_intervals[i]) ||
1360  intervals_repository->MinSize(y_intervals[i]) !=
1361  intervals_repository->MaxSize(y_intervals[i])) {
1362  has_variable_part = true;
1363  break;
1364  }
1365  }
1366  if (has_variable_part) {
1367  relaxation->cut_generators.push_back(
1368  CreateNoOverlap2dEnergyCutGenerator(x_intervals, y_intervals, m));
1369  }
1370 }
1371 
1373  LinearRelaxation* relaxation) {
1374  if (!m->GetOrCreate<SatParameters>()->add_lin_max_cuts()) return;
1375  if (HasEnforcementLiteral(ct)) return;
1376 
1377  // TODO(user): Support linearization of general target expression.
1378  auto* mapping = m->GetOrCreate<CpModelMapping>();
1379  if (ct.lin_max().target().vars_size() != 1) return;
1380  if (ct.lin_max().target().coeffs(0) != 1) return;
1381  if (ct.lin_max().target().offset() != 0) return;
1382 
1383  const IntegerVariable target =
1384  mapping->Integer(ct.lin_max().target().vars(0));
1385  std::vector<LinearExpression> exprs;
1386  exprs.reserve(ct.lin_max().exprs_size());
1387  for (int i = 0; i < ct.lin_max().exprs_size(); ++i) {
1388  // Note: Cut generator requires all expressions to contain only positive
1389  // vars.
1390  exprs.push_back(
1391  PositiveVarExpr(mapping->GetExprFromProto(ct.lin_max().exprs(i))));
1392  }
1393 
1394  const std::vector<Literal> alternative_literals =
1395  CreateAlternativeLiteralsWithView(exprs.size(), m, relaxation);
1396 
1397  // TODO(user): Move this out of here.
1398  //
1399  // Add initial big-M linear relaxation.
1400  // z_vars[i] == 1 <=> target = exprs[i].
1401  AppendLinMaxRelaxationPart2(target, alternative_literals, exprs, m,
1402  relaxation);
1403 
1404  std::vector<IntegerVariable> z_vars;
1405  auto* encoder = m->GetOrCreate<IntegerEncoder>();
1406  for (const Literal lit : alternative_literals) {
1407  z_vars.push_back(encoder->GetLiteralView(lit));
1408  CHECK_NE(z_vars.back(), kNoIntegerVariable);
1409  }
1410  relaxation->cut_generators.push_back(
1411  CreateLinMaxCutGenerator(target, exprs, z_vars, m));
1412 }
1413 
1414 // If we have an exactly one between literals l_i, and each l_i => var ==
1415 // value_i, then we can add a strong linear relaxation: var = sum l_i * value_i.
1416 //
1417 // This codes detect this and add the corresponding linear equations.
1418 //
1419 // TODO(user): We can do something similar with just an at most one, however
1420 // it is harder to detect that if all literal are false then none of the implied
1421 // value can be taken.
1423  LinearRelaxation* relaxation) {
1424  auto* implied_bounds = m->GetOrCreate<ImpliedBounds>();
1425 
1426  int num_exactly_one_elements = 0;
1427 
1428  for (const IntegerVariable var :
1429  implied_bounds->GetElementEncodedVariables()) {
1430  for (const auto& [index, literal_value_list] :
1431  implied_bounds->GetElementEncodings(var)) {
1432  // We only want to deal with the case with duplicate values, because
1433  // otherwise, the target will be fully encoded, and this is already
1434  // covered by another function.
1435  IntegerValue min_value = kMaxIntegerValue;
1436  {
1437  absl::flat_hash_set<IntegerValue> values;
1438  for (const auto& literal_value : literal_value_list) {
1439  min_value = std::min(min_value, literal_value.value);
1440  values.insert(literal_value.value);
1441  }
1442  if (values.size() == literal_value_list.size()) continue;
1443  }
1444 
1445  LinearConstraintBuilder linear_encoding(m, -min_value, -min_value);
1446  linear_encoding.AddTerm(var, IntegerValue(-1));
1447  for (const auto& [value, literal] : literal_value_list) {
1448  const IntegerValue delta_min = value - min_value;
1449  if (delta_min != 0) {
1450  // If the term has no view, we abort.
1451  if (!linear_encoding.AddLiteralTerm(literal, delta_min)) {
1452  return;
1453  }
1454  }
1455  }
1456  ++num_exactly_one_elements;
1457  relaxation->linear_constraints.push_back(linear_encoding.Build());
1458  }
1459  }
1460 
1461  if (num_exactly_one_elements != 0) {
1462  auto* logger = m->GetOrCreate<SolverLogger>();
1463  SOLVER_LOG(logger,
1464  "[ElementLinearRelaxation]"
1465  " #from_exactly_one:",
1466  num_exactly_one_elements);
1467  }
1468 }
1469 
1471  Model* m) {
1472  LinearRelaxation relaxation;
1473 
1474  // Linearize the constraints.
1475  const SatParameters& params = *m->GetOrCreate<SatParameters>();
1476  for (const auto& ct : model_proto.constraints()) {
1478  &relaxation);
1479  }
1480 
1481  // Linearize the encoding of variable that are fully encoded.
1482  int num_loose_equality_encoding_relaxations = 0;
1483  int num_tight_equality_encoding_relaxations = 0;
1484  int num_inequality_encoding_relaxations = 0;
1485  auto* mapping = m->GetOrCreate<CpModelMapping>();
1486  for (int i = 0; i < model_proto.variables_size(); ++i) {
1487  if (mapping->IsBoolean(i)) continue;
1488 
1489  const IntegerVariable var = mapping->Integer(i);
1490  if (m->Get(IsFixed(var))) continue;
1491 
1492  // We first try to linerize the values encoding.
1494  var, *m, &relaxation, &num_tight_equality_encoding_relaxations,
1495  &num_loose_equality_encoding_relaxations);
1496 
1497  // The we try to linearize the inequality encoding. Note that on some
1498  // problem like pizza27i.mps.gz, adding both equality and inequality
1499  // encoding is a must.
1500  //
1501  // Even if the variable is fully encoded, sometimes not all its associated
1502  // literal have a view (if they are not part of the original model for
1503  // instance).
1504  //
1505  // TODO(user): Should we add them to the LP anyway? this isn't clear as
1506  // we can sometimes create a lot of Booleans like this.
1507  const int old = relaxation.linear_constraints.size();
1509  if (relaxation.linear_constraints.size() > old) {
1510  ++num_inequality_encoding_relaxations;
1511  }
1512  }
1513 
1514  // TODO(user): This is similar to AppendRelaxationForEqualityEncoding() above.
1515  // Investigate if we can merge the code.
1516  if (params.linearization_level() >= 2) {
1518  }
1519 
1520  // TODO(user): I am not sure this is still needed. Investigate and explain why
1521  // or remove.
1522  if (!m->GetOrCreate<SatSolver>()->FinishPropagation()) {
1523  return relaxation;
1524  }
1525 
1526  // We display the stats before linearizing the at most ones.
1527  auto* logger = m->GetOrCreate<SolverLogger>();
1528  if (num_tight_equality_encoding_relaxations != 0 ||
1529  num_loose_equality_encoding_relaxations != 0 ||
1530  num_inequality_encoding_relaxations != 0) {
1531  SOLVER_LOG(logger,
1532  "[EncodingLinearRelaxation]"
1533  " #tight_equality:",
1534  num_tight_equality_encoding_relaxations,
1535  " #loose_equality:", num_loose_equality_encoding_relaxations,
1536  " #inequality:", num_inequality_encoding_relaxations);
1537  }
1538  if (!relaxation.linear_constraints.empty() ||
1539  !relaxation.at_most_ones.empty()) {
1540  SOLVER_LOG(logger,
1541  "[LinearRelaxationBeforeCliqueExpansion]"
1542  " #linear:",
1543  relaxation.linear_constraints.size(),
1544  " #at_most_ones:", relaxation.at_most_ones.size());
1545  }
1546 
1547  // Linearize the at most one constraints. Note that we transform them
1548  // into maximum "at most one" first and we removes redundant ones.
1549  m->GetOrCreate<BinaryImplicationGraph>()->TransformIntoMaxCliques(
1550  &relaxation.at_most_ones, params.merge_at_most_one_work_limit());
1551  for (const std::vector<Literal>& at_most_one : relaxation.at_most_ones) {
1552  if (at_most_one.empty()) continue;
1553 
1554  LinearConstraintBuilder lc(m, kMinIntegerValue, IntegerValue(1));
1555  for (const Literal literal : at_most_one) {
1556  // Note that it is okay to simply ignore the literal if it has no
1557  // integer view.
1558  const bool unused ABSL_ATTRIBUTE_UNUSED =
1559  lc.AddLiteralTerm(literal, IntegerValue(1));
1560  }
1561  relaxation.linear_constraints.push_back(lc.Build());
1562  }
1563 
1564  // We converted all at_most_one to LP constraints, so we need to clear them
1565  // so that we don't do extra work in the connected component computation.
1566  relaxation.at_most_ones.clear();
1567 
1568  // Remove size one LP constraints, they are not useful.
1569  relaxation.linear_constraints.erase(
1570  std::remove_if(
1571  relaxation.linear_constraints.begin(),
1572  relaxation.linear_constraints.end(),
1573  [](const LinearConstraint& lc) { return lc.vars.size() <= 1; }),
1574  relaxation.linear_constraints.end());
1575 
1576  // We add a clique cut generation over all Booleans of the problem.
1577  // Note that in practice this might regroup independent LP together.
1578  //
1579  // TODO(user): compute connected components of the original problem and
1580  // split these cuts accordingly.
1581  if (params.linearization_level() > 1 && params.add_clique_cuts()) {
1582  LinearConstraintBuilder builder(m);
1583  for (int i = 0; i < model_proto.variables_size(); ++i) {
1584  if (!mapping->IsBoolean(i)) continue;
1585 
1586  // Note that it is okay to simply ignore the literal if it has no
1587  // integer view.
1588  const bool unused ABSL_ATTRIBUTE_UNUSED =
1589  builder.AddLiteralTerm(mapping->Literal(i), IntegerValue(1));
1590  }
1591 
1592  // We add a generator touching all the variable in the builder.
1593  const LinearExpression& expr = builder.BuildExpression();
1594  if (!expr.vars.empty()) {
1595  relaxation.cut_generators.push_back(
1596  CreateCliqueCutGenerator(expr.vars, m));
1597  }
1598  }
1599 
1600  if (!relaxation.linear_constraints.empty() ||
1601  !relaxation.cut_generators.empty()) {
1602  SOLVER_LOG(logger,
1603  "[FinalLinearRelaxation]"
1604  " #linear:",
1605  relaxation.linear_constraints.size(),
1606  " #cut_generators:", relaxation.cut_generators.size());
1607  }
1608 
1609  return relaxation;
1610 }
1611 
1612 } // namespace sat
1613 } // namespace operations_research
int64_t head
#define CHECK(condition)
Definition: base/logging.h:495
CutGenerator CreateNoOverlapEnergyCutGenerator(const std::vector< IntervalVariable > &intervals, Model *model)
int64_t CapSub(int64_t x, int64_t y)
bool IsPresent(IntervalVariable i) const
Definition: intervals.h:78
std::function< BooleanVariable(Model *)> NewBooleanVariable()
Definition: integer.h:1598
AffineExpression Negated() const
Definition: integer.h:252
IntegerVariable AddIntegerVariable(IntegerValue lower_bound, IntegerValue upper_bound)
Definition: integer.cc:622
void AddAllDiffCutGenerator(const ConstraintProto &ct, Model *m, LinearRelaxation *relaxation)
IntegerValue GetCoefficient(const IntegerVariable var, const LinearExpression &expr)
int64_t min
Definition: alldiff_cst.cc:139
void AppendLinMaxRelaxationPart1(const ConstraintProto &ct, Model *model, LinearRelaxation *relaxation)
#define SOLVER_LOG(logger,...)
Definition: util/logging.h:69
#define CHECK_GE(val1, val2)
Definition: base/logging.h:706
IntegerValue LinExprLowerBound(const LinearExpression &expr, const IntegerTrail &integer_trail)
void AddIntProdCutGenerator(const ConstraintProto &ct, int linearization_level, Model *m, LinearRelaxation *relaxation)
Class that owns everything related to a particular optimization model.
Definition: sat/model.h:38
Literal(int signed_value)
Definition: sat_base.h:70
bool IsAbsent(IntervalVariable i) const
Definition: intervals.h:82
constexpr IntegerValue kMinIntegerValue(-kMaxIntegerValue)
LinearConstraint BuildMaxAffineUpConstraint(const LinearExpression &target, IntegerVariable var, const std::vector< std::pair< IntegerValue, IntegerValue >> &affines, Model *model)
Definition: cuts.cc:2054
void AppendCircuitRelaxation(const ConstraintProto &ct, Model *model, LinearRelaxation *relaxation)
void AppendElementEncodingRelaxation(const CpModelProto &model_proto, Model *m, LinearRelaxation *relaxation)
#define VLOG(verboselevel)
Definition: base/logging.h:983
T * GetOrCreate()
Returns an object of type T that is unique to this model (like a "local" singleton).
Definition: sat/model.h:106
AffineExpression Size(IntervalVariable i) const
Definition: intervals.h:94
IntegerValue LowerBound(IntegerVariable i) const
Definition: integer.h:1435
CutGenerator CreateMaxAffineCutGenerator(LinearExpression target, IntegerVariable var, std::vector< std::pair< IntegerValue, IntegerValue >> affines, const std::string cut_name, Model *model)
Definition: cuts.cc:2090
void AddLinearExpression(const LinearExpression &expr)
GRBmodel * model
LinearExpression PositiveVarExpr(const LinearExpression &expr)
void AddTerm(IntegerVariable var, IntegerValue coeff)
int64_t CapProd(int64_t x, int64_t y)
std::function< IntegerVariable(Model *)> NewIntegerVariableFromLiteral(Literal lit)
Definition: integer.h:1630
std::vector< Literal > CreateAlternativeLiteralsWithView(int num_literals, Model *model, LinearRelaxation *relaxation)
ABSL_MUST_USE_RESULT bool AddLiteralTerm(Literal lit, IntegerValue coeff)
void AppendAtMostOneRelaxation(const ConstraintProto &ct, Model *model, LinearRelaxation *relaxation)
void STLSortAndRemoveDuplicates(T *v, const LessFunc &less_func)
Definition: stl_util.h:58
bool DetectLinearEncodingOfProducts(const AffineExpression &left, const AffineExpression &right, Model *model, LinearConstraintBuilder *builder)
#define DCHECK_GT(val1, val2)
Definition: base/logging.h:895
CutGenerator CreateCVRPCutGenerator(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, const std::vector< int64_t > &demands, int64_t capacity, Model *model)
constexpr IntegerValue kMaxIntegerValue(std::numeric_limits< IntegerValue::ValueType >::max() - 1)
int64_t tail
void AddCumulativeRelaxation(const std::vector< IntervalVariable > &x_intervals, SchedulingConstraintHelper *x, SchedulingConstraintHelper *y, Model *model)
Definition: sat/diffn.cc:80
CutGenerator CreateNoOverlap2dEnergyCutGenerator(const std::vector< IntervalVariable > &x_intervals, const std::vector< IntervalVariable > &y_intervals, Model *model)
CutGenerator CreatePositiveMultiplicationCutGenerator(AffineExpression z, AffineExpression x, AffineExpression y, int linearization_level, Model *model)
Definition: cuts.cc:1348
LinearExpression CanonicalizeExpr(const LinearExpression &expr)
IntegerVariable PositiveVariable(IntegerVariable i)
Definition: integer.h:143
std::function< IntegerVariable(Model *)> NewIntegerVariable(int64_t lb, int64_t ub)
Definition: integer.h:1612
CutGenerator CreateLinMaxCutGenerator(const IntegerVariable target, const std::vector< LinearExpression > &exprs, const std::vector< IntegerVariable > &z_vars, Model *model)
Definition: cuts.cc:1970
int ReindexArcs(IntContainer *tails, IntContainer *heads)
Definition: circuit.h:168
void AppendNoOverlap2dRelaxation(const ConstraintProto &ct, Model *model, LinearRelaxation *relaxation)
int64_t max
Definition: alldiff_cst.cc:140
void AddCumulativeCutGenerator(const ConstraintProto &ct, Model *m, LinearRelaxation *relaxation)
std::vector< std::vector< Literal > > at_most_ones
double upper_bound
void AddRoutesCutGenerator(const ConstraintProto &ct, Model *m, LinearRelaxation *relaxation)
void AppendRelaxationForEqualityEncoding(IntegerVariable var, const Model &model, LinearRelaxation *relaxation, int *num_tight, int *num_loose)
std::function< IntervalVariable(Model *)> NewInterval(int64_t min_start, int64_t max_end, int64_t size)
Definition: intervals.h:666
T Get(std::function< T(const Model &)> f) const
Similar to Add() but this is const.
Definition: sat/model.h:87
void AddNoOverlap2dCutGenerator(const ConstraintProto &ct, Model *m, LinearRelaxation *relaxation)
CutGenerator CreateAllDifferentCutGenerator(const std::vector< AffineExpression > &exprs, Model *model)
Definition: cuts.cc:1861
CutGenerator CreateStronglyConnectedGraphCutGenerator(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, Model *model)
void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var, const Model &model, LinearRelaxation *relaxation)
void AppendLinearConstraintRelaxation(const ConstraintProto &ct, bool linearize_enforced_constraints, Model *model, LinearRelaxation *relaxation)
const ::operations_research::sat::ConstraintProto & constraints(int index) const
CutGenerator CreateSquareCutGenerator(AffineExpression y, AffineExpression x, int linearization_level, Model *model)
Definition: cuts.cc:1437
IntegerValue LevelZeroUpperBound(IntegerVariable var) const
Definition: integer.h:1524
CutGenerator CreateCliqueCutGenerator(const std::vector< IntegerVariable > &base_variables, Model *model)
Definition: cuts.cc:2112
std::function< bool(const Model &)> IsFixed(IntegerVariable v)
Definition: integer.h:1665
void AppendBoolAndRelaxation(const ConstraintProto &ct, Model *model, LinearRelaxation *relaxation)
std::vector< ValueLiteralPair > FullDomainEncoding(IntegerVariable var) const
Definition: integer.cc:114
int64_t capacity
int index
Definition: pack.cc:509
std::function< IntervalVariable(Model *)> NewOptionalInterval(int64_t min_start, int64_t max_end, int64_t size, Literal is_present)
Definition: intervals.h:696
bool VariableIsPositive(IntegerVariable i)
Definition: integer.h:139
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:894
CutGenerator CreateNoOverlapPrecedenceCutGenerator(const std::vector< IntervalVariable > &intervals, Model *model)
void TryToLinearizeConstraint(const CpModelProto &model_proto, const ConstraintProto &ct, int linearization_level, Model *model, LinearRelaxation *relaxation)
void AddNoOverlapCutGenerator(const ConstraintProto &ct, Model *m, LinearRelaxation *relaxation)
#define CHECK_EQ(val1, val2)
Definition: base/logging.h:702
void AppendExactlyOneRelaxation(const ConstraintProto &ct, Model *model, LinearRelaxation *relaxation)
std::function< int64_t(const Model &)> UpperBound(IntegerVariable v)
Definition: integer.h:1659
void AppendMaxAffineRelaxation(const ConstraintProto &ct, Model *model, LinearRelaxation *relaxation)
int64_t delta
Definition: resource.cc:1692
void AppendBoolOrRelaxation(const ConstraintProto &ct, Model *model, LinearRelaxation *relaxation)
IntegerValue LinExprUpperBound(const LinearExpression &expr, const IntegerTrail &integer_trail)
CpModelProto const * model_proto
std::vector< IntegerVariable > NegationOf(const std::vector< IntegerVariable > &vars)
Definition: integer.cc:30
#define DCHECK(condition)
Definition: base/logging.h:889
void AppendNoOverlapRelaxation(const CpModelProto &model_proto, const ConstraintProto &ct, Model *model, LinearRelaxation *relaxation)
void LinearizeInnerProduct(const std::vector< AffineExpression > &left, const std::vector< AffineExpression > &right, Model *model, std::vector< LinearExpression > *energies)
CutGenerator CreateCumulativeTimeTableCutGenerator(const std::vector< IntervalVariable > &intervals, const AffineExpression &capacity, const std::vector< AffineExpression > &demands, Model *model)
void AddCircuitCutGenerator(const ConstraintProto &ct, Model *m, LinearRelaxation *relaxation)
std::function< int64_t(const Model &)> LowerBound(IntegerVariable v)
Definition: integer.h:1653
bool HasEnforcementLiteral(const ConstraintProto &ct)
std::vector< CutGenerator > cut_generators
CutGenerator CreateCumulativeEnergyCutGenerator(const std::vector< IntervalVariable > &intervals, const AffineExpression &capacity, const std::vector< AffineExpression > &demands, const std::vector< LinearExpression > &energies, Model *model)
IntegerValue UpperBound(IntegerVariable i) const
Definition: integer.h:1439
CutGenerator CreateCumulativeCompletionTimeCutGenerator(const std::vector< IntervalVariable > &intervals, const AffineExpression &capacity, const std::vector< AffineExpression > &demands, const std::vector< LinearExpression > &energies, Model *model)
std::vector< IntervalVariable > Intervals(const ProtoIndices &indices) const
Collection of objects used to extend the Constraint Solver library.
CutGenerator CreateNoOverlap2dCompletionTimeCutGenerator(const std::vector< IntervalVariable > &x_intervals, const std::vector< IntervalVariable > &y_intervals, Model *model)
const IntegerVariable kNoIntegerVariable(-1)
void AppendRoutesRelaxation(const ConstraintProto &ct, Model *model, LinearRelaxation *relaxation)
IntegerValue MaxSize(IntervalVariable i) const
Definition: intervals.h:127
bool IntervalIsVariable(const IntervalVariable interval, IntervalsRepository *intervals_repository)
IntegerValue MinSize(IntervalVariable i) const
Definition: intervals.h:122
bool RefIsPositive(int ref)
std::function< void(Model *)> SpanOfIntervals(IntervalVariable span, const std::vector< IntervalVariable > &intervals)
const LiteralIndex kNoLiteralIndex(-1)
IntVar * var
Definition: expr_array.cc:1874
bool AppendFullEncodingRelaxation(IntegerVariable var, const Model &model, LinearRelaxation *relaxation)
void AppendLinMaxRelaxationPart2(IntegerVariable target, const std::vector< Literal > &alternative_literals, const std::vector< LinearExpression > &exprs, Model *model, LinearRelaxation *relaxation)
std::vector< LinearConstraint > linear_constraints
std::function< void(Model *)> ExactlyOneConstraint(const std::vector< Literal > &literals)
Definition: sat_solver.h:878
CutGenerator CreateNoOverlapCompletionTimeCutGenerator(const std::vector< IntervalVariable > &intervals, Model *model)
void AddLinMaxCutGenerator(const ConstraintProto &ct, Model *m, LinearRelaxation *relaxation)
LinearRelaxation ComputeLinearRelaxation(const CpModelProto &model_proto, Model *m)
IntegerVariable Integer(int ref) const
IntegerValue LevelZeroLowerBound(IntegerVariable var) const
Definition: integer.h:1519
int64_t value
Literal literal
Definition: optimization.cc:85
IntervalVar * interval
Definition: resource.cc:100
#define CHECK_NE(val1, val2)
Definition: base/logging.h:703
bool IsFixed(IntegerVariable i) const
Definition: integer.h:1443
const Constraint * ct
void AddQuadraticLowerBound(AffineExpression left, AffineExpression right, IntegerTrail *integer_trail)
void AddMaxAffineCutGenerator(const ConstraintProto &ct, Model *model, LinearRelaxation *relaxation)
void AppendCumulativeRelaxation(const CpModelProto &model_proto, const ConstraintProto &ct, Model *model, LinearRelaxation *relaxation)
CutGenerator CreateCumulativePrecedenceCutGenerator(const std::vector< IntervalVariable > &intervals, const AffineExpression &capacity, const std::vector< AffineExpression > &demands, Model *model)