OR-Tools  8.1
linear_programming_constraint.cc
Go to the documentation of this file.
1 // Copyright 2010-2018 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
15 
16 #include <algorithm>
17 #include <cmath>
18 #include <iterator>
19 #include <limits>
20 #include <string>
21 #include <utility>
22 #include <vector>
23 
24 #include "absl/container/flat_hash_map.h"
25 #include "absl/numeric/int128.h"
28 #include "ortools/base/logging.h"
29 #include "ortools/base/map_util.h"
30 #include "ortools/base/mathutil.h"
31 #include "ortools/base/stl_util.h"
35 #include "ortools/glop/status.h"
39 #include "ortools/sat/integer.h"
42 
43 namespace operations_research {
44 namespace sat {
45 
46 using glop::ColIndex;
47 using glop::Fractional;
48 using glop::RowIndex;
49 
// Tolerance used when an integer (CP) quantity is compared after a round-trip
// through double, e.g. fractionality tests in BranchOnVar() and the
// lp_solution_-vs-bounds checks in IncrementalPropagate().
50 const double LinearProgrammingConstraint::kCpEpsilon = 1e-4;
// Tolerance used on raw LP quantities, e.g. the reduced costs filtered in
// FillReducedCostReasonIn().
51 const double LinearProgrammingConstraint::kLpEpsilon = 1e-6;
52 
54  if (is_sparse_) {
55  for (const glop::ColIndex col : non_zeros_) {
56  dense_vector_[col] = IntegerValue(0);
57  }
58  dense_vector_.resize(size, IntegerValue(0));
59  } else {
60  dense_vector_.assign(size, IntegerValue(0));
61  }
62  for (const glop::ColIndex col : non_zeros_) {
63  is_zeros_[col] = true;
64  }
65  is_zeros_.resize(size, true);
66  non_zeros_.clear();
67  is_sparse_ = true;
68 }
69 
70 bool ScatteredIntegerVector::Add(glop::ColIndex col, IntegerValue value) {
71  const int64 add = CapAdd(value.value(), dense_vector_[col].value());
72  if (add == kint64min || add == kint64max) return false;
73  dense_vector_[col] = IntegerValue(add);
74  if (is_sparse_ && is_zeros_[col]) {
75  is_zeros_[col] = false;
76  non_zeros_.push_back(col);
77  }
78  return true;
79 }
80 
82  IntegerValue multiplier,
83  const std::vector<std::pair<glop::ColIndex, IntegerValue>>& terms) {
84  const double threshold = 0.1 * static_cast<double>(dense_vector_.size());
85  if (is_sparse_ && static_cast<double>(terms.size()) < threshold) {
86  for (const std::pair<glop::ColIndex, IntegerValue> term : terms) {
87  if (is_zeros_[term.first]) {
88  is_zeros_[term.first] = false;
89  non_zeros_.push_back(term.first);
90  }
91  if (!AddProductTo(multiplier, term.second, &dense_vector_[term.first])) {
92  return false;
93  }
94  }
95  if (static_cast<double>(non_zeros_.size()) < threshold) {
96  is_sparse_ = false;
97  }
98  } else {
99  is_sparse_ = false;
100  for (const std::pair<glop::ColIndex, IntegerValue> term : terms) {
101  if (!AddProductTo(multiplier, term.second, &dense_vector_[term.first])) {
102  return false;
103  }
104  }
105  }
106  return true;
107 }
108 
110  const std::vector<IntegerVariable>& integer_variables,
111  IntegerValue upper_bound, LinearConstraint* result) {
112  result->vars.clear();
113  result->coeffs.clear();
114  if (is_sparse_) {
115  std::sort(non_zeros_.begin(), non_zeros_.end());
116  for (const glop::ColIndex col : non_zeros_) {
117  const IntegerValue coeff = dense_vector_[col];
118  if (coeff == 0) continue;
119  result->vars.push_back(integer_variables[col.value()]);
120  result->coeffs.push_back(coeff);
121  }
122  } else {
123  const int size = dense_vector_.size();
124  for (glop::ColIndex col(0); col < size; ++col) {
125  const IntegerValue coeff = dense_vector_[col];
126  if (coeff == 0) continue;
127  result->vars.push_back(integer_variables[col.value()]);
128  result->coeffs.push_back(coeff);
129  }
130  }
131  result->lb = kMinIntegerValue;
132  result->ub = upper_bound;
133 }
134 
135 std::vector<std::pair<glop::ColIndex, IntegerValue>>
137  std::vector<std::pair<glop::ColIndex, IntegerValue>> result;
138  if (is_sparse_) {
139  std::sort(non_zeros_.begin(), non_zeros_.end());
140  for (const glop::ColIndex col : non_zeros_) {
141  const IntegerValue coeff = dense_vector_[col];
142  if (coeff != 0) result.push_back({col, coeff});
143  }
144  } else {
145  const int size = dense_vector_.size();
146  for (glop::ColIndex col(0); col < size; ++col) {
147  const IntegerValue coeff = dense_vector_[col];
148  if (coeff != 0) result.push_back({col, coeff});
149  }
150  }
151  return result;
152 }
153 
154 // TODO(user): make SatParameters singleton too, otherwise changing them after
155 // a constraint was added will have no effect on this class.
157  : constraint_manager_(model),
158  sat_parameters_(*(model->GetOrCreate<SatParameters>())),
159  model_(model),
160  time_limit_(model->GetOrCreate<TimeLimit>()),
161  integer_trail_(model->GetOrCreate<IntegerTrail>()),
162  trail_(model->GetOrCreate<Trail>()),
163  integer_encoder_(model->GetOrCreate<IntegerEncoder>()),
164  random_(model->GetOrCreate<ModelRandomGenerator>()),
165  implied_bounds_processor_({}, integer_trail_,
166  model->GetOrCreate<ImpliedBounds>()),
167  dispatcher_(model->GetOrCreate<LinearProgrammingDispatcher>()),
168  expanded_lp_solution_(
169  *model->GetOrCreate<LinearProgrammingConstraintLpSolution>()) {
170  // Tweak the default parameters to make the solve incremental.
171  glop::GlopParameters parameters;
172  parameters.set_use_dual_simplex(true);
173  simplex_.SetParameters(parameters);
174  if (sat_parameters_.use_branching_in_lp() ||
175  sat_parameters_.search_branching() == SatParameters::LP_SEARCH) {
176  compute_reduced_cost_averages_ = true;
177  }
178 
179  // Register our local rev int repository.
180  integer_trail_->RegisterReversibleClass(&rc_rev_int_repository_);
181 }
182 
184  VLOG(1) << "Total number of simplex iterations: "
185  << total_num_simplex_iterations_;
186  for (int i = 0; i < num_solves_by_status_.size(); ++i) {
187  if (num_solves_by_status_[i] == 0) continue;
188  VLOG(1) << "#" << glop::ProblemStatus(i) << " : "
189  << num_solves_by_status_[i];
190  }
191 }
192 
194  const LinearConstraint& ct) {
195  DCHECK(!lp_constraint_is_registered_);
196  constraint_manager_.Add(ct);
197 
198  // We still create the mirror variable right away though.
199  //
200  // TODO(user): clean this up? Note that it is important that the variable
201  // in lp_data_ never changes though, so we can restart from the current
202  // lp solution and be incremental (even if the constraints changed).
203  for (const IntegerVariable var : ct.vars) {
204  GetOrCreateMirrorVariable(PositiveVariable(var));
205  }
206 }
207 
208 glop::ColIndex LinearProgrammingConstraint::GetOrCreateMirrorVariable(
209  IntegerVariable positive_variable) {
210  DCHECK(VariableIsPositive(positive_variable));
211  const auto it = mirror_lp_variable_.find(positive_variable);
212  if (it == mirror_lp_variable_.end()) {
213  const glop::ColIndex col(integer_variables_.size());
214  implied_bounds_processor_.AddLpVariable(positive_variable);
215  mirror_lp_variable_[positive_variable] = col;
216  integer_variables_.push_back(positive_variable);
217  lp_solution_.push_back(std::numeric_limits<double>::infinity());
218  lp_reduced_cost_.push_back(0.0);
219  (*dispatcher_)[positive_variable] = this;
220 
221  const int index = std::max(positive_variable.value(),
222  NegationOf(positive_variable).value());
223  if (index >= expanded_lp_solution_.size()) {
224  expanded_lp_solution_.resize(index + 1, 0.0);
225  }
226  return col;
227  }
228  return it->second;
229 }
230 
232  IntegerValue coeff) {
233  CHECK(!lp_constraint_is_registered_);
234  objective_is_defined_ = true;
235  IntegerVariable pos_var = VariableIsPositive(ivar) ? ivar : NegationOf(ivar);
236  if (ivar != pos_var) coeff = -coeff;
237 
238  constraint_manager_.SetObjectiveCoefficient(pos_var, coeff);
239  const glop::ColIndex col = GetOrCreateMirrorVariable(pos_var);
240  integer_objective_.push_back({col, coeff});
241  objective_infinity_norm_ =
242  std::max(objective_infinity_norm_, IntTypeAbs(coeff));
243 }
244 
245 // TODO(user): As the search progress, some variables might get fixed. Exploit
246 // this to reduce the number of variables in the LP and in the
247 // ConstraintManager? We might also detect during the search that two variable
248 // are equivalent.
249 //
250 // TODO(user): On TSP/VRP with a lot of cuts, this can take 20% of the overall
251 // running time. We should be able to almost remove most of this from the
252 // profile by being more incremental (modulo LP scaling).
253 //
254 // TODO(user): A longer term idea for LP with a lot of variables is to not
255 // add all variables to each LP solve and do some "sifting". That can be useful
256 // for TSP for instance where the number of edges is large, but only a small
257 // fraction will be used in the optimal solution.
// Rebuilds the glop LP (lp_data_) from scratch out of the constraints
// currently selected by constraint_manager_. Returns false only when one of
// these constraints has lb > ub, which proves infeasibility.
//
// Visible side effects: refills integer_lp_ and infinity_norms_, compacts
// integer_objective_ (variables fixed at level zero are folded into
// integer_objective_offset_), rescales the LP and, if requested by the
// parameters, sets up the basis-polishing integrality scales.
258 bool LinearProgrammingConstraint::CreateLpFromConstraintManager() {
259  // Fill integer_lp_.
260  integer_lp_.clear();
261  infinity_norms_.clear();
262  const auto& all_constraints = constraint_manager_.AllConstraints();
263  for (const auto index : constraint_manager_.LpConstraints()) {
264  const LinearConstraint& ct = all_constraints[index].constraint;
265 
266  integer_lp_.push_back(LinearConstraintInternal());
267  LinearConstraintInternal& new_ct = integer_lp_.back();
268  new_ct.lb = ct.lb;
269  new_ct.ub = ct.ub;
270  const int size = ct.vars.size();
271  IntegerValue infinity_norm(0);
272  if (ct.lb > ct.ub) {
273  VLOG(1) << "Trivial infeasible bound in an LP constraint";
274  return false;
275  }
// The per-constraint infinity norm tracks the largest absolute magnitude
// among the finite bounds and all coefficients.
276  if (ct.lb > kMinIntegerValue) {
277  infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.lb));
278  }
279  if (ct.ub < kMaxIntegerValue) {
280  infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.ub));
281  }
282  for (int i = 0; i < size; ++i) {
283  // We only use positive variable inside this class.
284  IntegerVariable var = ct.vars[i];
285  IntegerValue coeff = ct.coeffs[i];
286  if (!VariableIsPositive(var)) {
287  var = NegationOf(var);
288  coeff = -coeff;
289  }
290  infinity_norm = std::max(infinity_norm, IntTypeAbs(coeff));
291  new_ct.terms.push_back({GetOrCreateMirrorVariable(var), coeff});
292  }
293  infinity_norms_.push_back(infinity_norm);
294 
295  // Important to keep lp_data_ "clean".
296  std::sort(new_ct.terms.begin(), new_ct.terms.end());
297  }
298 
299  // Copy the integer_lp_ into lp_data_.
300  lp_data_.Clear();
301  for (int i = 0; i < integer_variables_.size(); ++i) {
302  CHECK_EQ(glop::ColIndex(i), lp_data_.CreateNewVariable());
303  }
304 
305  // We remove fixed variables from the objective. This should help the LP
306  // scaling, but also our integer reason computation.
307  int new_size = 0;
308  objective_infinity_norm_ = 0;
309  for (const auto entry : integer_objective_) {
310  const IntegerVariable var = integer_variables_[entry.first.value()];
311  if (integer_trail_->IsFixedAtLevelZero(var)) {
312  integer_objective_offset_ +=
313  entry.second * integer_trail_->LevelZeroLowerBound(var);
314  continue;
315  }
316  objective_infinity_norm_ =
317  std::max(objective_infinity_norm_, IntTypeAbs(entry.second));
318  integer_objective_[new_size++] = entry;
319  lp_data_.SetObjectiveCoefficient(entry.first, ToDouble(entry.second));
320  }
// Note that the offset itself also participates in the objective norm.
321  objective_infinity_norm_ =
322  std::max(objective_infinity_norm_, IntTypeAbs(integer_objective_offset_));
323  integer_objective_.resize(new_size);
324  lp_data_.SetObjectiveOffset(ToDouble(integer_objective_offset_));
325 
326  for (const LinearConstraintInternal& ct : integer_lp_) {
327  const ConstraintIndex row = lp_data_.CreateNewConstraint();
328  lp_data_.SetConstraintBounds(row, ToDouble(ct.lb), ToDouble(ct.ub));
329  for (const auto& term : ct.terms) {
330  lp_data_.SetCoefficient(row, term.first, ToDouble(term.second));
331  }
332  }
333  lp_data_.NotifyThatColumnsAreClean();
334 
335  // We scale the LP using the level zero bounds that we later override
336  // with the current ones.
337  //
338  // TODO(user): As part of the scaling, we may also want to shift the initial
339  // variable bounds so that each variable contain the value zero in their
340  // domain. Maybe just once and for all at the beginning.
341  const int num_vars = integer_variables_.size();
342  for (int i = 0; i < num_vars; i++) {
343  const IntegerVariable cp_var = integer_variables_[i];
344  const double lb = ToDouble(integer_trail_->LevelZeroLowerBound(cp_var));
345  const double ub = ToDouble(integer_trail_->LevelZeroUpperBound(cp_var));
346  lp_data_.SetVariableBounds(glop::ColIndex(i), lb, ub);
347  }
348 
349  // TODO(user): As we have an idea of the LP optimal after the first solves,
350  // maybe we can adapt the scaling accordingly.
351  glop::GlopParameters params;
352  params.set_cost_scaling(glop::GlopParameters::MEAN_COST_SCALING);
353  scaler_.Scale(params, &lp_data_);
// Override the scaling bounds with the current (possibly tighter) ones.
354  UpdateBoundsOfLpVariables();
355 
356  // Set the information for the step to polish the LP basis. All our variables
357  // are integer, but for now, we just try to minimize the fractionality of the
358  // binary variables.
359  if (sat_parameters_.polish_lp_solution()) {
360  simplex_.ClearIntegralityScales();
361  for (int i = 0; i < num_vars; ++i) {
362  const IntegerVariable cp_var = integer_variables_[i];
363  const IntegerValue lb = integer_trail_->LevelZeroLowerBound(cp_var);
364  const IntegerValue ub = integer_trail_->LevelZeroUpperBound(cp_var);
// Only variables with a [0, 1] level-zero domain get an integrality scale.
365  if (lb != 0 || ub != 1) continue;
366  simplex_.SetIntegralityScale(
367  glop::ColIndex(i),
368  1.0 / scaler_.VariableScalingFactor(glop::ColIndex(i)));
369  }
370  }
371 
372  lp_data_.NotifyThatColumnsAreClean();
373  lp_data_.AddSlackVariablesWhereNecessary(false);
374  VLOG(1) << "LP relaxation: " << lp_data_.GetDimensionString() << ". "
375  << constraint_manager_.AllConstraints().size()
376  << " Managed constraints.";
377  return true;
378 }
379 
380 LPSolveInfo LinearProgrammingConstraint::SolveLpForBranching() {
381  LPSolveInfo info;
382  glop::BasisState basis_state = simplex_.GetState();
383 
384  const glop::Status status = simplex_.Solve(lp_data_, time_limit_);
385  total_num_simplex_iterations_ += simplex_.GetNumberOfIterations();
386  simplex_.LoadStateForNextSolve(basis_state);
387  if (!status.ok()) {
388  VLOG(1) << "The LP solver encountered an error: " << status.error_message();
389  info.status = glop::ProblemStatus::ABNORMAL;
390  return info;
391  }
392  info.status = simplex_.GetProblemStatus();
393  if (info.status == glop::ProblemStatus::OPTIMAL ||
394  info.status == glop::ProblemStatus::DUAL_FEASIBLE) {
395  // Record the objective bound.
396  info.lp_objective = simplex_.GetObjectiveValue();
397  info.new_obj_bound = IntegerValue(
398  static_cast<int64>(std::ceil(info.lp_objective - kCpEpsilon)));
399  }
400  return info;
401 }
402 
403 void LinearProgrammingConstraint::FillReducedCostReasonIn(
404  const glop::DenseRow& reduced_costs,
405  std::vector<IntegerLiteral>* integer_reason) {
406  integer_reason->clear();
407  const int num_vars = integer_variables_.size();
408  for (int i = 0; i < num_vars; i++) {
409  const double rc = reduced_costs[glop::ColIndex(i)];
410  if (rc > kLpEpsilon) {
411  integer_reason->push_back(
412  integer_trail_->LowerBoundAsLiteral(integer_variables_[i]));
413  } else if (rc < -kLpEpsilon) {
414  integer_reason->push_back(
415  integer_trail_->UpperBoundAsLiteral(integer_variables_[i]));
416  }
417  }
418 
419  integer_trail_->RemoveLevelZeroBounds(integer_reason);
420 }
421 
// Probing branch on a variable whose LP value is fractional: solve the LP
// once with var <= floor(value) (LP1) and once with var >= ceil(value)
// (LP2), and push any deduction this implies:
//   - one side dual-unbounded (infeasible) => bound the variable the other
//     way (or report a conflict if both sides are infeasible);
//   - both sides bounded => the min of the two objective bounds is a valid
//     lower bound for objective_cp_.
// Returns true when the final objective deduction was pushed; earlier exits
// return whether any deduction was made up to that point.
//
// NOTE(review): on exit lp_data_ keeps the LP2 bounds of the probed
// variable; presumably UpdateBoundsOfLpVariables() (called at the start of
// this method and before regular solves) restores them — confirm.
422 bool LinearProgrammingConstraint::BranchOnVar(IntegerVariable positive_var) {
423  // From the current LP solution, branch on the given var if fractional.
424  DCHECK(lp_solution_is_set_);
425  const double current_value = GetSolutionValue(positive_var);
426  DCHECK_GT(std::abs(current_value - std::round(current_value)), kCpEpsilon);
427 
428  // Used as empty reason in this method.
429  integer_reason_.clear();
430 
431  bool deductions_were_made = false;
432 
433  UpdateBoundsOfLpVariables();
434 
435  const IntegerValue current_obj_lb = integer_trail_->LowerBound(objective_cp_);
436  // This will try to branch in both direction around the LP value of the
437  // given variable and push any deduction done this way.
438 
439  const glop::ColIndex lp_var = GetOrCreateMirrorVariable(positive_var);
440  const double current_lb = ToDouble(integer_trail_->LowerBound(positive_var));
441  const double current_ub = ToDouble(integer_trail_->UpperBound(positive_var));
442  const double factor = scaler_.VariableScalingFactor(lp_var);
// Nothing to probe if the LP value already escaped the CP domain.
443  if (current_value < current_lb || current_value > current_ub) {
444  return false;
445  }
446 
447  // Form LP1 var <= floor(current_value)
448  const double new_ub = std::floor(current_value);
449  lp_data_.SetVariableBounds(lp_var, current_lb * factor, new_ub * factor);
450 
451  LPSolveInfo lower_branch_info = SolveLpForBranching();
// Only OPTIMAL / DUAL_FEASIBLE (bound available) and DUAL_UNBOUNDED
// (primal infeasible) are exploitable; bail out on anything else.
452  if (lower_branch_info.status != glop::ProblemStatus::OPTIMAL &&
453  lower_branch_info.status != glop::ProblemStatus::DUAL_FEASIBLE &&
454  lower_branch_info.status != glop::ProblemStatus::DUAL_UNBOUNDED) {
455  return false;
456  }
457 
458  if (lower_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
459  // Push the other branch.
460  const IntegerLiteral deduction = IntegerLiteral::GreaterOrEqual(
461  positive_var, IntegerValue(std::ceil(current_value)));
462  if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
463  return false;
464  }
465  deductions_were_made = true;
// If the branch does not improve on the current objective bound, the whole
// probe cannot either: stop early.
466  } else if (lower_branch_info.new_obj_bound <= current_obj_lb) {
467  return false;
468  }
469 
470  // Form LP2 var >= ceil(current_value)
471  const double new_lb = std::ceil(current_value);
472  lp_data_.SetVariableBounds(lp_var, new_lb * factor, current_ub * factor);
473 
474  LPSolveInfo upper_branch_info = SolveLpForBranching();
475  if (upper_branch_info.status != glop::ProblemStatus::OPTIMAL &&
476  upper_branch_info.status != glop::ProblemStatus::DUAL_FEASIBLE &&
477  upper_branch_info.status != glop::ProblemStatus::DUAL_UNBOUNDED) {
478  return deductions_were_made;
479  }
480 
481  if (upper_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
482  // Push the other branch if not infeasible.
483  if (lower_branch_info.status != glop::ProblemStatus::DUAL_UNBOUNDED) {
484  const IntegerLiteral deduction = IntegerLiteral::LowerOrEqual(
485  positive_var, IntegerValue(std::floor(current_value)));
486  if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
487  return deductions_were_made;
488  }
489  deductions_were_made = true;
490  }
491  } else if (upper_branch_info.new_obj_bound <= current_obj_lb) {
492  return deductions_were_made;
493  }
494 
495  IntegerValue approximate_obj_lb = kMinIntegerValue;
496 
// Combine the two branch results into an objective lower bound: both
// infeasible => conflict; one infeasible => use the other branch's bound;
// otherwise the min of the two bounds is valid.
497  if (lower_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED &&
498  upper_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
499  return integer_trail_->ReportConflict(integer_reason_);
500  } else if (lower_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
501  approximate_obj_lb = upper_branch_info.new_obj_bound;
502  } else if (upper_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
503  approximate_obj_lb = lower_branch_info.new_obj_bound;
504  } else {
505  approximate_obj_lb = std::min(lower_branch_info.new_obj_bound,
506  upper_branch_info.new_obj_bound);
507  }
508 
509  // NOTE: On some problems, the approximate_obj_lb could be inexact which add
510  // some tolerance to CP-SAT where currently there is none.
511  if (approximate_obj_lb <= current_obj_lb) return deductions_were_made;
512 
513  // Push the bound to the trail.
514  const IntegerLiteral deduction =
515  IntegerLiteral::GreaterOrEqual(objective_cp_, approximate_obj_lb);
516  if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
517  return deductions_were_made;
518  }
519 
520  return true;
521 }
522 
524  DCHECK(!lp_constraint_is_registered_);
525  lp_constraint_is_registered_ = true;
526  model->GetOrCreate<LinearProgrammingConstraintCollection>()->push_back(this);
527 
528  // Note that this is not really needed but should lead to better cache
529  // locality.
530  std::sort(integer_objective_.begin(), integer_objective_.end());
531 
532  // Set the LP to its initial content.
533  if (!sat_parameters_.add_lp_constraints_lazily()) {
534  constraint_manager_.AddAllConstraintsToLp();
535  }
536  if (!CreateLpFromConstraintManager()) {
537  model->GetOrCreate<SatSolver>()->NotifyThatModelIsUnsat();
538  return;
539  }
540 
541  GenericLiteralWatcher* watcher = model->GetOrCreate<GenericLiteralWatcher>();
542  const int watcher_id = watcher->Register(this);
543  const int num_vars = integer_variables_.size();
544  for (int i = 0; i < num_vars; i++) {
545  watcher->WatchIntegerVariable(integer_variables_[i], watcher_id, i);
546  }
547  if (objective_is_defined_) {
548  watcher->WatchUpperBound(objective_cp_, watcher_id);
549  }
550  watcher->SetPropagatorPriority(watcher_id, 2);
551  watcher->AlwaysCallAtLevelZero(watcher_id);
552 
553  // Registering it with the trail make sure this class is always in sync when
554  // it is used in the decision heuristics.
555  integer_trail_->RegisterReversibleClass(this);
556  watcher->RegisterReversibleInt(watcher_id, &rev_optimal_constraints_size_);
557 }
558 
560  optimal_constraints_.resize(rev_optimal_constraints_size_);
561  if (lp_solution_is_set_ && level < lp_solution_level_) {
562  lp_solution_is_set_ = false;
563  }
564 
565  // Special case for level zero, we "reload" any previously known optimal
566  // solution from that level.
567  //
568  // TODO(user): Keep all optimal solution in the current branch?
569  // TODO(user): Still try to add cuts/constraints though!
570  if (level == 0 && !level_zero_lp_solution_.empty()) {
571  lp_solution_is_set_ = true;
572  lp_solution_ = level_zero_lp_solution_;
573  lp_solution_level_ = 0;
574  for (int i = 0; i < lp_solution_.size(); i++) {
575  expanded_lp_solution_[integer_variables_[i]] = lp_solution_[i];
576  expanded_lp_solution_[NegationOf(integer_variables_[i])] =
577  -lp_solution_[i];
578  }
579  }
580 }
581 
583  for (const IntegerVariable var : generator.vars) {
584  GetOrCreateMirrorVariable(VariableIsPositive(var) ? var : NegationOf(var));
585  }
586  cut_generators_.push_back(std::move(generator));
587 }
588 
590  const std::vector<int>& watch_indices) {
591  if (!lp_solution_is_set_) return Propagate();
592 
593  // At level zero, if there is still a chance to add cuts or lazy constraints,
594  // we re-run the LP.
595  if (trail_->CurrentDecisionLevel() == 0 && !lp_at_level_zero_is_final_) {
596  return Propagate();
597  }
598 
599  // Check whether the change breaks the current LP solution. If it does, call
600  // Propagate() on the current LP.
601  for (const int index : watch_indices) {
602  const double lb =
603  ToDouble(integer_trail_->LowerBound(integer_variables_[index]));
604  const double ub =
605  ToDouble(integer_trail_->UpperBound(integer_variables_[index]));
606  const double value = lp_solution_[index];
607  if (value < lb - kCpEpsilon || value > ub + kCpEpsilon) return Propagate();
608  }
609 
610  // TODO(user): The saved lp solution is still valid given the current variable
611  // bounds, so the LP optimal didn't change. However we might still want to add
612  // new cuts or new lazy constraints?
613  //
614  // TODO(user): Propagate the last optimal_constraint? Note that we need
615  // to be careful since the reversible int in IntegerSumLE are not registered.
616  // However, because we delete "optimalconstraints" on backtrack, we might not
617  // care.
618  return true;
619 }
620 
621 glop::Fractional LinearProgrammingConstraint::GetVariableValueAtCpScale(
622  glop::ColIndex var) {
623  return scaler_.UnscaleVariableValue(var, simplex_.GetVariableValue(var));
624 }
625 
627  IntegerVariable variable) const {
628  return lp_solution_[gtl::FindOrDie(mirror_lp_variable_, variable).value()];
629 }
630 
632  IntegerVariable variable) const {
633  return lp_reduced_cost_[gtl::FindOrDie(mirror_lp_variable_, variable)
634  .value()];
635 }
636 
637 void LinearProgrammingConstraint::UpdateBoundsOfLpVariables() {
638  const int num_vars = integer_variables_.size();
639  for (int i = 0; i < num_vars; i++) {
640  const IntegerVariable cp_var = integer_variables_[i];
641  const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
642  const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
643  const double factor = scaler_.VariableScalingFactor(glop::ColIndex(i));
644  lp_data_.SetVariableBounds(glop::ColIndex(i), lb * factor, ub * factor);
645  }
646 }
647 
648 bool LinearProgrammingConstraint::SolveLp() {
649  if (trail_->CurrentDecisionLevel() == 0) {
650  lp_at_level_zero_is_final_ = false;
651  }
652 
653  const auto status = simplex_.Solve(lp_data_, time_limit_);
654  total_num_simplex_iterations_ += simplex_.GetNumberOfIterations();
655  if (!status.ok()) {
656  VLOG(1) << "The LP solver encountered an error: " << status.error_message();
657  simplex_.ClearStateForNextSolve();
658  return false;
659  }
660  average_degeneracy_.AddData(CalculateDegeneracy());
661  if (average_degeneracy_.CurrentAverage() >= 1000.0) {
662  VLOG(2) << "High average degeneracy: "
663  << average_degeneracy_.CurrentAverage();
664  }
665 
666  const int status_as_int = static_cast<int>(simplex_.GetProblemStatus());
667  if (status_as_int >= num_solves_by_status_.size()) {
668  num_solves_by_status_.resize(status_as_int + 1);
669  }
670  num_solves_by_status_[status_as_int]++;
671  VLOG(2) << "lvl:" << trail_->CurrentDecisionLevel() << " "
672  << simplex_.GetProblemStatus()
673  << " iter:" << simplex_.GetNumberOfIterations()
674  << " obj:" << simplex_.GetObjectiveValue();
675 
677  lp_solution_is_set_ = true;
678  lp_solution_level_ = trail_->CurrentDecisionLevel();
679  const int num_vars = integer_variables_.size();
680  for (int i = 0; i < num_vars; i++) {
681  const glop::Fractional value =
682  GetVariableValueAtCpScale(glop::ColIndex(i));
683  lp_solution_[i] = value;
684  expanded_lp_solution_[integer_variables_[i]] = value;
685  expanded_lp_solution_[NegationOf(integer_variables_[i])] = -value;
686  }
687 
688  if (lp_solution_level_ == 0) {
689  level_zero_lp_solution_ = lp_solution_;
690  }
691  }
692  return true;
693 }
694 
695 bool LinearProgrammingConstraint::AddCutFromConstraints(
696  const std::string& name,
697  const std::vector<std::pair<RowIndex, IntegerValue>>& integer_multipliers) {
698  // This is initialized to a valid linear constraint (by taking linear
699  // combination of the LP rows) and will be transformed into a cut if
700  // possible.
701  //
702  // TODO(user): For CG cuts, Ideally this linear combination should have only
703  // one fractional variable (basis_col). But because of imprecision, we get a
704  // bunch of fractional entry with small coefficient (relative to the one of
705  // basis_col). We try to handle that in IntegerRoundingCut(), but it might be
706  // better to add small multiple of the involved rows to get rid of them.
707  IntegerValue cut_ub;
708  if (!ComputeNewLinearConstraint(integer_multipliers, &tmp_scattered_vector_,
709  &cut_ub)) {
710  VLOG(1) << "Issue, overflow!";
711  return false;
712  }
713 
714  // Important: because we use integer_multipliers below, we cannot just
715  // divide by GCD or call PreventOverflow() here.
716  //
717  // TODO(user): the conversion col_index -> IntegerVariable is slow and could
718  // in principle be removed. Easy for cuts, but not so much for
719  // implied_bounds_processor_. Note that in theory this could allow us to
720  // use Literal directly without the need to have an IntegerVariable for them.
721  tmp_scattered_vector_.ConvertToLinearConstraint(integer_variables_, cut_ub,
722  &cut_);
723 
724  // Note that the base constraint we use are currently always tight.
725  // It is not a requirement though.
726  if (DEBUG_MODE) {
727  const double norm = ToDouble(ComputeInfinityNorm(cut_));
728  const double activity = ComputeActivity(cut_, expanded_lp_solution_);
729  if (std::abs(activity - ToDouble(cut_.ub)) / norm > 1e-4) {
730  VLOG(1) << "Cut not tight " << activity << " <= " << ToDouble(cut_.ub);
731  return false;
732  }
733  }
734  CHECK(constraint_manager_.DebugCheckConstraint(cut_));
735 
736  // We will create "artificial" variables after this index that will be
737  // substituted back into LP variables afterwards. Also note that we only use
738  // positive variable indices for these new variables, so that algorithm that
739  // take their negation will not mess up the indexing.
740  const IntegerVariable first_new_var(expanded_lp_solution_.size());
741  CHECK_EQ(first_new_var.value() % 2, 0);
742 
743  LinearConstraint copy_in_debug;
744  if (DEBUG_MODE) {
745  copy_in_debug = cut_;
746  }
747 
748  // Unlike for the knapsack cuts, it might not be always beneficial to
749  // process the implied bounds even though it seems to be better in average.
750  //
751  // TODO(user): Perform more experiments, in particular with which bound we use
752  // and if we complement or not before the MIR rounding. Other solvers seems
753  // to try different complementation strategies in a "potprocessing" and we
754  // don't. Try this too.
755  std::vector<ImpliedBoundsProcessor::SlackInfo> ib_slack_infos;
756  implied_bounds_processor_.ProcessUpperBoundedConstraintWithSlackCreation(
757  /*substitute_only_inner_variables=*/false, first_new_var,
758  expanded_lp_solution_, &cut_, &ib_slack_infos);
759  DCHECK(implied_bounds_processor_.DebugSlack(first_new_var, copy_in_debug,
760  cut_, ib_slack_infos));
761 
762  // Fills data for IntegerRoundingCut().
763  //
764  // Note(user): we use the current bound here, so the reasoning will only
765  // produce locally valid cut if we call this at a non-root node. We could
766  // use the level zero bounds if we wanted to generate a globally valid cut
767  // at another level. For now this is only called at level zero anyway.
768  tmp_lp_values_.clear();
769  tmp_var_lbs_.clear();
770  tmp_var_ubs_.clear();
771  for (const IntegerVariable var : cut_.vars) {
772  if (var >= first_new_var) {
774  const auto& info =
775  ib_slack_infos[(var.value() - first_new_var.value()) / 2];
776  tmp_lp_values_.push_back(info.lp_value);
777  tmp_var_lbs_.push_back(info.lb);
778  tmp_var_ubs_.push_back(info.ub);
779  } else {
780  tmp_lp_values_.push_back(expanded_lp_solution_[var]);
781  tmp_var_lbs_.push_back(integer_trail_->LevelZeroLowerBound(var));
782  tmp_var_ubs_.push_back(integer_trail_->LevelZeroUpperBound(var));
783  }
784  }
785 
786  // Add slack.
787  // definition: integer_lp_[row] + slack_row == bound;
788  const IntegerVariable first_slack(first_new_var +
789  IntegerVariable(2 * ib_slack_infos.size()));
790  tmp_slack_rows_.clear();
791  tmp_slack_bounds_.clear();
792  for (const auto pair : integer_multipliers) {
793  const RowIndex row = pair.first;
794  const IntegerValue coeff = pair.second;
795  const auto status = simplex_.GetConstraintStatus(row);
796  if (status == glop::ConstraintStatus::FIXED_VALUE) continue;
797 
798  tmp_lp_values_.push_back(0.0);
799  cut_.vars.push_back(first_slack +
800  2 * IntegerVariable(tmp_slack_rows_.size()));
801  tmp_slack_rows_.push_back(row);
802  cut_.coeffs.push_back(coeff);
803 
804  const IntegerValue diff(
805  CapSub(integer_lp_[row].ub.value(), integer_lp_[row].lb.value()));
806  if (coeff > 0) {
807  tmp_slack_bounds_.push_back(integer_lp_[row].ub);
808  tmp_var_lbs_.push_back(IntegerValue(0));
809  tmp_var_ubs_.push_back(diff);
810  } else {
811  tmp_slack_bounds_.push_back(integer_lp_[row].lb);
812  tmp_var_lbs_.push_back(-diff);
813  tmp_var_ubs_.push_back(IntegerValue(0));
814  }
815  }
816 
817  bool at_least_one_added = false;
818 
819  // Try cover appraoch to find cut.
820  {
821  if (cover_cut_helper_.TrySimpleKnapsack(cut_, tmp_lp_values_, tmp_var_lbs_,
822  tmp_var_ubs_)) {
823  at_least_one_added |= PostprocessAndAddCut(
824  absl::StrCat(name, "_K"), cover_cut_helper_.Info(), first_new_var,
825  first_slack, ib_slack_infos, cover_cut_helper_.mutable_cut());
826  }
827  }
828 
829  // Try integer rounding heuristic to find cut.
830  {
831  RoundingOptions options;
832  options.max_scaling = sat_parameters_.max_integer_rounding_scaling();
833  integer_rounding_cut_helper_.ComputeCut(options, tmp_lp_values_,
834  tmp_var_lbs_, tmp_var_ubs_,
835  &implied_bounds_processor_, &cut_);
836  at_least_one_added |= PostprocessAndAddCut(
837  name,
838  absl::StrCat("num_lifted_booleans=",
839  integer_rounding_cut_helper_.NumLiftedBooleans()),
840  first_new_var, first_slack, ib_slack_infos, &cut_);
841  }
842  return at_least_one_added;
843 }
844 
845 bool LinearProgrammingConstraint::PostprocessAndAddCut(
846  const std::string& name, const std::string& info,
847  IntegerVariable first_new_var, IntegerVariable first_slack,
848  const std::vector<ImpliedBoundsProcessor::SlackInfo>& ib_slack_infos,
849  LinearConstraint* cut) {
850  // Compute the activity. Warning: the cut no longer have the same size so we
851  // cannot use tmp_lp_values_. Note that the substitution below shouldn't
852  // change the activity by definition.
853  double activity = 0.0;
854  for (int i = 0; i < cut->vars.size(); ++i) {
855  if (cut->vars[i] < first_new_var) {
856  activity +=
857  ToDouble(cut->coeffs[i]) * expanded_lp_solution_[cut->vars[i]];
858  }
859  }
860  const double kMinViolation = 1e-4;
861  const double violation = activity - ToDouble(cut->ub);
862  if (violation < kMinViolation) {
863  VLOG(3) << "Bad cut " << activity << " <= " << ToDouble(cut->ub);
864  return false;
865  }
866 
867  // Substitute any slack left.
868  {
869  int num_slack = 0;
870  tmp_scattered_vector_.ClearAndResize(integer_variables_.size());
871  IntegerValue cut_ub = cut->ub;
872  bool overflow = false;
873  for (int i = 0; i < cut->vars.size(); ++i) {
874  const IntegerVariable var = cut->vars[i];
875 
876  // Simple copy for non-slack variables.
877  if (var < first_new_var) {
878  const glop::ColIndex col =
879  gtl::FindOrDie(mirror_lp_variable_, PositiveVariable(var));
880  if (VariableIsPositive(var)) {
881  tmp_scattered_vector_.Add(col, cut->coeffs[i]);
882  } else {
883  tmp_scattered_vector_.Add(col, -cut->coeffs[i]);
884  }
885  continue;
886  }
887 
888  // Replace slack from bound substitution.
889  if (var < first_slack) {
890  const IntegerValue multiplier = cut->coeffs[i];
891  const int index = (var.value() - first_new_var.value()) / 2;
892  CHECK_LT(index, ib_slack_infos.size());
893 
894  std::vector<std::pair<ColIndex, IntegerValue>> terms;
895  for (const std::pair<IntegerVariable, IntegerValue>& term :
896  ib_slack_infos[index].terms) {
897  terms.push_back(
898  {gtl::FindOrDie(mirror_lp_variable_,
899  PositiveVariable(term.first)),
900  VariableIsPositive(term.first) ? term.second : -term.second});
901  }
902  if (!tmp_scattered_vector_.AddLinearExpressionMultiple(multiplier,
903  terms)) {
904  overflow = true;
905  break;
906  }
907  if (!AddProductTo(multiplier, -ib_slack_infos[index].offset, &cut_ub)) {
908  overflow = true;
909  break;
910  }
911  continue;
912  }
913 
914  // Replace slack from LP constraints.
915  ++num_slack;
916  const int slack_index = (var.value() - first_slack.value()) / 2;
917  const glop::RowIndex row = tmp_slack_rows_[slack_index];
918  const IntegerValue multiplier = -cut->coeffs[i];
919  if (!tmp_scattered_vector_.AddLinearExpressionMultiple(
920  multiplier, integer_lp_[row].terms)) {
921  overflow = true;
922  break;
923  }
924 
925  // Update rhs.
926  if (!AddProductTo(multiplier, tmp_slack_bounds_[slack_index], &cut_ub)) {
927  overflow = true;
928  break;
929  }
930  }
931 
932  if (overflow) {
933  VLOG(1) << "Overflow in slack removal.";
934  return false;
935  }
936 
937  VLOG(3) << " num_slack: " << num_slack;
938  tmp_scattered_vector_.ConvertToLinearConstraint(integer_variables_, cut_ub,
939  cut);
940  }
941 
942  // Display some stats used for investigation of cut generation.
943  const std::string extra_info =
944  absl::StrCat(info, " num_ib_substitutions=", ib_slack_infos.size());
945 
946  const double new_violation =
947  ComputeActivity(*cut, expanded_lp_solution_) - ToDouble(cut_.ub);
948  if (std::abs(violation - new_violation) >= 1e-4) {
949  VLOG(1) << "Violation discrepancy after slack removal. "
950  << " before = " << violation << " after = " << new_violation;
951  }
952 
953  DivideByGCD(cut);
954  return constraint_manager_.AddCut(*cut, name, expanded_lp_solution_,
955  extra_info);
956 }
957 
958 void LinearProgrammingConstraint::AddCGCuts() {
959  const RowIndex num_rows = lp_data_.num_constraints();
960  for (RowIndex row(0); row < num_rows; ++row) {
961  ColIndex basis_col = simplex_.GetBasis(row);
962  const Fractional lp_value = GetVariableValueAtCpScale(basis_col);
963 
964  // TODO(user): We could just look at the diff with std::floor() in the hope
965  // that when we are just under an integer, the exact computation below will
966  // also be just under it.
967  if (std::abs(lp_value - std::round(lp_value)) < 0.01) continue;
968 
969  // If this variable is a slack, we ignore it. This is because the
970  // corresponding row is not tight under the given lp values.
971  if (basis_col >= integer_variables_.size()) continue;
972 
973  if (time_limit_->LimitReached()) break;
974 
975  const glop::ScatteredRow& lambda = simplex_.GetUnitRowLeftInverse(row);
976  glop::DenseColumn lp_multipliers(num_rows, 0.0);
977  double magnitude = 0.0;
978  int num_non_zeros = 0;
979  for (RowIndex row(0); row < num_rows; ++row) {
980  lp_multipliers[row] = lambda.values[glop::RowToColIndex(row)];
981  if (std::abs(lp_multipliers[row]) < 1e-12) {
982  lp_multipliers[row] = 0.0;
983  continue;
984  }
985 
986  // There should be no BASIC status, but they could be imprecision
987  // in the GetUnitRowLeftInverse() code? not sure, so better be safe.
988  const auto status = simplex_.GetConstraintStatus(row);
989  if (status == glop::ConstraintStatus::BASIC) {
990  VLOG(1) << "BASIC row not expected! " << lp_multipliers[row];
991  lp_multipliers[row] = 0.0;
992  }
993 
994  magnitude = std::max(magnitude, std::abs(lp_multipliers[row]));
995  if (lp_multipliers[row] != 0.0) ++num_non_zeros;
996  }
997  if (num_non_zeros == 0) continue;
998 
999  Fractional scaling;
1000  for (int i = 0; i < 2; ++i) {
1001  if (i == 1) {
1002  // Try other sign.
1003  //
1004  // TODO(user): Maybe add an heuristic to know beforehand which sign to
1005  // use?
1006  for (RowIndex row(0); row < num_rows; ++row) {
1007  lp_multipliers[row] = -lp_multipliers[row];
1008  }
1009  }
1010 
1011  // TODO(user): We use a lower value here otherwise we might run into
1012  // overflow while computing the cut. This should be fixable.
1013  const std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers =
1014  ScaleLpMultiplier(/*take_objective_into_account=*/false,
1015  lp_multipliers, &scaling, /*max_pow=*/52);
1016  AddCutFromConstraints("CG", integer_multipliers);
1017  }
1018  }
1019 }
1020 
1021 namespace {
1022 
1023 // For each element of a, adds a random one in b and append the pair to output.
1024 void RandomPick(const std::vector<RowIndex>& a, const std::vector<RowIndex>& b,
1025  ModelRandomGenerator* random,
1026  std::vector<std::pair<RowIndex, RowIndex>>* output) {
1027  if (a.empty() || b.empty()) return;
1028  for (const RowIndex row : a) {
1029  const RowIndex other = b[absl::Uniform<int>(*random, 0, b.size())];
1030  if (other != row) {
1031  output->push_back({row, other});
1032  }
1033  }
1034 }
1035 
1036 template <class ListOfTerms>
1037 IntegerValue GetCoeff(ColIndex col, const ListOfTerms& terms) {
1038  for (const auto& term : terms) {
1039  if (term.first == col) return term.second;
1040  }
1041  return IntegerValue(0);
1042 }
1043 
1044 } // namespace
1045 
1046 void LinearProgrammingConstraint::AddMirCuts() {
1047  // Heuristic to generate MIR_n cuts by combining a small number of rows. This
1048  // works greedily and follow more or less the MIR cut description in the
1049  // literature. We have a current cut, and we add one more row to it while
1050  // eliminating a variable of the current cut whose LP value is far from its
1051  // bound.
1052  //
1053  // A notable difference is that we randomize the variable we eliminate and
1054  // the row we use to do so. We still have weights to indicate our preferred
1055  // choices. This allows to generate different cuts when called again and
1056  // again.
1057  //
1058  // TODO(user): We could combine n rows to make sure we eliminate n variables
1059  // far away from their bounds by solving exactly in integer small linear
1060  // system.
1062  integer_variables_.size(), IntegerValue(0));
1063  SparseBitset<ColIndex> non_zeros_(ColIndex(integer_variables_.size()));
1064 
1065  // We compute all the rows that are tight, these will be used as the base row
1066  // for the MIR_n procedure below.
1067  const RowIndex num_rows = lp_data_.num_constraints();
1068  std::vector<std::pair<RowIndex, IntegerValue>> base_rows;
1069  absl::StrongVector<RowIndex, double> row_weights(num_rows.value(), 0.0);
1070  for (RowIndex row(0); row < num_rows; ++row) {
1071  const auto status = simplex_.GetConstraintStatus(row);
1072  if (status == glop::ConstraintStatus::BASIC) continue;
1073  if (status == glop::ConstraintStatus::FREE) continue;
1074 
1077  base_rows.push_back({row, IntegerValue(1)});
1078  }
1081  base_rows.push_back({row, IntegerValue(-1)});
1082  }
1083 
1084  // For now, we use the dual values for the row "weights".
1085  //
1086  // Note that we use the dual at LP scale so that it make more sense when we
1087  // compare different rows since the LP has been scaled.
1088  //
1089  // TODO(user): In Kati Wolter PhD "Implementation of Cutting Plane
1090  // Separators for Mixed Integer Programs" which describe SCIP's MIR cuts
1091  // implementation (or at least an early version of it), a more complex score
1092  // is used.
1093  //
1094  // Note(user): Because we only consider tight rows under the current lp
1095  // solution (i.e. non-basic rows), most should have a non-zero dual values.
1096  // But there is some degenerate problem where these rows have a really low
1097  // weight (or even zero), and having only weight of exactly zero in
1098  // std::discrete_distribution will result in a crash.
1099  row_weights[row] = std::max(1e-8, std::abs(simplex_.GetDualValue(row)));
1100  }
1101 
1102  std::vector<double> weights;
1104  std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
1105  for (const std::pair<RowIndex, IntegerValue>& entry : base_rows) {
1106  if (time_limit_->LimitReached()) break;
1107 
1108  // First try to generate a cut directly from this base row (MIR1).
1109  //
1110  // Note(user): We abort on success like it seems to be done in the
1111  // literature. Note that we don't succeed that often in generating an
1112  // efficient cut, so I am not sure aborting will make a big difference
1113  // speedwise. We might generate similar cuts though, but hopefully the cut
1114  // management can deal with that.
1115  integer_multipliers = {entry};
1116  if (AddCutFromConstraints("MIR_1", integer_multipliers)) {
1117  continue;
1118  }
1119 
1120  // Cleanup.
1121  for (const ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
1122  dense_cut[col] = IntegerValue(0);
1123  }
1124  non_zeros_.SparseClearAll();
1125 
1126  // Copy cut.
1127  const IntegerValue multiplier = entry.second;
1128  for (const std::pair<ColIndex, IntegerValue> term :
1129  integer_lp_[entry.first].terms) {
1130  const ColIndex col = term.first;
1131  const IntegerValue coeff = term.second;
1132  non_zeros_.Set(col);
1133  dense_cut[col] += coeff * multiplier;
1134  }
1135 
1136  used_rows.assign(num_rows.value(), false);
1137  used_rows[entry.first] = true;
1138 
1139  // We will aggregate at most kMaxAggregation more rows.
1140  //
1141  // TODO(user): optim + tune.
1142  const int kMaxAggregation = 5;
1143  for (int i = 0; i < kMaxAggregation; ++i) {
1144  // First pick a variable to eliminate. We currently pick a random one with
1145  // a weight that depend on how far it is from its closest bound.
1146  IntegerValue max_magnitude(0);
1147  weights.clear();
1148  std::vector<ColIndex> col_candidates;
1149  for (const ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
1150  if (dense_cut[col] == 0) continue;
1151 
1152  max_magnitude = std::max(max_magnitude, IntTypeAbs(dense_cut[col]));
1153  const int col_degree =
1154  lp_data_.GetSparseColumn(col).num_entries().value();
1155  if (col_degree <= 1) continue;
1157  continue;
1158  }
1159 
1160  const IntegerVariable var = integer_variables_[col.value()];
1161  const double lp_value = expanded_lp_solution_[var];
1162  const double lb = ToDouble(integer_trail_->LevelZeroLowerBound(var));
1163  const double ub = ToDouble(integer_trail_->LevelZeroUpperBound(var));
1164  const double bound_distance = std::min(ub - lp_value, lp_value - lb);
1165  if (bound_distance > 1e-2) {
1166  weights.push_back(bound_distance);
1167  col_candidates.push_back(col);
1168  }
1169  }
1170  if (col_candidates.empty()) break;
1171 
1172  const ColIndex var_to_eliminate =
1173  col_candidates[std::discrete_distribution<>(weights.begin(),
1174  weights.end())(*random_)];
1175 
1176  // What rows can we add to eliminate var_to_eliminate?
1177  std::vector<RowIndex> possible_rows;
1178  weights.clear();
1179  for (const auto entry : lp_data_.GetSparseColumn(var_to_eliminate)) {
1180  const RowIndex row = entry.row();
1181  const auto status = simplex_.GetConstraintStatus(row);
1182  if (status == glop::ConstraintStatus::BASIC) continue;
1183  if (status == glop::ConstraintStatus::FREE) continue;
1184 
1185  // We disallow all the rows that contain a variable that we already
1186  // eliminated (or are about to). This mean that we choose rows that
1187  // form a "triangular" matrix on the position we choose to eliminate.
1188  if (used_rows[row]) continue;
1189  used_rows[row] = true;
1190 
1191  // TODO(user): Instead of using FIXED_VALUE consider also both direction
1192  // when we almost have an equality? that is if the LP constraints bounds
1193  // are close from each others (<1e-6 ?). Initial experiments shows it
1194  // doesn't change much, so I kept this version for now. Note that it
1195  // might just be better to use the side that constrain the current lp
1196  // optimal solution (that we get from the status).
1197  bool add_row = false;
1198  if (status == glop::ConstraintStatus::FIXED_VALUE ||
1200  if (entry.coefficient() > 0.0) {
1201  if (dense_cut[var_to_eliminate] < 0) add_row = true;
1202  } else {
1203  if (dense_cut[var_to_eliminate] > 0) add_row = true;
1204  }
1205  }
1206  if (status == glop::ConstraintStatus::FIXED_VALUE ||
1208  if (entry.coefficient() > 0.0) {
1209  if (dense_cut[var_to_eliminate] > 0) add_row = true;
1210  } else {
1211  if (dense_cut[var_to_eliminate] < 0) add_row = true;
1212  }
1213  }
1214  if (add_row) {
1215  possible_rows.push_back(row);
1216  weights.push_back(row_weights[row]);
1217  }
1218  }
1219  if (possible_rows.empty()) break;
1220 
1221  const RowIndex row_to_combine =
1222  possible_rows[std::discrete_distribution<>(weights.begin(),
1223  weights.end())(*random_)];
1224  const IntegerValue to_combine_coeff =
1225  GetCoeff(var_to_eliminate, integer_lp_[row_to_combine].terms);
1226  CHECK_NE(to_combine_coeff, 0);
1227 
1228  IntegerValue mult1 = -to_combine_coeff;
1229  IntegerValue mult2 = dense_cut[var_to_eliminate];
1230  CHECK_NE(mult2, 0);
1231  if (mult1 < 0) {
1232  mult1 = -mult1;
1233  mult2 = -mult2;
1234  }
1235 
1236  const IntegerValue gcd = IntegerValue(
1237  MathUtil::GCD64(std::abs(mult1.value()), std::abs(mult2.value())));
1238  CHECK_NE(gcd, 0);
1239  mult1 /= gcd;
1240  mult2 /= gcd;
1241 
1242  // Overflow detection.
1243  //
1244  // TODO(user): do that in the possible_rows selection? only problem is
1245  // that we do not have the integer coefficient there...
1246  for (std::pair<RowIndex, IntegerValue>& entry : integer_multipliers) {
1247  max_magnitude = std::max(max_magnitude, entry.second);
1248  }
1249  if (CapAdd(CapProd(max_magnitude.value(), std::abs(mult1.value())),
1250  CapProd(infinity_norms_[row_to_combine].value(),
1251  std::abs(mult2.value()))) == kint64max) {
1252  break;
1253  }
1254 
1255  for (std::pair<RowIndex, IntegerValue>& entry : integer_multipliers) {
1256  entry.second *= mult1;
1257  }
1258  integer_multipliers.push_back({row_to_combine, mult2});
1259 
1260  // TODO(user): Not supper efficient to recombine the rows.
1261  if (AddCutFromConstraints(absl::StrCat("MIR_", i + 2),
1262  integer_multipliers)) {
1263  break;
1264  }
1265 
1266  // Minor optim: the computation below is only needed if we do one more
1267  // iteration.
1268  if (i + 1 == kMaxAggregation) break;
1269 
1270  for (ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
1271  dense_cut[col] *= mult1;
1272  }
1273  for (const std::pair<ColIndex, IntegerValue> term :
1274  integer_lp_[row_to_combine].terms) {
1275  const ColIndex col = term.first;
1276  const IntegerValue coeff = term.second;
1277  non_zeros_.Set(col);
1278  dense_cut[col] += coeff * mult2;
1279  }
1280  }
1281  }
1282 }
1283 
1284 void LinearProgrammingConstraint::AddZeroHalfCuts() {
1285  if (time_limit_->LimitReached()) return;
1286 
1287  tmp_lp_values_.clear();
1288  tmp_var_lbs_.clear();
1289  tmp_var_ubs_.clear();
1290  for (const IntegerVariable var : integer_variables_) {
1291  tmp_lp_values_.push_back(expanded_lp_solution_[var]);
1292  tmp_var_lbs_.push_back(integer_trail_->LevelZeroLowerBound(var));
1293  tmp_var_ubs_.push_back(integer_trail_->LevelZeroUpperBound(var));
1294  }
1295 
1296  // TODO(user): See if it make sense to try to use implied bounds there.
1297  zero_half_cut_helper_.ProcessVariables(tmp_lp_values_, tmp_var_lbs_,
1298  tmp_var_ubs_);
1299  for (glop::RowIndex row(0); row < integer_lp_.size(); ++row) {
1300  // Even though we could use non-tight row, for now we prefer to use tight
1301  // ones.
1302  const auto status = simplex_.GetConstraintStatus(row);
1303  if (status == glop::ConstraintStatus::BASIC) continue;
1304  if (status == glop::ConstraintStatus::FREE) continue;
1305 
1306  zero_half_cut_helper_.AddOneConstraint(
1307  row, integer_lp_[row].terms, integer_lp_[row].lb, integer_lp_[row].ub);
1308  }
1309  for (const std::vector<std::pair<RowIndex, IntegerValue>>& multipliers :
1310  zero_half_cut_helper_.InterestingCandidates(random_)) {
1311  if (time_limit_->LimitReached()) break;
1312 
1313  // TODO(user): Make sure that if the resulting linear coefficients are not
1314  // too high, we do try a "divisor" of two and thus try a true zero-half cut
1315  // instead of just using our best MIR heuristic (which might still be better
1316  // though).
1317  AddCutFromConstraints("ZERO_HALF", multipliers);
1318  }
1319 }
1320 
1321 void LinearProgrammingConstraint::UpdateSimplexIterationLimit(
1322  const int64 min_iter, const int64 max_iter) {
1323  if (sat_parameters_.linearization_level() < 2) return;
1324  const int64 num_degenerate_columns = CalculateDegeneracy();
1325  const int64 num_cols = simplex_.GetProblemNumCols().value();
1326  if (num_cols <= 0) {
1327  return;
1328  }
1329  CHECK_GT(num_cols, 0);
1330  const int64 decrease_factor = (10 * num_degenerate_columns) / num_cols;
1331  if (simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_FEASIBLE) {
1332  // We reached here probably because we predicted wrong. We use this as a
1333  // signal to increase the iterations or punish less for degeneracy compare
1334  // to the other part.
1335  if (is_degenerate_) {
1336  next_simplex_iter_ /= std::max(int64{1}, decrease_factor);
1337  } else {
1338  next_simplex_iter_ *= 2;
1339  }
1340  } else if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
1341  if (is_degenerate_) {
1342  next_simplex_iter_ /= std::max(int64{1}, 2 * decrease_factor);
1343  } else {
1344  // This is the most common case. We use the size of the problem to
1345  // determine the limit and ignore the previous limit.
1346  next_simplex_iter_ = num_cols / 40;
1347  }
1348  }
1349  next_simplex_iter_ =
1350  std::max(min_iter, std::min(max_iter, next_simplex_iter_));
1351 }
1352 
1354  UpdateBoundsOfLpVariables();
1355 
1356  // TODO(user): It seems the time we loose by not stopping early might be worth
1357  // it because we end up with a better explanation at optimality.
1358  glop::GlopParameters parameters = simplex_.GetParameters();
1359  if (/* DISABLES CODE */ (false) && objective_is_defined_) {
1360  // We put a limit on the dual objective since there is no point increasing
1361  // it past our current objective upper-bound (we will already fail as soon
1362  // as we pass it). Note that this limit is properly transformed using the
1363  // objective scaling factor and offset stored in lp_data_.
1364  //
1365  // Note that we use a bigger epsilon here to be sure that if we abort
1366  // because of this, we will report a conflict.
1367  parameters.set_objective_upper_limit(
1368  static_cast<double>(integer_trail_->UpperBound(objective_cp_).value() +
1369  100.0 * kCpEpsilon));
1370  }
1371 
1372  // Put an iteration limit on the work we do in the simplex for this call. Note
1373  // that because we are "incremental", even if we don't solve it this time we
1374  // will make progress towards a solve in the lower node of the tree search.
1375  if (trail_->CurrentDecisionLevel() == 0) {
1376  // TODO(user): Dynamically change the iteration limit for root node as
1377  // well.
1378  parameters.set_max_number_of_iterations(2000);
1379  } else {
1380  parameters.set_max_number_of_iterations(next_simplex_iter_);
1381  }
1382  if (sat_parameters_.use_exact_lp_reason()) {
1383  parameters.set_change_status_to_imprecise(false);
1384  parameters.set_primal_feasibility_tolerance(1e-7);
1385  parameters.set_dual_feasibility_tolerance(1e-7);
1386  }
1387 
1388  simplex_.SetParameters(parameters);
1390  if (!SolveLp()) return true;
1391 
1392  // Add new constraints to the LP and resolve?
1393  const int max_cuts_rounds =
1394  trail_->CurrentDecisionLevel() == 0
1395  ? sat_parameters_.max_cut_rounds_at_level_zero()
1396  : 1;
1397  int cuts_round = 0;
1398  while (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL &&
1399  cuts_round < max_cuts_rounds) {
1400  // We wait for the first batch of problem constraints to be added before we
1401  // begin to generate cuts.
1402  cuts_round++;
1403  if (!integer_lp_.empty()) {
1404  implied_bounds_processor_.ClearCache();
1405  implied_bounds_processor_.SeparateSomeImpliedBoundCuts(
1406  expanded_lp_solution_);
1407 
1408  // The "generic" cuts are currently part of this class as they are using
1409  // data from the current LP.
1410  //
1411  // TODO(user): Refactor so that they are just normal cut generators?
1412  if (trail_->CurrentDecisionLevel() == 0) {
1413  if (sat_parameters_.add_mir_cuts()) AddMirCuts();
1414  if (sat_parameters_.add_cg_cuts()) AddCGCuts();
1415  if (sat_parameters_.add_zero_half_cuts()) AddZeroHalfCuts();
1416  }
1417 
1418  // Try to add cuts.
1419  if (!cut_generators_.empty() &&
1420  (trail_->CurrentDecisionLevel() == 0 ||
1421  !sat_parameters_.only_add_cuts_at_level_zero())) {
1422  for (const CutGenerator& generator : cut_generators_) {
1423  generator.generate_cuts(expanded_lp_solution_, &constraint_manager_);
1424  }
1425  }
1426 
1427  implied_bounds_processor_.IbCutPool().TransferToManager(
1428  expanded_lp_solution_, &constraint_manager_);
1429  }
1430 
1431  glop::BasisState state = simplex_.GetState();
1432  if (constraint_manager_.ChangeLp(expanded_lp_solution_, &state)) {
1433  simplex_.LoadStateForNextSolve(state);
1434  if (!CreateLpFromConstraintManager()) {
1435  return integer_trail_->ReportConflict({});
1436  }
1437  const double old_obj = simplex_.GetObjectiveValue();
1438  if (!SolveLp()) return true;
1439  if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
1440  VLOG(1) << "Relaxation improvement " << old_obj << " -> "
1441  << simplex_.GetObjectiveValue()
1442  << " diff: " << simplex_.GetObjectiveValue() - old_obj
1443  << " level: " << trail_->CurrentDecisionLevel();
1444  }
1445  } else {
1446  if (trail_->CurrentDecisionLevel() == 0) {
1447  lp_at_level_zero_is_final_ = true;
1448  }
1449  break;
1450  }
1451  }
1452 
1453  // A dual-unbounded problem is infeasible. We use the dual ray reason.
1454  if (simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_UNBOUNDED) {
1455  if (sat_parameters_.use_exact_lp_reason()) {
1456  if (!FillExactDualRayReason()) return true;
1457  } else {
1458  FillReducedCostReasonIn(simplex_.GetDualRayRowCombination(),
1459  &integer_reason_);
1460  }
1461  return integer_trail_->ReportConflict(integer_reason_);
1462  }
1463 
1464  // TODO(user): Update limits for DUAL_UNBOUNDED status as well.
1465  UpdateSimplexIterationLimit(/*min_iter=*/10, /*max_iter=*/1000);
1466 
1467  // Optimality deductions if problem has an objective.
1468  if (objective_is_defined_ &&
1470  simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_FEASIBLE)) {
1471  // Try to filter optimal objective value. Note that GetObjectiveValue()
1472  // already take care of the scaling so that it returns an objective in the
1473  // CP world.
1474  const double relaxed_optimal_objective = simplex_.GetObjectiveValue();
1475  const IntegerValue approximate_new_lb(
1476  static_cast<int64>(std::ceil(relaxed_optimal_objective - kCpEpsilon)));
1477 
1478  // TODO(user): Maybe do a bit less computation when we cannot propagate
1479  // anything.
1480  if (sat_parameters_.use_exact_lp_reason()) {
1481  if (!ExactLpReasonning()) return false;
1482 
1483  // Display when the inexact bound would have propagated more.
1484  const IntegerValue propagated_lb =
1485  integer_trail_->LowerBound(objective_cp_);
1486  if (approximate_new_lb > propagated_lb) {
1487  VLOG(2) << "LP objective [ " << ToDouble(propagated_lb) << ", "
1488  << ToDouble(integer_trail_->UpperBound(objective_cp_))
1489  << " ] approx_lb += "
1490  << ToDouble(approximate_new_lb - propagated_lb) << " gap: "
1491  << integer_trail_->UpperBound(objective_cp_) - propagated_lb;
1492  }
1493  } else {
1494  FillReducedCostReasonIn(simplex_.GetReducedCosts(), &integer_reason_);
1495  const double objective_cp_ub =
1496  ToDouble(integer_trail_->UpperBound(objective_cp_));
1497  ReducedCostStrengtheningDeductions(objective_cp_ub -
1498  relaxed_optimal_objective);
1499  if (!deductions_.empty()) {
1500  deductions_reason_ = integer_reason_;
1501  deductions_reason_.push_back(
1502  integer_trail_->UpperBoundAsLiteral(objective_cp_));
1503  }
1504 
1505  // Push new objective lb.
1506  if (approximate_new_lb > integer_trail_->LowerBound(objective_cp_)) {
1507  const IntegerLiteral deduction =
1508  IntegerLiteral::GreaterOrEqual(objective_cp_, approximate_new_lb);
1509  if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
1510  return false;
1511  }
1512  }
1513 
1514  // Push reduced cost strengthening bounds.
1515  if (!deductions_.empty()) {
1516  const int trail_index_with_same_reason = integer_trail_->Index();
1517  for (const IntegerLiteral deduction : deductions_) {
1518  if (!integer_trail_->Enqueue(deduction, {}, deductions_reason_,
1519  trail_index_with_same_reason)) {
1520  return false;
1521  }
1522  }
1523  }
1524  }
1525  }
1526 
1527  // Copy more info about the current solution.
1528  if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
1529  CHECK(lp_solution_is_set_);
1530 
1531  lp_objective_ = simplex_.GetObjectiveValue();
1532  lp_solution_is_integer_ = true;
1533  const int num_vars = integer_variables_.size();
1534  for (int i = 0; i < num_vars; i++) {
1535  lp_reduced_cost_[i] = scaler_.UnscaleReducedCost(
1536  glop::ColIndex(i), simplex_.GetReducedCost(glop::ColIndex(i)));
1537  if (std::abs(lp_solution_[i] - std::round(lp_solution_[i])) >
1538  kCpEpsilon) {
1539  lp_solution_is_integer_ = false;
1540  }
1541  }
1542 
1543  if (compute_reduced_cost_averages_) {
1544  UpdateAverageReducedCosts();
1545  }
1546  }
1547 
1548  if (sat_parameters_.use_branching_in_lp() && objective_is_defined_ &&
1549  trail_->CurrentDecisionLevel() == 0 && !is_degenerate_ &&
1550  lp_solution_is_set_ && !lp_solution_is_integer_ &&
1551  sat_parameters_.linearization_level() >= 2 &&
1552  compute_reduced_cost_averages_ &&
1554  count_since_last_branching_++;
1555  if (count_since_last_branching_ < branching_frequency_) {
1556  return true;
1557  }
1558  count_since_last_branching_ = 0;
1559  bool branching_successful = false;
1560 
1561  // Strong branching on top max_num_branches variable.
1562  const int max_num_branches = 3;
1563  const int num_vars = integer_variables_.size();
1564  std::vector<std::pair<double, IntegerVariable>> branching_vars;
1565  for (int i = 0; i < num_vars; ++i) {
1566  const IntegerVariable var = integer_variables_[i];
1567  const IntegerVariable positive_var = PositiveVariable(var);
1568 
1569  // Skip non fractional variables.
1570  const double current_value = GetSolutionValue(positive_var);
1571  if (std::abs(current_value - std::round(current_value)) <= kCpEpsilon) {
1572  continue;
1573  }
1574 
1575  // Skip ignored variables.
1576  if (integer_trail_->IsCurrentlyIgnored(var)) continue;
1577 
1578  // We can use any metric to select a variable to branch on. Reduced cost
1579  // average is one of the most promissing metric. It captures the history
1580  // of the objective bound improvement in LP due to changes in the given
1581  // variable bounds.
1582  //
1583  // NOTE: We also experimented using PseudoCosts and most recent reduced
1584  // cost as metrics but it doesn't give better results on benchmarks.
1585  const double cost_i = rc_scores_[i];
1586  std::pair<double, IntegerVariable> branching_var =
1587  std::make_pair(-cost_i, positive_var);
1588  auto iterator = std::lower_bound(branching_vars.begin(),
1589  branching_vars.end(), branching_var);
1590 
1591  branching_vars.insert(iterator, branching_var);
1592  if (branching_vars.size() > max_num_branches) {
1593  branching_vars.resize(max_num_branches);
1594  }
1595  }
1596 
1597  for (const std::pair<double, IntegerVariable>& branching_var :
1598  branching_vars) {
1599  const IntegerVariable positive_var = branching_var.second;
1600  VLOG(2) << "Branching on: " << positive_var;
1601  if (BranchOnVar(positive_var)) {
1602  VLOG(2) << "Branching successful.";
1603  branching_successful = true;
1604  } else {
1605  break;
1606  }
1607  }
1608  if (!branching_successful) {
1609  branching_frequency_ *= 2;
1610  }
1611  }
1612  return true;
1613 }
1614 
1615 // Returns kMinIntegerValue in case of overflow.
1616 //
1617 // TODO(user): Because of PreventOverflow(), this should actually never happen.
1618 IntegerValue LinearProgrammingConstraint::GetImpliedLowerBound(
1619  const LinearConstraint& terms) const {
1620  IntegerValue lower_bound(0);
1621  const int size = terms.vars.size();
1622  for (int i = 0; i < size; ++i) {
1623  const IntegerVariable var = terms.vars[i];
1624  const IntegerValue coeff = terms.coeffs[i];
1625  CHECK_NE(coeff, 0);
1626  const IntegerValue bound = coeff > 0 ? integer_trail_->LowerBound(var)
1627  : integer_trail_->UpperBound(var);
1628  if (!AddProductTo(bound, coeff, &lower_bound)) return kMinIntegerValue;
1629  }
1630  return lower_bound;
1631 }
1632 
1633 bool LinearProgrammingConstraint::PossibleOverflow(
1634  const LinearConstraint& constraint) {
1635  IntegerValue lower_bound(0);
1636  const int size = constraint.vars.size();
1637  for (int i = 0; i < size; ++i) {
1638  const IntegerVariable var = constraint.vars[i];
1639  const IntegerValue coeff = constraint.coeffs[i];
1640  CHECK_NE(coeff, 0);
1641  const IntegerValue bound = coeff > 0
1642  ? integer_trail_->LevelZeroLowerBound(var)
1643  : integer_trail_->LevelZeroUpperBound(var);
1644  if (!AddProductTo(bound, coeff, &lower_bound)) {
1645  return true;
1646  }
1647  }
1648  const int64 slack = CapAdd(lower_bound.value(), -constraint.ub.value());
1649  if (slack == kint64min || slack == kint64max) {
1650  return true;
1651  }
1652  return false;
1653 }
1654 
1655 namespace {
1656 
1657 absl::int128 FloorRatio128(absl::int128 x, IntegerValue positive_div) {
1658  absl::int128 div128(positive_div.value());
1659  absl::int128 result = x / div128;
1660  if (result * div128 > x) return result - 1;
1661  return result;
1662 }
1663 
1664 } // namespace
1665 
// Scales down the coefficients and rhs of the given constraint by a divisor
// chosen so that the maximum possible activity fits in roughly 2^max_pow. The
// constraint stays valid: coefficients are floor-rounded and the rhs is
// adjusted with 128-bit arithmetic.
void LinearProgrammingConstraint::PreventOverflow(LinearConstraint* constraint,
                                                  int max_pow) {
  // Compute the min/max possible partial sum. Note that we need to use the
  // level zero bounds here since we might use this cut after backtrack.
  double sum_min = std::min(0.0, ToDouble(-constraint->ub));
  double sum_max = std::max(0.0, ToDouble(-constraint->ub));
  const int size = constraint->vars.size();
  for (int i = 0; i < size; ++i) {
    const IntegerVariable var = constraint->vars[i];
    const double coeff = ToDouble(constraint->coeffs[i]);
    const double prod1 =
        coeff * ToDouble(integer_trail_->LevelZeroLowerBound(var));
    const double prod2 =
        coeff * ToDouble(integer_trail_->LevelZeroUpperBound(var));
    sum_min += std::min(0.0, std::min(prod1, prod2));
    sum_max += std::max(0.0, std::max(prod1, prod2));
  }
  const double max_value = std::max({sum_max, -sum_min, sum_max - sum_min});

  // Smallest divisor such that max_value / divisor <= 2^max_pow.
  const IntegerValue divisor(std::ceil(std::ldexp(max_value, -max_pow)));
  if (divisor <= 1) return;  // Already small enough, nothing to do.

  // To be correct, we need to shift all variable so that they are positive.
  //
  // Important: One might be tempted to think that using the current variable
  // bounds is okay here since we only use this to derive cut/constraint that
  // only needs to be locally valid. However, in some corner cases (like when
  // one term become zero), we might lose the fact that we used one of the
  // variable bound to derive the new constraint, so we will miss it in the
  // explanation !!
  //
  // TODO(user): This code is tricky and similar to the one to generate cuts.
  // Test and may reduce the duplication? note however that here we use int128
  // to deal with potential overflow.
  int new_size = 0;
  absl::int128 adjust = 0;
  for (int i = 0; i < size; ++i) {
    const IntegerValue old_coeff = constraint->coeffs[i];
    const IntegerValue new_coeff = FloorRatio(old_coeff, divisor);

    // Compute the rhs adjustment: the part of old_coeff lost to the floor
    // rounding, evaluated at the variable's level-zero lower bound.
    const absl::int128 remainder =
        absl::int128(old_coeff.value()) -
        absl::int128(new_coeff.value()) * absl::int128(divisor.value());
    adjust +=
        remainder *
        absl::int128(
            integer_trail_->LevelZeroLowerBound(constraint->vars[i]).value());

    // Coefficients rounded down to zero are dropped from the constraint.
    if (new_coeff == 0) continue;
    constraint->vars[new_size] = constraint->vars[i];
    constraint->coeffs[new_size] = new_coeff;
    ++new_size;
  }
  constraint->vars.resize(new_size);
  constraint->coeffs.resize(new_size);

  // New rhs: floor division keeps the inequality valid.
  constraint->ub = IntegerValue(static_cast<int64>(
      FloorRatio128(absl::int128(constraint->ub.value()) - adjust, divisor)));
}
1726 
1727 // TODO(user): combine this with RelaxLinearReason() to avoid the extra
1728 // magnitude vector and the weird precondition of RelaxLinearReason().
1729 void LinearProgrammingConstraint::SetImpliedLowerBoundReason(
1730  const LinearConstraint& terms, IntegerValue slack) {
1731  integer_reason_.clear();
1732  std::vector<IntegerValue> magnitudes;
1733  const int size = terms.vars.size();
1734  for (int i = 0; i < size; ++i) {
1735  const IntegerVariable var = terms.vars[i];
1736  const IntegerValue coeff = terms.coeffs[i];
1737  CHECK_NE(coeff, 0);
1738  if (coeff > 0) {
1739  magnitudes.push_back(coeff);
1740  integer_reason_.push_back(integer_trail_->LowerBoundAsLiteral(var));
1741  } else {
1742  magnitudes.push_back(-coeff);
1743  integer_reason_.push_back(integer_trail_->UpperBoundAsLiteral(var));
1744  }
1745  }
1746  CHECK_GE(slack, 0);
1747  if (slack > 0) {
1748  integer_trail_->RelaxLinearReason(slack, magnitudes, &integer_reason_);
1749  }
1750  integer_trail_->RemoveLevelZeroBounds(&integer_reason_);
1751 }
1752 
1753 // TODO(user): Provide a sparse interface.
1754 std::vector<std::pair<RowIndex, IntegerValue>>
1755 LinearProgrammingConstraint::ScaleLpMultiplier(
1756  bool take_objective_into_account,
1757  const glop::DenseColumn& dense_lp_multipliers, Fractional* scaling,
1758  int max_pow) const {
1759  double max_sum = 0.0;
1760  std::vector<std::pair<RowIndex, Fractional>> cp_multipliers;
1761  for (RowIndex row(0); row < dense_lp_multipliers.size(); ++row) {
1762  const Fractional lp_multi = dense_lp_multipliers[row];
1763 
1764  // We ignore small values since these are likely errors and will not
1765  // contribute much to the new lp constraint anyway.
1766  if (std::abs(lp_multi) < 1e-12) continue;
1767 
1768  // Remove trivial bad cases.
1769  //
1770  // TODO(user): It might be better (when possible) to use the OPTIMAL row
1771  // status since in most situation we do want the constraint we add to be
1772  // tight under the current LP solution. Only for infeasible problem we might
1773  // not have access to the status.
1774  if (lp_multi > 0.0 && integer_lp_[row].ub >= kMaxIntegerValue) {
1775  continue;
1776  }
1777  if (lp_multi < 0.0 && integer_lp_[row].lb <= kMinIntegerValue) {
1778  continue;
1779  }
1780 
1781  const Fractional cp_multi = scaler_.UnscaleDualValue(row, lp_multi);
1782  cp_multipliers.push_back({row, cp_multi});
1783  max_sum += ToDouble(infinity_norms_[row]) * std::abs(cp_multi);
1784  }
1785 
1786  // This behave exactly like if we had another "objective" constraint with
1787  // an lp_multi of 1.0 and a cp_multi of 1.0.
1788  if (take_objective_into_account) {
1789  max_sum += ToDouble(objective_infinity_norm_);
1790  }
1791 
1792  *scaling = 1.0;
1793  std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
1794  if (max_sum == 0.0) {
1795  // Empty linear combinaison.
1796  return integer_multipliers;
1797  }
1798 
1799  // We want max_sum * scaling to be <= 2 ^ max_pow and fit on an int64.
1800  // We use a power of 2 as this seems to work better.
1801  const double threshold = std::ldexp(1, max_pow) / max_sum;
1802  if (threshold < 1.0) {
1803  // TODO(user): we currently do not support scaling down, so we just abort
1804  // in this case.
1805  return integer_multipliers;
1806  }
1807  while (2 * *scaling <= threshold) *scaling *= 2;
1808 
1809  // Scale the multipliers by *scaling.
1810  //
1811  // TODO(user): Maybe use int128 to avoid overflow?
1812  for (const auto entry : cp_multipliers) {
1813  const IntegerValue coeff(std::round(entry.second * (*scaling)));
1814  if (coeff != 0) integer_multipliers.push_back({entry.first, coeff});
1815  }
1816  return integer_multipliers;
1817 }
1818 
1819 bool LinearProgrammingConstraint::ComputeNewLinearConstraint(
1820  const std::vector<std::pair<RowIndex, IntegerValue>>& integer_multipliers,
1821  ScatteredIntegerVector* scattered_vector, IntegerValue* upper_bound) const {
1822  // Initialize the new constraint.
1823  *upper_bound = 0;
1824  scattered_vector->ClearAndResize(integer_variables_.size());
1825 
1826  // Compute the new constraint by taking the linear combination given by
1827  // integer_multipliers of the integer constraints in integer_lp_.
1828  for (const std::pair<RowIndex, IntegerValue> term : integer_multipliers) {
1829  const RowIndex row = term.first;
1830  const IntegerValue multiplier = term.second;
1831  CHECK_LT(row, integer_lp_.size());
1832 
1833  // Update the constraint.
1834  if (!scattered_vector->AddLinearExpressionMultiple(
1835  multiplier, integer_lp_[row].terms)) {
1836  return false;
1837  }
1838 
1839  // Update the upper bound.
1840  const IntegerValue bound =
1841  multiplier > 0 ? integer_lp_[row].ub : integer_lp_[row].lb;
1842  if (!AddProductTo(multiplier, bound, upper_bound)) return false;
1843  }
1844 
1845  return true;
1846 }
1847 
// Greedily adjusts each multiplier (never flipping its sign unless the row is
// an equality) when doing so tightens |upper_bound - ImpliedLB(...)| of the
// aggregated constraint, while carefully bounding the change to avoid int64
// overflow. scattered_vector and upper_bound are updated accordingly.
//
// TODO(user): no need to update the multipliers.
void LinearProgrammingConstraint::AdjustNewLinearConstraint(
    std::vector<std::pair<glop::RowIndex, IntegerValue>>* integer_multipliers,
    ScatteredIntegerVector* scattered_vector, IntegerValue* upper_bound) const {
  const IntegerValue kMaxWantedCoeff(1e18);
  for (std::pair<RowIndex, IntegerValue>& term : *integer_multipliers) {
    const RowIndex row = term.first;
    const IntegerValue multiplier = term.second;
    if (multiplier == 0) continue;

    // We will only allow change of the form "multiplier += to_add" with to_add
    // in [-negative_limit, positive_limit].
    IntegerValue negative_limit = kMaxWantedCoeff;
    IntegerValue positive_limit = kMaxWantedCoeff;

    // Make sure we never change the sign of the multiplier, except if the
    // row is an equality in which case we don't care.
    if (integer_lp_[row].ub != integer_lp_[row].lb) {
      if (multiplier > 0) {
        negative_limit = std::min(negative_limit, multiplier);
      } else {
        positive_limit = std::min(positive_limit, -multiplier);
      }
    }

    // Make sure upper_bound + to_add * row_bound never overflow.
    const IntegerValue row_bound =
        multiplier > 0 ? integer_lp_[row].ub : integer_lp_[row].lb;
    if (row_bound != 0) {
      const IntegerValue limit1 = FloorRatio(
          std::max(IntegerValue(0), kMaxWantedCoeff - IntTypeAbs(*upper_bound)),
          IntTypeAbs(row_bound));
      const IntegerValue limit2 =
          FloorRatio(kMaxWantedCoeff, IntTypeAbs(row_bound));
      if (*upper_bound > 0 == row_bound > 0) {  // Same sign.
        positive_limit = std::min(positive_limit, limit1);
        negative_limit = std::min(negative_limit, limit2);
      } else {
        negative_limit = std::min(negative_limit, limit1);
        positive_limit = std::min(positive_limit, limit2);
      }
    }

    // If we add the row to the scattered_vector, diff will indicate by how
    // much |upper_bound - ImpliedLB(scattered_vector)| will change. That
    // corresponds to increasing the multiplier by 1.
    //
    // At this stage, we are not sure computing sum coeff * bound will not
    // overflow, so we use floating point numbers. It is fine to do so since
    // this is not directly involved in the actual exact constraint generation:
    // these variables are just used in a heuristic.
    double positive_diff = ToDouble(row_bound);
    double negative_diff = ToDouble(row_bound);

    // TODO(user): we could relax a bit some of the condition and allow a sign
    // change. It is just trickier to compute the diff when we allow such
    // changes.
    for (const auto entry : integer_lp_[row].terms) {
      const ColIndex col = entry.first;
      const IntegerValue coeff = entry.second;
      const IntegerValue abs_coef = IntTypeAbs(coeff);
      CHECK_NE(coeff, 0);

      const IntegerVariable var = integer_variables_[col.value()];
      const IntegerValue lb = integer_trail_->LowerBound(var);
      const IntegerValue ub = integer_trail_->UpperBound(var);

      // Moving a variable away from zero seems to improve the bound even
      // if it reduces the number of non-zero. Note that this is because of
      // this that positive_diff and negative_diff are not the same.
      const IntegerValue current = (*scattered_vector)[col];
      if (current == 0) {
        // New non-zero entry: only the overflow limit applies; the diff uses
        // the bound that would participate in the implied lower bound.
        const IntegerValue overflow_limit(
            FloorRatio(kMaxWantedCoeff, abs_coef));
        positive_limit = std::min(positive_limit, overflow_limit);
        negative_limit = std::min(negative_limit, overflow_limit);
        if (coeff > 0) {
          positive_diff -= ToDouble(coeff) * ToDouble(lb);
          negative_diff -= ToDouble(coeff) * ToDouble(ub);
        } else {
          positive_diff -= ToDouble(coeff) * ToDouble(ub);
          negative_diff -= ToDouble(coeff) * ToDouble(lb);
        }
        continue;
      }

      // We don't want to change the sign of current (except if the variable is
      // fixed) or to have an overflow.
      //
      // Corner case:
      //  - IntTypeAbs(current) can be larger than kMaxWantedCoeff!
      //  - The code assumes that 2 * kMaxWantedCoeff do not overflow.
      const IntegerValue current_magnitude = IntTypeAbs(current);
      const IntegerValue other_direction_limit = FloorRatio(
          lb == ub
              ? kMaxWantedCoeff + std::min(current_magnitude,
                                           kMaxIntegerValue - kMaxWantedCoeff)
              : current_magnitude,
          abs_coef);
      const IntegerValue same_direction_limit(FloorRatio(
          std::max(IntegerValue(0), kMaxWantedCoeff - current_magnitude),
          abs_coef));
      if (current > 0 == coeff > 0) {  // Same sign.
        negative_limit = std::min(negative_limit, other_direction_limit);
        positive_limit = std::min(positive_limit, same_direction_limit);
      } else {
        negative_limit = std::min(negative_limit, same_direction_limit);
        positive_limit = std::min(positive_limit, other_direction_limit);
      }

      // This is how diff change.
      const IntegerValue implied = current > 0 ? lb : ub;
      if (implied != 0) {
        positive_diff -= ToDouble(coeff) * ToDouble(implied);
        negative_diff -= ToDouble(coeff) * ToDouble(implied);
      }
    }

    // Only add a multiple of this row if it tighten the final constraint.
    // The positive_diff/negative_diff are supposed to be integer modulo the
    // double precision, so we only add a multiple if they seems far away from
    // zero.
    IntegerValue to_add(0);
    if (positive_diff <= -1.0 && positive_limit > 0) {
      to_add = positive_limit;
    }
    if (negative_diff >= 1.0 && negative_limit > 0) {
      // Pick this if it is better than the positive sign.
      if (to_add == 0 ||
          std::abs(ToDouble(negative_limit) * negative_diff) >
              std::abs(ToDouble(positive_limit) * positive_diff)) {
        to_add = -negative_limit;
      }
    }
    if (to_add != 0) {
      term.second += to_add;
      *upper_bound += to_add * row_bound;

      // TODO(user): we could avoid checking overflow here, but this is likely
      // not in the hot loop.
      CHECK(scattered_vector->AddLinearExpressionMultiple(
          to_add, integer_lp_[row].terms));
    }
  }
}
1993 
// The "exact" computation goes as follows:
//
// Given any INTEGER linear combination of the LP constraints, we can create a
// new integer constraint that is valid (its computation must not overflow
// though). Let's call this "linear_combination <= ub". We can then always add
// to it the inequality "objective_terms <= objective_var", so we get:
// ImpliedLB(objective_terms + linear_combination) - ub <= objective_var.
// where ImpliedLB() is computed from the variable current bounds.
//
// Now, if we use for the linear combination an approximation of the optimal
// negated dual LP values (by scaling them and rounding them to integer), we
// will get an EXACT objective lower bound that is more or less the same as the
// inexact bound given by the LP relaxation. This allows to derive exact
// reasons for any propagation done by this constraint.
bool LinearProgrammingConstraint::ExactLpReasonning() {
  // Clear old reason and deductions.
  integer_reason_.clear();
  deductions_.clear();
  deductions_reason_.clear();

  // The row multipliers will be the negation of the LP duals.
  //
  // TODO(user): Provide and use a sparse API in Glop to get the duals.
  const RowIndex num_rows = simplex_.GetProblemNumRows();
  glop::DenseColumn lp_multipliers(num_rows);
  for (RowIndex row(0); row < num_rows; ++row) {
    lp_multipliers[row] = -simplex_.GetDualValue(row);
  }

  Fractional scaling;
  std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers =
      ScaleLpMultiplier(/*take_objective_into_account=*/true, lp_multipliers,
                        &scaling);

  // Aggregate the rows into a single valid integer constraint. On overflow we
  // simply skip this round of exact reasoning (returning true, not a failure).
  IntegerValue rc_ub;
  if (!ComputeNewLinearConstraint(integer_multipliers, &tmp_scattered_vector_,
                                  &rc_ub)) {
    VLOG(1) << "Issue while computing the exact LP reason. Aborting.";
    return true;
  }

  // The "objective constraint" behaves like if the unscaled cp multiplier was
  // 1.0, so we will multiply it by this number and add it to reduced_costs.
  const IntegerValue obj_scale(std::round(scaling));
  if (obj_scale == 0) {
    VLOG(1) << "Overflow during exact LP reasoning. scaling=" << scaling;
    return true;
  }
  CHECK(tmp_scattered_vector_.AddLinearExpressionMultiple(obj_scale,
                                                          integer_objective_));
  CHECK(AddProductTo(-obj_scale, integer_objective_offset_, &rc_ub));
  AdjustNewLinearConstraint(&integer_multipliers, &tmp_scattered_vector_,
                            &rc_ub);

  // Create the IntegerSumLE that will allow to propagate the objective and
  // more generally do the reduced cost fixing.
  LinearConstraint new_constraint;
  tmp_scattered_vector_.ConvertToLinearConstraint(integer_variables_, rc_ub,
                                                  &new_constraint);
  new_constraint.vars.push_back(objective_cp_);
  new_constraint.coeffs.push_back(-obj_scale);
  DivideByGCD(&new_constraint);
  PreventOverflow(&new_constraint);
  DCHECK(!PossibleOverflow(new_constraint));
  DCHECK(constraint_manager_.DebugCheckConstraint(new_constraint));

  // NOTE(review): ownership of cp_constraint is presumably transferred to
  // optimal_constraints_ by the emplace_back below -- confirm it stores
  // owning pointers.
  IntegerSumLE* cp_constraint =
      new IntegerSumLE({}, new_constraint.vars, new_constraint.coeffs,
                       new_constraint.ub, model_);
  if (trail_->CurrentDecisionLevel() == 0) {
    // Since we will never ask the reason for a constraint at level 0, we just
    // keep the last one.
    optimal_constraints_.clear();
  }
  optimal_constraints_.emplace_back(cp_constraint);
  rev_optimal_constraints_size_ = optimal_constraints_.size();
  if (!cp_constraint->PropagateAtLevelZero()) return false;
  return cp_constraint->Propagate();
}
2073 
2074 bool LinearProgrammingConstraint::FillExactDualRayReason() {
2075  Fractional scaling;
2076  std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers =
2077  ScaleLpMultiplier(/*take_objective_into_account=*/false,
2078  simplex_.GetDualRay(), &scaling);
2079 
2080  IntegerValue new_constraint_ub;
2081  if (!ComputeNewLinearConstraint(integer_multipliers, &tmp_scattered_vector_,
2082  &new_constraint_ub)) {
2083  VLOG(1) << "Isse while computing the exact dual ray reason. Aborting.";
2084  return false;
2085  }
2086 
2087  AdjustNewLinearConstraint(&integer_multipliers, &tmp_scattered_vector_,
2088  &new_constraint_ub);
2089 
2090  LinearConstraint new_constraint;
2091  tmp_scattered_vector_.ConvertToLinearConstraint(
2092  integer_variables_, new_constraint_ub, &new_constraint);
2093  DivideByGCD(&new_constraint);
2094  PreventOverflow(&new_constraint);
2095  DCHECK(!PossibleOverflow(new_constraint));
2096  DCHECK(constraint_manager_.DebugCheckConstraint(new_constraint));
2097 
2098  const IntegerValue implied_lb = GetImpliedLowerBound(new_constraint);
2099  if (implied_lb <= new_constraint.ub) {
2100  VLOG(1) << "LP exact dual ray not infeasible,"
2101  << " implied_lb: " << implied_lb.value() / scaling
2102  << " ub: " << new_constraint.ub.value() / scaling;
2103  return false;
2104  }
2105  const IntegerValue slack = (implied_lb - new_constraint.ub) - 1;
2106  SetImpliedLowerBoundReason(new_constraint, slack);
2107  return true;
2108 }
2109 
2110 int64 LinearProgrammingConstraint::CalculateDegeneracy() {
2111  const glop::ColIndex num_vars = simplex_.GetProblemNumCols();
2112  int num_non_basic_with_zero_rc = 0;
2113  for (glop::ColIndex i(0); i < num_vars; ++i) {
2114  const double rc = simplex_.GetReducedCost(i);
2115  if (rc != 0.0) continue;
2116  if (simplex_.GetVariableStatus(i) == glop::VariableStatus::BASIC) {
2117  continue;
2118  }
2119  num_non_basic_with_zero_rc++;
2120  }
2121  const int64 num_cols = simplex_.GetProblemNumCols().value();
2122  is_degenerate_ = num_non_basic_with_zero_rc >= 0.3 * num_cols;
2123  return num_non_basic_with_zero_rc;
2124 }
2125 
// Fills deductions_ with the variable bound changes implied by reduced cost
// strengthening: given that the objective may only change by
// cp_objective_delta, a nonzero reduced cost limits how far each non-basic
// variable can move from its current LP value.
void LinearProgrammingConstraint::ReducedCostStrengtheningDeductions(
    double cp_objective_delta) {
  deductions_.clear();

  // TRICKY: while simplex_.GetObjectiveValue() use the objective scaling
  // factor stored in the lp_data_, all the other functions like
  // GetReducedCost() or GetVariableValue() do not.
  const double lp_objective_delta =
      cp_objective_delta / lp_data_.objective_scaling_factor();
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    const IntegerVariable cp_var = integer_variables_[i];
    const glop::ColIndex lp_var = glop::ColIndex(i);
    const double rc = simplex_.GetReducedCost(lp_var);
    const double value = simplex_.GetVariableValue(lp_var);

    if (rc == 0.0) continue;
    // Moving the variable by lp_objective_delta / rc consumes the whole
    // allowed objective change; this gives the other bound in LP space, then
    // unscaled back to CP space.
    const double lp_other_bound = value + lp_objective_delta / rc;
    const double cp_other_bound =
        scaler_.UnscaleVariableValue(lp_var, lp_other_bound);

    if (rc > kLpEpsilon) {
      const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
      const double new_ub = std::floor(cp_other_bound + kCpEpsilon);
      if (new_ub < ub) {
        // TODO(user): Because rc > kLpEpsilon, the lower_bound of cp_var
        // will be part of the reason returned by FillReducedCostsReason(), but
        // we actually do not need it here. Same below.
        const IntegerValue new_ub_int(static_cast<IntegerValue>(new_ub));
        deductions_.push_back(IntegerLiteral::LowerOrEqual(cp_var, new_ub_int));
      }
    } else if (rc < -kLpEpsilon) {
      const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
      const double new_lb = std::ceil(cp_other_bound - kCpEpsilon);
      if (new_lb > lb) {
        const IntegerValue new_lb_int(static_cast<IntegerValue>(new_lb));
        deductions_.push_back(
            IntegerLiteral::GreaterOrEqual(cp_var, new_lb_int));
      }
    }
  }
}
2168 
2169 namespace {
2170 
2171 // Add a cut of the form Sum_{outgoing arcs from S} lp >= rhs_lower_bound.
2172 //
2173 // Note that we used to also add the same cut for the incoming arcs, but because
2174 // of flow conservation on these problems, the outgoing flow is always the same
2175 // as the incoming flow, so adding this extra cut doesn't seem relevant.
2176 void AddOutgoingCut(
2177  int num_nodes, int subset_size, const std::vector<bool>& in_subset,
2178  const std::vector<int>& tails, const std::vector<int>& heads,
2179  const std::vector<Literal>& literals,
2180  const std::vector<double>& literal_lp_values, int64 rhs_lower_bound,
2182  LinearConstraintManager* manager, Model* model) {
2183  // A node is said to be optional if it can be excluded from the subcircuit,
2184  // in which case there is a self-loop on that node.
2185  // If there are optional nodes, use extended formula:
2186  // sum(cut) >= 1 - optional_loop_in - optional_loop_out
2187  // where optional_loop_in's node is in subset, optional_loop_out's is out.
2188  // TODO(user): Favor optional loops fixed to zero at root.
2189  int num_optional_nodes_in = 0;
2190  int num_optional_nodes_out = 0;
2191  int optional_loop_in = -1;
2192  int optional_loop_out = -1;
2193  for (int i = 0; i < tails.size(); ++i) {
2194  if (tails[i] != heads[i]) continue;
2195  if (in_subset[tails[i]]) {
2196  num_optional_nodes_in++;
2197  if (optional_loop_in == -1 ||
2198  literal_lp_values[i] < literal_lp_values[optional_loop_in]) {
2199  optional_loop_in = i;
2200  }
2201  } else {
2202  num_optional_nodes_out++;
2203  if (optional_loop_out == -1 ||
2204  literal_lp_values[i] < literal_lp_values[optional_loop_out]) {
2205  optional_loop_out = i;
2206  }
2207  }
2208  }
2209 
2210  // TODO(user): The lower bound for CVRP is computed assuming all nodes must be
2211  // served, if it is > 1 we lower it to one in the presence of optional nodes.
2212  if (num_optional_nodes_in + num_optional_nodes_out > 0) {
2213  CHECK_GE(rhs_lower_bound, 1);
2214  rhs_lower_bound = 1;
2215  }
2216 
2217  LinearConstraintBuilder outgoing(model, IntegerValue(rhs_lower_bound),
2219  double sum_outgoing = 0.0;
2220 
2221  // Add outgoing arcs, compute outgoing flow.
2222  for (int i = 0; i < tails.size(); ++i) {
2223  if (in_subset[tails[i]] && !in_subset[heads[i]]) {
2224  sum_outgoing += literal_lp_values[i];
2225  CHECK(outgoing.AddLiteralTerm(literals[i], IntegerValue(1)));
2226  }
2227  }
2228 
2229  // Support optional nodes if any.
2230  if (num_optional_nodes_in + num_optional_nodes_out > 0) {
2231  // When all optionals of one side are excluded in lp solution, no cut.
2232  if (num_optional_nodes_in == subset_size &&
2233  (optional_loop_in == -1 ||
2234  literal_lp_values[optional_loop_in] > 1.0 - 1e-6)) {
2235  return;
2236  }
2237  if (num_optional_nodes_out == num_nodes - subset_size &&
2238  (optional_loop_out == -1 ||
2239  literal_lp_values[optional_loop_out] > 1.0 - 1e-6)) {
2240  return;
2241  }
2242 
2243  // There is no mandatory node in subset, add optional_loop_in.
2244  if (num_optional_nodes_in == subset_size) {
2245  CHECK(
2246  outgoing.AddLiteralTerm(literals[optional_loop_in], IntegerValue(1)));
2247  sum_outgoing += literal_lp_values[optional_loop_in];
2248  }
2249 
2250  // There is no mandatory node out of subset, add optional_loop_out.
2251  if (num_optional_nodes_out == num_nodes - subset_size) {
2252  CHECK(outgoing.AddLiteralTerm(literals[optional_loop_out],
2253  IntegerValue(1)));
2254  sum_outgoing += literal_lp_values[optional_loop_out];
2255  }
2256  }
2257 
2258  if (sum_outgoing < rhs_lower_bound - 1e-6) {
2259  manager->AddCut(outgoing.Build(), "Circuit", lp_values);
2260  }
2261 }
2262 
2263 } // namespace
2264 
2265 // We roughly follow the algorithm described in section 6 of "The Traveling
2266 // Salesman Problem, A computational Study", David L. Applegate, Robert E.
2267 // Bixby, Vasek Chvatal, William J. Cook.
2268 //
2269 // Note that this is mainly a "symmetric" case algo, but it does still work for
2270 // the asymmetric case.
2272  int num_nodes, const std::vector<int>& tails, const std::vector<int>& heads,
2273  const std::vector<Literal>& literals,
2275  absl::Span<const int64> demands, int64 capacity,
2276  LinearConstraintManager* manager, Model* model) {
2277  if (num_nodes <= 2) return;
2278 
2279  // We will collect only the arcs with a positive lp_values to speed up some
2280  // computation below.
2281  struct Arc {
2282  int tail;
2283  int head;
2284  double lp_value;
2285  };
2286  std::vector<Arc> relevant_arcs;
2287 
2288  // Sort the arcs by non-increasing lp_values.
2289  std::vector<double> literal_lp_values(literals.size());
2290  std::vector<std::pair<double, int>> arc_by_decreasing_lp_values;
2291  auto* encoder = model->GetOrCreate<IntegerEncoder>();
2292  for (int i = 0; i < literals.size(); ++i) {
2293  double lp_value;
2294  const IntegerVariable direct_view = encoder->GetLiteralView(literals[i]);
2295  if (direct_view != kNoIntegerVariable) {
2296  lp_value = lp_values[direct_view];
2297  } else {
2298  lp_value =
2299  1.0 - lp_values[encoder->GetLiteralView(literals[i].Negated())];
2300  }
2301  literal_lp_values[i] = lp_value;
2302 
2303  if (lp_value < 1e-6) continue;
2304  relevant_arcs.push_back({tails[i], heads[i], lp_value});
2305  arc_by_decreasing_lp_values.push_back({lp_value, i});
2306  }
2307  std::sort(arc_by_decreasing_lp_values.begin(),
2308  arc_by_decreasing_lp_values.end(),
2309  std::greater<std::pair<double, int>>());
2310 
2311  // We will do a union-find by adding one by one the arc of the lp solution
2312  // in the order above. Every intermediate set during this construction will
2313  // be a candidate for a cut.
2314  //
2315  // In parallel to the union-find, to efficiently reconstruct these sets (at
2316  // most num_nodes), we construct a "decomposition forest" of the different
2317  // connected components. Note that we don't exploit any asymmetric nature of
2318  // the graph here. This is exactly the algo 6.3 in the book above.
2319  int num_components = num_nodes;
2320  std::vector<int> parent(num_nodes);
2321  std::vector<int> root(num_nodes);
2322  for (int i = 0; i < num_nodes; ++i) {
2323  parent[i] = i;
2324  root[i] = i;
2325  }
2326  auto get_root_and_compress_path = [&root](int node) {
2327  int r = node;
2328  while (root[r] != r) r = root[r];
2329  while (root[node] != r) {
2330  const int next = root[node];
2331  root[node] = r;
2332  node = next;
2333  }
2334  return r;
2335  };
2336  for (const auto pair : arc_by_decreasing_lp_values) {
2337  if (num_components == 2) break;
2338  const int tail = get_root_and_compress_path(tails[pair.second]);
2339  const int head = get_root_and_compress_path(heads[pair.second]);
2340  if (tail != head) {
2341  // Update the decomposition forest, note that the number of nodes is
2342  // growing.
2343  const int new_node = parent.size();
2344  parent.push_back(new_node);
2345  parent[head] = new_node;
2346  parent[tail] = new_node;
2347  --num_components;
2348 
2349  // It is important that the union-find representative is the same node.
2350  root.push_back(new_node);
2351  root[head] = new_node;
2352  root[tail] = new_node;
2353  }
2354  }
2355 
2356  // For each node in the decomposition forest, try to add a cut for the set
2357  // formed by the nodes and its children. To do that efficiently, we first
2358  // order the nodes so that for each node in a tree, the set of children forms
2359  // a consecutive span in the pre_order vector. This vector just lists the
2360  // nodes in the "pre-order" graph traversal order. The Spans will point inside
2361  // the pre_order vector, it is why we initialize it once and for all.
2362  int new_size = 0;
2363  std::vector<int> pre_order(num_nodes);
2364  std::vector<absl::Span<const int>> subsets;
2365  {
2366  std::vector<absl::InlinedVector<int, 2>> graph(parent.size());
2367  for (int i = 0; i < parent.size(); ++i) {
2368  if (parent[i] != i) graph[parent[i]].push_back(i);
2369  }
2370  std::vector<int> queue;
2371  std::vector<bool> seen(graph.size(), false);
2372  std::vector<int> start_index(parent.size());
2373  for (int i = num_nodes; i < parent.size(); ++i) {
2374  // Note that because of the way we constructed 'parent', the graph is a
2375  // binary tree. This is not required for the correctness of the algorithm
2376  // here though.
2377  CHECK(graph[i].empty() || graph[i].size() == 2);
2378  if (parent[i] != i) continue;
2379 
2380  // Explore the subtree rooted at node i.
2381  CHECK(!seen[i]);
2382  queue.push_back(i);
2383  while (!queue.empty()) {
2384  const int node = queue.back();
2385  if (seen[node]) {
2386  queue.pop_back();
2387  // All the children of node are in the span [start, end) of the
2388  // pre_order vector.
2389  const int start = start_index[node];
2390  if (new_size - start > 1) {
2391  subsets.emplace_back(&pre_order[start], new_size - start);
2392  }
2393  continue;
2394  }
2395  seen[node] = true;
2396  start_index[node] = new_size;
2397  if (node < num_nodes) pre_order[new_size++] = node;
2398  for (const int child : graph[node]) {
2399  if (!seen[child]) queue.push_back(child);
2400  }
2401  }
2402  }
2403  }
2404 
2405  // Compute the total demands in order to know the minimum incoming/outgoing
2406  // flow.
2407  int64 total_demands = 0;
2408  if (!demands.empty()) {
2409  for (const int64 demand : demands) total_demands += demand;
2410  }
2411 
2412  // Process each subsets and add any violated cut.
2413  CHECK_EQ(pre_order.size(), num_nodes);
2414  std::vector<bool> in_subset(num_nodes, false);
2415  for (const absl::Span<const int> subset : subsets) {
2416  CHECK_GT(subset.size(), 1);
2417  CHECK_LT(subset.size(), num_nodes);
2418 
2419  // These fields will be left untouched if demands.empty().
2420  bool contain_depot = false;
2421  int64 subset_demand = 0;
2422 
2423  // Initialize "in_subset" and the subset demands.
2424  for (const int n : subset) {
2425  in_subset[n] = true;
2426  if (!demands.empty()) {
2427  if (n == 0) contain_depot = true;
2428  subset_demand += demands[n];
2429  }
2430  }
2431 
2432  // Compute a lower bound on the outgoing flow.
2433  //
2434  // TODO(user): This lower bound assume all nodes in subset must be served,
2435  // which is not the case. For TSP we do the correct thing in
2436  // AddOutgoingCut() but not for CVRP... Fix!!
2437  //
2438  // TODO(user): It could be very interesting to see if this "min outgoing
2439  // flow" cannot be automatically infered from the constraint in the
2440  // precedence graph. This might work if we assume that any kind of path
2441  // cumul constraint is encoded with constraints:
2442  // [edge => value_head >= value_tail + edge_weight].
2443  // We could take the minimum incoming edge weight per node in the set, and
2444  // use the cumul variable domain to infer some capacity.
2445  int64 min_outgoing_flow = 1;
2446  if (!demands.empty()) {
2447  min_outgoing_flow =
2448  contain_depot
2449  ? (total_demands - subset_demand + capacity - 1) / capacity
2450  : (subset_demand + capacity - 1) / capacity;
2451  }
2452 
2453  // We still need to serve nodes with a demand of zero, and in the corner
2454  // case where all node in subset have a zero demand, the formula above
2455  // result in a min_outgoing_flow of zero.
2456  min_outgoing_flow = std::max(min_outgoing_flow, int64{1});
2457 
2458  // Compute the current outgoing flow out of the subset.
2459  //
2460  // This can take a significant portion of the running time, it is why it is
2461  // faster to do it only on arcs with non-zero lp values which should be in
2462  // linear number rather than the total number of arc which can be quadratic.
2463  //
2464  // TODO(user): For the symmetric case there is an even faster algo. See if
2465  // it can be generalized to the asymmetric one if become needed.
2466  // Reference is algo 6.4 of the "The Traveling Salesman Problem" book
2467  // mentionned above.
2468  double outgoing_flow = 0.0;
2469  for (const auto arc : relevant_arcs) {
2470  if (in_subset[arc.tail] && !in_subset[arc.head]) {
2471  outgoing_flow += arc.lp_value;
2472  }
2473  }
2474 
2475  // Add a cut if the current outgoing flow is not enough.
2476  if (outgoing_flow < min_outgoing_flow - 1e-6) {
2477  AddOutgoingCut(num_nodes, subset.size(), in_subset, tails, heads,
2478  literals, literal_lp_values,
2479  /*rhs_lower_bound=*/min_outgoing_flow, lp_values, manager,
2480  model);
2481  }
2482 
2483  // Sparse clean up.
2484  for (const int n : subset) in_subset[n] = false;
2485  }
2486 }
2487 
2488 namespace {
2489 
2490 // Returns for each literal its integer view, or the view of its negation.
2491 std::vector<IntegerVariable> GetAssociatedVariables(
2492  const std::vector<Literal>& literals, Model* model) {
2493  auto* encoder = model->GetOrCreate<IntegerEncoder>();
2494  std::vector<IntegerVariable> result;
2495  for (const Literal l : literals) {
2496  const IntegerVariable direct_view = encoder->GetLiteralView(l);
2497  if (direct_view != kNoIntegerVariable) {
2498  result.push_back(direct_view);
2499  } else {
2500  result.push_back(encoder->GetLiteralView(l.Negated()));
2501  DCHECK_NE(result.back(), kNoIntegerVariable);
2502  }
2503  }
2504  return result;
2505 }
2506 
2507 } // namespace
2508 
2509 // We use a basic algorithm to detect components that are not connected to the
2510 // rest of the graph in the LP solution, and add cuts to force some arcs to
2511 // enter and leave this component from outside.
2513  int num_nodes, const std::vector<int>& tails, const std::vector<int>& heads,
2514  const std::vector<Literal>& literals, Model* model) {
2515  CutGenerator result;
2516  result.vars = GetAssociatedVariables(literals, model);
2517  result.generate_cuts =
2518  [num_nodes, tails, heads, literals, model](
2520  LinearConstraintManager* manager) {
2522  num_nodes, tails, heads, literals, lp_values,
2523  /*demands=*/{}, /*capacity=*/0, manager, model);
2524  };
2525  return result;
2526 }
2527 
2529  const std::vector<int>& tails,
2530  const std::vector<int>& heads,
2531  const std::vector<Literal>& literals,
2532  const std::vector<int64>& demands,
2533  int64 capacity, Model* model) {
2534  CutGenerator result;
2535  result.vars = GetAssociatedVariables(literals, model);
2536  result.generate_cuts =
2537  [num_nodes, tails, heads, demands, capacity, literals, model](
2539  LinearConstraintManager* manager) {
2540  SeparateSubtourInequalities(num_nodes, tails, heads, literals,
2541  lp_values, demands, capacity, manager,
2542  model);
2543  };
2544  return result;
2545 }
2546 
2547 std::function<IntegerLiteral()>
2549  // Gather all 0-1 variables that appear in this LP.
2550  std::vector<IntegerVariable> variables;
2551  for (IntegerVariable var : integer_variables_) {
2552  if (integer_trail_->LowerBound(var) == 0 &&
2553  integer_trail_->UpperBound(var) == 1) {
2554  variables.push_back(var);
2555  }
2556  }
2557  VLOG(1) << "HeuristicLPMostInfeasibleBinary has " << variables.size()
2558  << " variables.";
2559 
2560  return [this, variables]() {
2561  const double kEpsilon = 1e-6;
2562  // Find most fractional value.
2563  IntegerVariable fractional_var = kNoIntegerVariable;
2564  double fractional_distance_best = -1.0;
2565  for (const IntegerVariable var : variables) {
2566  // Skip ignored and fixed variables.
2567  if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2568  const IntegerValue lb = integer_trail_->LowerBound(var);
2569  const IntegerValue ub = integer_trail_->UpperBound(var);
2570  if (lb == ub) continue;
2571 
2572  // Check variable's support is fractional.
2573  const double lp_value = this->GetSolutionValue(var);
2574  const double fractional_distance =
2575  std::min(std::ceil(lp_value - kEpsilon) - lp_value,
2576  lp_value - std::floor(lp_value + kEpsilon));
2577  if (fractional_distance < kEpsilon) continue;
2578 
2579  // Keep variable if it is farther from integrality than the previous.
2580  if (fractional_distance > fractional_distance_best) {
2581  fractional_var = var;
2582  fractional_distance_best = fractional_distance;
2583  }
2584  }
2585 
2586  if (fractional_var != kNoIntegerVariable) {
2587  IntegerLiteral::GreaterOrEqual(fractional_var, IntegerValue(1));
2588  }
2589  return IntegerLiteral();
2590  };
2591 }
2592 
2593 std::function<IntegerLiteral()>
2595  // Gather all 0-1 variables that appear in this LP.
2596  std::vector<IntegerVariable> variables;
2597  for (IntegerVariable var : integer_variables_) {
2598  if (integer_trail_->LowerBound(var) == 0 &&
2599  integer_trail_->UpperBound(var) == 1) {
2600  variables.push_back(var);
2601  }
2602  }
2603  VLOG(1) << "HeuristicLpReducedCostBinary has " << variables.size()
2604  << " variables.";
2605 
2606  // Store average of reduced cost from 1 to 0. The best heuristic only sets
2607  // variables to one and cares about cost to zero, even though classic
2608  // pseudocost will use max_var min(cost_to_one[var], cost_to_zero[var]).
2609  const int num_vars = variables.size();
2610  std::vector<double> cost_to_zero(num_vars, 0.0);
2611  std::vector<int> num_cost_to_zero(num_vars);
2612  int num_calls = 0;
2613 
2614  return [=]() mutable {
2615  const double kEpsilon = 1e-6;
2616 
2617  // Every 10000 calls, decay pseudocosts.
2618  num_calls++;
2619  if (num_calls == 10000) {
2620  for (int i = 0; i < num_vars; i++) {
2621  cost_to_zero[i] /= 2;
2622  num_cost_to_zero[i] /= 2;
2623  }
2624  num_calls = 0;
2625  }
2626 
2627  // Accumulate pseudo-costs of all unassigned variables.
2628  for (int i = 0; i < num_vars; i++) {
2629  const IntegerVariable var = variables[i];
2630  // Skip ignored and fixed variables.
2631  if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2632  const IntegerValue lb = integer_trail_->LowerBound(var);
2633  const IntegerValue ub = integer_trail_->UpperBound(var);
2634  if (lb == ub) continue;
2635 
2636  const double rc = this->GetSolutionReducedCost(var);
2637  // Skip reduced costs that are nonzero because of numerical issues.
2638  if (std::abs(rc) < kEpsilon) continue;
2639 
2640  const double value = std::round(this->GetSolutionValue(var));
2641  if (value == 1.0 && rc < 0.0) {
2642  cost_to_zero[i] -= rc;
2643  num_cost_to_zero[i]++;
2644  }
2645  }
2646 
2647  // Select noninstantiated variable with highest pseudo-cost.
2648  int selected_index = -1;
2649  double best_cost = 0.0;
2650  for (int i = 0; i < num_vars; i++) {
2651  const IntegerVariable var = variables[i];
2652  // Skip ignored and fixed variables.
2653  if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2654  if (integer_trail_->IsFixed(var)) continue;
2655 
2656  if (num_cost_to_zero[i] > 0 &&
2657  best_cost < cost_to_zero[i] / num_cost_to_zero[i]) {
2658  best_cost = cost_to_zero[i] / num_cost_to_zero[i];
2659  selected_index = i;
2660  }
2661  }
2662 
2663  if (selected_index >= 0) {
2664  return IntegerLiteral::GreaterOrEqual(variables[selected_index],
2665  IntegerValue(1));
2666  }
2667  return IntegerLiteral();
2668  };
2669 }
2670 
2671 void LinearProgrammingConstraint::UpdateAverageReducedCosts() {
2672  const int num_vars = integer_variables_.size();
2673  if (sum_cost_down_.size() < num_vars) {
2674  sum_cost_down_.resize(num_vars, 0.0);
2675  num_cost_down_.resize(num_vars, 0);
2676  sum_cost_up_.resize(num_vars, 0.0);
2677  num_cost_up_.resize(num_vars, 0);
2678  rc_scores_.resize(num_vars, 0.0);
2679  }
2680 
2681  // Decay averages.
2682  num_calls_since_reduced_cost_averages_reset_++;
2683  if (num_calls_since_reduced_cost_averages_reset_ == 10000) {
2684  for (int i = 0; i < num_vars; i++) {
2685  sum_cost_up_[i] /= 2;
2686  num_cost_up_[i] /= 2;
2687  sum_cost_down_[i] /= 2;
2688  num_cost_down_[i] /= 2;
2689  }
2690  num_calls_since_reduced_cost_averages_reset_ = 0;
2691  }
2692 
2693  // Accumulate reduced costs of all unassigned variables.
2694  for (int i = 0; i < num_vars; i++) {
2695  const IntegerVariable var = integer_variables_[i];
2696 
2697  // Skip ignored and fixed variables.
2698  if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2699  if (integer_trail_->IsFixed(var)) continue;
2700 
2701  // Skip reduced costs that are zero or close.
2702  const double rc = lp_reduced_cost_[i];
2703  if (std::abs(rc) < kCpEpsilon) continue;
2704 
2705  if (rc < 0.0) {
2706  sum_cost_down_[i] -= rc;
2707  num_cost_down_[i]++;
2708  } else {
2709  sum_cost_up_[i] += rc;
2710  num_cost_up_[i]++;
2711  }
2712  }
2713 
2714  // Tricky, we artificially reset the rc_rev_int_repository_ to level zero
2715  // so that the rev_rc_start_ is zero.
2716  rc_rev_int_repository_.SetLevel(0);
2717  rc_rev_int_repository_.SetLevel(trail_->CurrentDecisionLevel());
2718  rev_rc_start_ = 0;
2719 
2720  // Cache the new score (higher is better) using the average reduced costs
2721  // as a signal.
2722  positions_by_decreasing_rc_score_.clear();
2723  for (int i = 0; i < num_vars; i++) {
2724  // If only one direction exist, we takes its value divided by 2, so that
2725  // such variable should have a smaller cost than the min of the two side
2726  // except if one direction have a really high reduced costs.
2727  const double a_up =
2728  num_cost_up_[i] > 0 ? sum_cost_up_[i] / num_cost_up_[i] : 0.0;
2729  const double a_down =
2730  num_cost_down_[i] > 0 ? sum_cost_down_[i] / num_cost_down_[i] : 0.0;
2731  if (num_cost_down_[i] > 0 && num_cost_up_[i] > 0) {
2732  rc_scores_[i] = std::min(a_up, a_down);
2733  } else {
2734  rc_scores_[i] = 0.5 * (a_down + a_up);
2735  }
2736 
2737  // We ignore scores of zero (i.e. no data) and will follow the default
2738  // search heuristic if all variables are like this.
2739  if (rc_scores_[i] > 0.0) {
2740  positions_by_decreasing_rc_score_.push_back({-rc_scores_[i], i});
2741  }
2742  }
2743  std::sort(positions_by_decreasing_rc_score_.begin(),
2744  positions_by_decreasing_rc_score_.end());
2745 }
2746 
2747 // TODO(user): Remove duplication with HeuristicLpReducedCostBinary().
2748 std::function<IntegerLiteral()>
2750  return [this]() { return this->LPReducedCostAverageDecision(); };
2751 }
2752 
2753 IntegerLiteral LinearProgrammingConstraint::LPReducedCostAverageDecision() {
2754  // Select noninstantiated variable with highest positive average reduced cost.
2755  int selected_index = -1;
2756  const int size = positions_by_decreasing_rc_score_.size();
2757  rc_rev_int_repository_.SaveState(&rev_rc_start_);
2758  for (int i = rev_rc_start_; i < size; ++i) {
2759  const int index = positions_by_decreasing_rc_score_[i].second;
2760  const IntegerVariable var = integer_variables_[index];
2761  if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2762  if (integer_trail_->IsFixed(var)) continue;
2763  selected_index = index;
2764  rev_rc_start_ = i;
2765  break;
2766  }
2767 
2768  if (selected_index == -1) return IntegerLiteral();
2769  const IntegerVariable var = integer_variables_[selected_index];
2770 
2771  // If ceil(value) is current upper bound, try var == upper bound first.
2772  // Guarding with >= prevents numerical problems.
2773  // With 0/1 variables, this will tend to try setting to 1 first,
2774  // which produces more shallow trees.
2775  const IntegerValue ub = integer_trail_->UpperBound(var);
2776  const IntegerValue value_ceil(
2777  std::ceil(this->GetSolutionValue(var) - kCpEpsilon));
2778  if (value_ceil >= ub) {
2779  return IntegerLiteral::GreaterOrEqual(var, ub);
2780  }
2781 
2782  // If floor(value) is current lower bound, try var == lower bound first.
2783  // Guarding with <= prevents numerical problems.
2784  const IntegerValue lb = integer_trail_->LowerBound(var);
2785  const IntegerValue value_floor(
2786  std::floor(this->GetSolutionValue(var) + kCpEpsilon));
2787  if (value_floor <= lb) {
2788  return IntegerLiteral::LowerOrEqual(var, lb);
2789  }
2790 
2791  // Here lb < value_floor <= value_ceil < ub.
2792  // Try the most promising split between var <= floor or var >= ceil.
2793  const double a_up =
2794  num_cost_up_[selected_index] > 0
2795  ? sum_cost_up_[selected_index] / num_cost_up_[selected_index]
2796  : 0.0;
2797  const double a_down =
2798  num_cost_down_[selected_index] > 0
2799  ? sum_cost_down_[selected_index] / num_cost_down_[selected_index]
2800  : 0.0;
2801  if (a_down < a_up) {
2802  return IntegerLiteral::LowerOrEqual(var, value_floor);
2803  } else {
2804  return IntegerLiteral::GreaterOrEqual(var, value_ceil);
2805  }
2806 }
2807 
2808 } // namespace sat
2809 } // namespace operations_research
operations_research::sat::ZeroHalfCutHelper::InterestingCandidates
std::vector< std::vector< std::pair< glop::RowIndex, IntegerValue > > > InterestingCandidates(ModelRandomGenerator *random)
Definition: zero_half_cuts.cc:227
var
IntVar * var
Definition: expr_array.cc:1858
operations_research::sat::LinearProgrammingConstraint::GetSolutionValue
double GetSolutionValue(IntegerVariable variable) const
Definition: linear_programming_constraint.cc:626
tail
int64 tail
Definition: routing_flow.cc:127
operations_research::glop::LinearProgram::num_constraints
RowIndex num_constraints() const
Definition: lp_data.h:208
operations_research::glop::ConstraintStatus::FREE
@ FREE
operations_research::glop::RevisedSimplex::GetProblemStatus
ProblemStatus GetProblemStatus() const
Definition: revised_simplex.cc:423
operations_research::sat::GenericLiteralWatcher::Register
int Register(PropagatorInterface *propagator)
Definition: integer.cc:1910
operations_research::glop::LinearProgram::AddSlackVariablesWhereNecessary
void AddSlackVariablesWhereNecessary(bool detect_integer_constraints)
Definition: lp_data.cc:666
min
int64 min
Definition: alldiff_cst.cc:138
integral_types.h
operations_research::glop::DenseRow
StrictITIVector< ColIndex, Fractional > DenseRow
Definition: lp_types.h:299
map_util.h
operations_research::sat::IntegerTrail
Definition: integer.h:533
operations_research::glop::VariableStatus::BASIC
@ BASIC
VLOG
#define VLOG(verboselevel)
Definition: base/logging.h:978
operations_research::sat::LinearConstraintManager::AddCut
bool AddCut(LinearConstraint ct, std::string type_name, const absl::StrongVector< IntegerVariable, double > &lp_solution, std::string extra_info="")
Definition: linear_constraint_manager.cc:206
absl::StrongVector::push_back
void push_back(const value_type &x)
Definition: strong_vector.h:158
operations_research::Arc
std::pair< int64, int64 > Arc
Definition: search.cc:3361
operations_research::CapSub
int64 CapSub(int64 x, int64 y)
Definition: saturated_arithmetic.h:154
operations_research::sat::kNoIntegerVariable
const IntegerVariable kNoIntegerVariable(-1)
operations_research::sat::FloorRatio
IntegerValue FloorRatio(IntegerValue dividend, IntegerValue positive_divisor)
Definition: integer.h:90
operations_research::sat::VariableIsPositive
bool VariableIsPositive(IntegerVariable i)
Definition: integer.h:130
max
int64 max
Definition: alldiff_cst.cc:139
operations_research::sat::LinearConstraint::vars
std::vector< IntegerVariable > vars
Definition: linear_constraint.h:42
operations_research::glop::ProblemStatus::ABNORMAL
@ ABNORMAL
bound
int64 bound
Definition: routing_search.cc:971
operations_research::sat::LinearProgrammingConstraint::ConstraintIndex
glop::RowIndex ConstraintIndex
Definition: linear_programming_constraint.h:130
operations_research::sat::CutGenerator
Definition: cuts.h:40
operations_research::glop::LpScalingHelper::Scale
void Scale(LinearProgram *lp)
Definition: lp_data_utils.cc:76
operations_research::CapProd
int64 CapProd(int64 x, int64 y)
Definition: saturated_arithmetic.h:231
absl::StrongVector::size
size_type size() const
Definition: strong_vector.h:147
operations_research::sat::LinearProgrammingConstraint::GetSolutionReducedCost
double GetSolutionReducedCost(IntegerVariable variable) const
Definition: linear_programming_constraint.cc:631
operations_research::sat::CreateStronglyConnectedGraphCutGenerator
CutGenerator CreateStronglyConnectedGraphCutGenerator(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, Model *model)
Definition: linear_programming_constraint.cc:2512
operations_research::glop::ConstraintStatus::FIXED_VALUE
@ FIXED_VALUE
operations_research::sat::CoverCutHelper::mutable_cut
LinearConstraint * mutable_cut()
Definition: cuts.h:250
operations_research::sat::IntegerTrail::LevelZeroUpperBound
IntegerValue LevelZeroUpperBound(IntegerVariable var) const
Definition: integer.h:1338
operations_research::sat::IntegerTrail::LevelZeroLowerBound
IntegerValue LevelZeroLowerBound(IntegerVariable var) const
Definition: integer.h:1333
CHECK_GE
#define CHECK_GE(val1, val2)
Definition: base/logging.h:701
operations_research::glop::RevisedSimplex::LoadStateForNextSolve
void LoadStateForNextSolve(const BasisState &state)
Definition: revised_simplex.cc:124
operations_research::glop::kEpsilon
const double kEpsilon
Definition: lp_types.h:86
operations_research::glop::RevisedSimplex::GetParameters
const GlopParameters & GetParameters() const
Definition: revised_simplex.h:153
logging.h
operations_research::sat::SatSolver
Definition: sat_solver.h:58
operations_research::sat::IntegerEncoder::GetLiteralView
const IntegerVariable GetLiteralView(Literal lit) const
Definition: integer.h:420
operations_research::sat::ModelRandomGenerator
Definition: sat/util.h:33
operations_research::glop::LinearProgram::SetConstraintBounds
void SetConstraintBounds(RowIndex row, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:307
operations_research::sat::GenericLiteralWatcher::RegisterReversibleInt
void RegisterReversibleInt(int id, int *rev)
Definition: integer.cc:1954
DCHECK_GT
#define DCHECK_GT(val1, val2)
Definition: base/logging.h:890
value
int64 value
Definition: demon_profiler.cc:43
CHECK_GT
#define CHECK_GT(val1, val2)
Definition: base/logging.h:702
operations_research::sat::CoverCutHelper::TrySimpleKnapsack
bool TrySimpleKnapsack(const LinearConstraint base_ct, const std::vector< double > &lp_values, const std::vector< IntegerValue > &lower_bounds, const std::vector< IntegerValue > &upper_bounds)
Definition: cuts.cc:1154
operations_research::sat::ScatteredIntegerVector::ConvertToLinearConstraint
void ConvertToLinearConstraint(const std::vector< IntegerVariable > &integer_variables, IntegerValue upper_bound, LinearConstraint *result)
Definition: linear_programming_constraint.cc:109
CHECK_LT
#define CHECK_LT(val1, val2)
Definition: base/logging.h:700
operations_research::sat::LinearProgrammingConstraint::~LinearProgrammingConstraint
~LinearProgrammingConstraint() override
Definition: linear_programming_constraint.cc:183
operations_research::sat::LinearConstraintManager::ChangeLp
bool ChangeLp(const absl::StrongVector< IntegerVariable, double > &lp_solution, glop::BasisState *solution_state)
Definition: linear_constraint_manager.cc:440
saturated_arithmetic.h
operations_research
The vehicle routing library lets one model and solve generic vehicle routing problems ranging from th...
Definition: dense_doubly_linked_list.h:21
operations_research::MathUtil::GCD64
static int64 GCD64(int64 x, int64 y)
Definition: mathutil.h:107
operations_research::sat::NegationOf
std::vector< IntegerVariable > NegationOf(const std::vector< IntegerVariable > &vars)
Definition: integer.cc:27
kint64min
static const int64 kint64min
Definition: integral_types.h:60
operations_research::sat::LinearProgrammingConstraint::RegisterWith
void RegisterWith(Model *model)
Definition: linear_programming_constraint.cc:523
operations_research::sat::IntegerLiteral::GreaterOrEqual
static IntegerLiteral GreaterOrEqual(IntegerVariable i, IntegerValue bound)
Definition: integer.h:1247
operations_research::sat::LinearProgrammingConstraint::IncrementalPropagate
bool IncrementalPropagate(const std::vector< int > &watch_indices) override
Definition: linear_programming_constraint.cc:589
operations_research::glop::LinearProgram::NotifyThatColumnsAreClean
void NotifyThatColumnsAreClean()
Definition: lp_data.h:539
operations_research::sat::PositiveVariable
IntegerVariable PositiveVariable(IntegerVariable i)
Definition: integer.h:134
operations_research::glop::LpScalingHelper::UnscaleDualValue
Fractional UnscaleDualValue(RowIndex row, Fractional value) const
Definition: lp_data_utils.cc:109
int64
int64_t int64
Definition: integral_types.h:34
operations_research::glop::RevisedSimplex::SetIntegralityScale
void SetIntegralityScale(ColIndex col, Fractional scale)
Definition: revised_simplex.cc:2345
operations_research::sat::ImpliedBoundsProcessor::SeparateSomeImpliedBoundCuts
void SeparateSomeImpliedBoundCuts(const absl::StrongVector< IntegerVariable, double > &lp_values)
Definition: cuts.cc:1574
operations_research::sat::IncrementalAverage::CurrentAverage
double CurrentAverage() const
Definition: sat/util.h:113
operations_research::TimeLimit
A simple class to enforce both an elapsed time limit and a deterministic time limit in the same threa...
Definition: time_limit.h:105
index
int index
Definition: pack.cc:508
absl::StrongVector::assign
void assign(size_type n, const value_type &val)
Definition: strong_vector.h:131
operations_research::glop::RevisedSimplex::GetObjectiveValue
Fractional GetObjectiveValue() const
Definition: revised_simplex.cc:427
operations_research::glop::Fractional
double Fractional
Definition: lp_types.h:77
operations_research::glop::ConstraintStatus::AT_UPPER_BOUND
@ AT_UPPER_BOUND
operations_research::sat::GenericLiteralWatcher
Definition: integer.h:1091
operations_research::glop::LinearProgram::CreateNewVariable
ColIndex CreateNewVariable()
Definition: lp_data.cc:160
operations_research::sat::ScatteredIntegerVector::GetTerms
std::vector< std::pair< glop::ColIndex, IntegerValue > > GetTerms()
Definition: linear_programming_constraint.cc:136
operations_research::glop::LinearProgram::CreateNewConstraint
RowIndex CreateNewConstraint()
Definition: lp_data.cc:189
operations_research::sat::ScatteredIntegerVector::ClearAndResize
void ClearAndResize(int size)
Definition: linear_programming_constraint.cc:53
gtl::FindOrDie
const Collection::value_type::second_type & FindOrDie(const Collection &collection, const typename Collection::value_type::first_type &key)
Definition: map_util.h:176
operations_research::glop::RevisedSimplex::Solve
ABSL_MUST_USE_RESULT Status Solve(const LinearProgram &lp, TimeLimit *time_limit)
Definition: revised_simplex.cc:134
strongly_connected_components.h
operations_research::glop::LinearProgram::Clear
void Clear()
Definition: lp_data.cc:132
operations_research::sat::LinearConstraintManager
Definition: linear_constraint_manager.h:40
DCHECK_NE
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:886
demand
int64 demand
Definition: resource.cc:123
operations_research::glop::RowToColIndex
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:48
DEBUG_MODE
const bool DEBUG_MODE
Definition: macros.h:24
a
int64 a
Definition: constraint_solver/table.cc:42
operations_research::sat::IntegerTrail::UpperBoundAsLiteral
IntegerLiteral UpperBoundAsLiteral(IntegerVariable i) const
Definition: integer.h:1318
operations_research::RevRepository::SetLevel
void SetLevel(int level) final
Definition: rev.h:134
operations_research::sat::LinearConstraint
Definition: linear_constraint.h:39
operations_research::glop::RevisedSimplex::GetDualRayRowCombination
const DenseRow & GetDualRayRowCombination() const
Definition: revised_simplex.cc:487
operations_research::sat::IntTypeAbs
IntType IntTypeAbs(IntType t)
Definition: integer.h:77
operations_research::glop::BasisState
Definition: revised_simplex.h:132
operations_research::glop::LinearProgram::GetSparseColumn
const SparseColumn & GetSparseColumn(ColIndex col) const
Definition: lp_data.cc:407
operations_research::sat::GenericLiteralWatcher::SetPropagatorPriority
void SetPropagatorPriority(int id, int priority)
Definition: integer.cc:1933
operations_research::glop::LinearProgram::SetVariableBounds
void SetVariableBounds(ColIndex col, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:247
operations_research::sat::IntegerTrail::UpperBound
IntegerValue UpperBound(IntegerVariable i) const
Definition: integer.h:1287
operations_research::sat::IntegerTrail::Enqueue
ABSL_MUST_USE_RESULT bool Enqueue(IntegerLiteral i_lit, absl::Span< const Literal > literal_reason, absl::Span< const IntegerLiteral > integer_reason)
Definition: integer.cc:989
operations_research::glop::RevisedSimplex::ClearIntegralityScales
void ClearIntegralityScales()
Definition: revised_simplex.h:250
operations_research::CapAdd
int64 CapAdd(int64 x, int64 y)
Definition: saturated_arithmetic.h:124
operations_research::sat::ComputeActivity
double ComputeActivity(const LinearConstraint &constraint, const absl::StrongVector< IntegerVariable, double > &values)
Definition: linear_constraint.cc:121
operations_research::glop::RevisedSimplex::GetConstraintStatus
ConstraintStatus GetConstraintStatus(RowIndex row) const
Definition: revised_simplex.cc:465
operations_research::glop::RevisedSimplex::GetState
const BasisState & GetState() const
Definition: revised_simplex.cc:457
operations_research::sat::LinearConstraintManager::LpConstraints
const std::vector< ConstraintIndex > & LpConstraints() const
Definition: linear_constraint_manager.h:117
operations_research::sat::LinearProgrammingConstraint::SetLevel
void SetLevel(int level) override
Definition: linear_programming_constraint.cc:559
operations_research::sat::kMaxIntegerValue
constexpr IntegerValue kMaxIntegerValue(std::numeric_limits< IntegerValue::ValueType >::max() - 1)
operations_research::sat::ImpliedBoundsProcessor::ProcessUpperBoundedConstraintWithSlackCreation
void ProcessUpperBoundedConstraintWithSlackCreation(bool substitute_only_inner_variables, IntegerVariable first_slack, const absl::StrongVector< IntegerVariable, double > &lp_values, LinearConstraint *cut, std::vector< SlackInfo > *slack_infos)
Definition: cuts.cc:1583
mathutil.h
operations_research::glop::LpScalingHelper::UnscaleReducedCost
Fractional UnscaleReducedCost(ColIndex col, Fractional value) const
Definition: lp_data_utils.cc:103
CHECK_EQ
#define CHECK_EQ(val1, val2)
Definition: base/logging.h:697
operations_research::sat::LinearConstraintManager::Add
ConstraintIndex Add(LinearConstraint ct, bool *added=nullptr)
Definition: linear_constraint_manager.cc:124
operations_research::sat::LinearConstraint::lb
IntegerValue lb
Definition: linear_constraint.h:40
operations_research::sat::ImpliedBoundsProcessor::IbCutPool
TopNCuts & IbCutPool()
Definition: cuts.h:124
operations_research::sat::IncrementalAverage::AddData
void AddData(double new_record)
Definition: sat/util.cc:68
operations_research::glop::ConstraintStatus::BASIC
@ BASIC
operations_research::sat::IntegerTrail::RegisterReversibleClass
void RegisterReversibleClass(ReversibleInterface *rev)
Definition: integer.h:816
operations_research::sat::Model
Class that owns everything related to a particular optimization model.
Definition: sat/model.h:38
operations_research::glop::RevisedSimplex::GetNumberOfIterations
int64 GetNumberOfIterations() const
Definition: revised_simplex.cc:431
ct
const Constraint * ct
Definition: demon_profiler.cc:42
operations_research::sat::LinearConstraintManager::SetObjectiveCoefficient
void SetObjectiveCoefficient(IntegerVariable var, IntegerValue coeff)
Definition: linear_constraint_manager.cc:301
operations_research::glop::LinearProgram::SetCoefficient
void SetCoefficient(RowIndex row, ColIndex col, Fractional value)
Definition: lp_data.cc:315
operations_research::sat::IntegerTrail::LowerBoundAsLiteral
IntegerLiteral LowerBoundAsLiteral(IntegerVariable i) const
Definition: integer.h:1313
operations_research::sat::AddProductTo
bool AddProductTo(IntegerValue a, IntegerValue b, IntegerValue *result)
Definition: integer.h:110
operations_research::sat::IntegerLiteral
Definition: integer.h:153
operations_research::sat::LinearProgrammingConstraintCollection
Definition: linear_programming_constraint.h:533
operations_research::glop::RevisedSimplex::GetUnitRowLeftInverse
const ScatteredRow & GetUnitRowLeftInverse(RowIndex row)
Definition: revised_simplex.h:223
operations_research::sat::ScatteredIntegerVector::AddLinearExpressionMultiple
bool AddLinearExpressionMultiple(IntegerValue multiplier, const std::vector< std::pair< glop::ColIndex, IntegerValue >> &terms)
Definition: linear_programming_constraint.cc:81
operations_research::sat::LinearProgrammingConstraintLpSolution
Definition: linear_programming_constraint.h:51
operations_research::sat::GenericLiteralWatcher::WatchIntegerVariable
void WatchIntegerVariable(IntegerVariable i, int id, int watch_index=-1)
Definition: integer.h:1380
operations_research::sat::ImpliedBounds
Definition: implied_bounds.h:77
operations_research::glop::LinearProgram::objective_scaling_factor
Fractional objective_scaling_factor() const
Definition: lp_data.h:261
operations_research::glop::RevisedSimplex::GetDualValue
Fractional GetDualValue(RowIndex row) const
Definition: revised_simplex.cc:449
DCHECK
#define DCHECK(condition)
Definition: base/logging.h:884
operations_research::glop::RevisedSimplex::GetVariableStatus
VariableStatus GetVariableStatus(ColIndex col) const
Definition: revised_simplex.cc:453
operations_research::sat::ZeroHalfCutHelper::ProcessVariables
void ProcessVariables(const std::vector< double > &lp_values, const std::vector< IntegerValue > &lower_bounds, const std::vector< IntegerValue > &upper_bounds)
Definition: zero_half_cuts.cc:28
operations_research::sat::IntegerLiteral::LowerOrEqual
static IntegerLiteral LowerOrEqual(IntegerVariable i, IntegerValue bound)
Definition: integer.h:1253
implied_bounds.h
operations_research::glop::RevisedSimplex::SetParameters
void SetParameters(const GlopParameters &parameters)
Definition: revised_simplex.cc:3057
zero_half_cuts.h
operations_research::glop::RevisedSimplex::NotifyThatMatrixIsUnchangedForNextSolve
void NotifyThatMatrixIsUnchangedForNextSolve()
Definition: revised_simplex.cc:130
operations_research::sat::IntegerTrail::Index
int Index() const
Definition: integer.h:820
model
GRBmodel * model
Definition: gurobi_interface.cc:269
absl::StrongVector::clear
void clear()
Definition: strong_vector.h:170
operations_research::sat::Trail::CurrentDecisionLevel
int CurrentDecisionLevel() const
Definition: sat_base.h:355
operations_research::glop::LinearProgram::SetObjectiveOffset
void SetObjectiveOffset(Fractional objective_offset)
Definition: lp_data.cc:329
operations_research::sat::IntegerTrail::RemoveLevelZeroBounds
void RemoveLevelZeroBounds(std::vector< IntegerLiteral > *reason) const
Definition: integer.cc:919
operations_research::sat::LinearProgrammingConstraint::AddCutGenerator
void AddCutGenerator(CutGenerator generator)
Definition: linear_programming_constraint.cc:582
operations_research::glop::LpScalingHelper::UnscaleVariableValue
Fractional UnscaleVariableValue(ColIndex col, Fractional value) const
Definition: lp_data_utils.cc:97
operations_research::glop::LpScalingHelper::VariableScalingFactor
Fractional VariableScalingFactor(ColIndex col) const
Definition: lp_data_utils.cc:91
operations_research::glop::LinearProgram::GetDimensionString
std::string GetDimensionString() const
Definition: lp_data.cc:423
operations_research::sat::ToDouble
double ToDouble(IntegerValue value)
Definition: integer.h:69
operations_research::sat::ImpliedBoundsProcessor::AddLpVariable
void AddLpVariable(IntegerVariable var)
Definition: cuts.h:107
absl::StrongVector
Definition: strong_vector.h:76
operations_research::glop::ProblemStatus::OPTIMAL
@ OPTIMAL
col
ColIndex col
Definition: markowitz.cc:176
operations_research::sat::IntegerTrail::ReportConflict
bool ReportConflict(absl::Span< const Literal > literal_reason, absl::Span< const IntegerLiteral > integer_reason)
Definition: integer.h:793
operations_research::glop::SparseVector::num_entries
EntryIndex num_entries() const
Definition: sparse_vector.h:270
operations_research::sat::kMinIntegerValue
constexpr IntegerValue kMinIntegerValue(-kMaxIntegerValue)
operations_research::glop::ProblemStatus
ProblemStatus
Definition: lp_types.h:101
operations_research::sat::CutGenerator::generate_cuts
std::function< void(const absl::StrongVector< IntegerVariable, double > &lp_values, LinearConstraintManager *manager)> generate_cuts
Definition: cuts.h:45
stl_util.h
operations_research::sat::TopNCuts::TransferToManager
void TransferToManager(const absl::StrongVector< IntegerVariable, double > &lp_solution, LinearConstraintManager *manager)
Definition: linear_constraint_manager.cc:721
operations_research::sat::LinearProgrammingConstraint::HeuristicLpReducedCostAverageBranching
std::function< IntegerLiteral()> HeuristicLpReducedCostAverageBranching()
Definition: linear_programming_constraint.cc:2749
row
RowIndex row
Definition: markowitz.cc:175
operations_research::glop::DenseColumn
StrictITIVector< RowIndex, Fractional > DenseColumn
Definition: lp_types.h:328
operations_research::glop::LinearProgram::SetObjectiveCoefficient
void SetObjectiveCoefficient(ColIndex col, Fractional value)
Definition: lp_data.cc:324
operations_research::sat::IntegerTrail::RelaxLinearReason
void RelaxLinearReason(IntegerValue slack, absl::Span< const IntegerValue > coeffs, std::vector< IntegerLiteral > *reason) const
Definition: integer.cc:785
operations_research::sat::LinearProgrammingConstraint::HeuristicLpReducedCostBinary
std::function< IntegerLiteral()> HeuristicLpReducedCostBinary(Model *model)
Definition: linear_programming_constraint.cc:2594
absl::StrongVector::back
reference back()
Definition: strong_vector.h:174
operations_research::glop::ConstraintStatus::AT_LOWER_BOUND
@ AT_LOWER_BOUND
operations_research::glop::RevisedSimplex::ClearStateForNextSolve
void ClearStateForNextSolve()
Definition: revised_simplex.cc:119
operations_research::sat::LinearProgrammingConstraint::SetObjectiveCoefficient
void SetObjectiveCoefficient(IntegerVariable ivar, IntegerValue coeff)
Definition: linear_programming_constraint.cc:231
operations_research::sat::ImpliedBoundsProcessor::ClearCache
void ClearCache() const
Definition: cuts.h:111
operations_research::sat::LinearProgrammingConstraint::LinearProgrammingConstraint
LinearProgrammingConstraint(Model *model)
Definition: linear_programming_constraint.cc:156
operations_research::sat::IntegerRoundingCutHelper::ComputeCut
void ComputeCut(RoundingOptions options, const std::vector< double > &lp_values, const std::vector< IntegerValue > &lower_bounds, const std::vector< IntegerValue > &upper_bounds, ImpliedBoundsProcessor *ib_processor, LinearConstraint *cut)
Definition: cuts.cc:706
operations_research::sat::LinearProgrammingConstraint::AddLinearConstraint
void AddLinearConstraint(const LinearConstraint &ct)
Definition: linear_programming_constraint.cc:193
operations_research::sat::ImpliedBoundsProcessor::DebugSlack
bool DebugSlack(IntegerVariable first_slack, const LinearConstraint &initial_cut, const LinearConstraint &cut, const std::vector< SlackInfo > &info)
Definition: cuts.cc:1724
operations_research::sat::CutGenerator::vars
std::vector< IntegerVariable > vars
Definition: cuts.h:41
operations_research::sat::LinearConstraintManager::AllConstraints
const absl::StrongVector< ConstraintIndex, ConstraintInfo > & AllConstraints() const
Definition: linear_constraint_manager.h:110
operations_research::sat::IntegerTrail::IsFixed
bool IsFixed(IntegerVariable i) const
Definition: integer.h:1291
b
int64 b
Definition: constraint_solver/table.cc:43
absl::StrongVector::resize
void resize(size_type new_size)
Definition: strong_vector.h:150
operations_research::sat::GenericLiteralWatcher::AlwaysCallAtLevelZero
void AlwaysCallAtLevelZero(int id)
Definition: integer.cc:1945
strong_vector.h
operations_research::glop::RevisedSimplex::GetProblemNumRows
RowIndex GetProblemNumRows() const
Definition: revised_simplex.cc:433
operations_research::sat::LinearProgrammingDispatcher
Definition: linear_programming_constraint.h:526
operations_research::sat::SeparateSubtourInequalities
void SeparateSubtourInequalities(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, const absl::StrongVector< IntegerVariable, double > &lp_values, absl::Span< const int64 > demands, int64 capacity, LinearConstraintManager *manager, Model *model)
Definition: linear_programming_constraint.cc:2271
capacity
int64 capacity
Definition: routing_flow.cc:129
operations_research::sat::ScatteredIntegerVector::Add
bool Add(glop::ColIndex col, IntegerValue value)
Definition: linear_programming_constraint.cc:70
next
Block * next
Definition: constraint_solver.cc:674
operations_research::sat::IntegerTrail::LowerBound
IntegerValue LowerBound(IntegerVariable i) const
Definition: integer.h:1283
operations_research::sat::CoverCutHelper::Info
const std::string Info()
Definition: cuts.h:254
operations_research::sat::CreateCVRPCutGenerator
CutGenerator CreateCVRPCutGenerator(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, const std::vector< int64 > &demands, int64 capacity, Model *model)
Definition: linear_programming_constraint.cc:2528
operations_research::sat::ComputeInfinityNorm
IntegerValue ComputeInfinityNorm(const LinearConstraint &constraint)
Definition: linear_constraint.cc:141
operations_research::glop::RevisedSimplex::GetVariableValue
Fractional GetVariableValue(ColIndex col) const
Definition: revised_simplex.cc:437
CHECK_NE
#define CHECK_NE(val1, val2)
Definition: base/logging.h:698
operations_research::sat::IntegerTrail::IsCurrentlyIgnored
bool IsCurrentlyIgnored(IntegerVariable i) const
Definition: integer.h:618
operations_research::glop::RevisedSimplex::GetDualRay
const DenseColumn & GetDualRay() const
Definition: revised_simplex.cc:482
operations_research::sat::LinearConstraint::coeffs
std::vector< IntegerValue > coeffs
Definition: linear_constraint.h:43
operations_research::sat::IntegerRoundingCutHelper::NumLiftedBooleans
int NumLiftedBooleans() const
Definition: cuts.h:220
operations_research::sat::ZeroHalfCutHelper::AddOneConstraint
void AddOneConstraint(glop::RowIndex, const std::vector< std::pair< glop::ColIndex, IntegerValue >> &terms, IntegerValue lb, IntegerValue ub)
Definition: zero_half_cuts.cc:58
operations_research::glop::RevisedSimplex::GetBasis
ColIndex GetBasis(RowIndex row) const
Definition: revised_simplex.cc:492
lp_types.h
operations_research::sat::LinearProgrammingConstraint::HeuristicLpMostInfeasibleBinary
std::function< IntegerLiteral()> HeuristicLpMostInfeasibleBinary(Model *model)
Definition: linear_programming_constraint.cc:2548
head
int64 head
Definition: routing_flow.cc:128
operations_research::sat::DivideByGCD
void DivideByGCD(LinearConstraint *constraint)
Definition: linear_constraint.cc:188
operations_research::sat::IntegerTrail::IsFixedAtLevelZero
bool IsFixedAtLevelZero(IntegerVariable var) const
Definition: integer.h:1343
operations_research::sat::LinearProgrammingConstraint::Propagate
bool Propagate() override
Definition: linear_programming_constraint.cc:1353
operations_research::glop::RevisedSimplex::GetReducedCosts
const DenseRow & GetReducedCosts() const
Definition: revised_simplex.cc:445
operations_research::TimeLimit::LimitReached
bool LimitReached()
Returns true when the external limit is true, or the deterministic time is over the deterministic lim...
Definition: time_limit.h:532
preprocessor.h
operations_research::sat::LinearConstraint::ub
IntegerValue ub
Definition: linear_constraint.h:41
operations_research::glop::RevisedSimplex::GetReducedCost
Fractional GetReducedCost(ColIndex col) const
Definition: revised_simplex.cc:441
operations_research::RevRepository::SaveState
void SaveState(T *object)
Definition: rev.h:61
operations_research::sat::LinearConstraintManager::AddAllConstraintsToLp
void AddAllConstraintsToLp()
Definition: linear_constraint_manager.cc:682
operations_research::sat::LinearConstraintManager::DebugCheckConstraint
bool DebugCheckConstraint(const LinearConstraint &cut)
Definition: linear_constraint_manager.cc:690
CHECK
#define CHECK(condition)
Definition: base/logging.h:495
commandlineflags.h
parameters
SatParameters parameters
Definition: cp_model_fz_solver.cc:108
operations_research::sat::IntegerEncoder
Definition: integer.h:276
integer.h
name
const std::string name
Definition: default_search.cc:808
operations_research::sat::GenericLiteralWatcher::WatchUpperBound
void WatchUpperBound(IntegerVariable var, int id, int watch_index=-1)
Definition: integer.h:1374
status.h
operations_research::sat::Trail
Definition: sat_base.h:233
parameters.pb.h
linear_programming_constraint.h
operations_research::glop::RevisedSimplex::GetProblemNumCols
ColIndex GetProblemNumCols() const
Definition: revised_simplex.cc:435
kint64max
static const int64 kint64max
Definition: integral_types.h:62