OR-Tools  8.0
linear_programming_constraint.cc
Go to the documentation of this file.
1 // Copyright 2010-2018 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
15 
16 #include <algorithm>
17 #include <cmath>
18 #include <iterator>
19 #include <limits>
20 #include <string>
21 #include <utility>
22 #include <vector>
23 
24 #include "absl/container/flat_hash_map.h"
25 #include "absl/numeric/int128.h"
29 #include "ortools/base/logging.h"
30 #include "ortools/base/map_util.h"
31 #include "ortools/base/mathutil.h"
32 #include "ortools/base/stl_util.h"
35 #include "ortools/glop/status.h"
39 #include "ortools/sat/integer.h"
41 
42 namespace operations_research {
43 namespace sat {
44 
45 using glop::ColIndex;
46 using glop::Fractional;
47 using glop::RowIndex;
48 
// Tolerance used on the CP side, e.g. when deciding whether an LP value is
// fractional or whether it escapes a variable's [lb, ub] interval.
const double LinearProgrammingConstraint::kCpEpsilon = 1e-4;
// Tolerance used on raw LP quantities, e.g. reduced costs thresholds in
// FillReducedCostReasonIn().
const double LinearProgrammingConstraint::kLpEpsilon = 1e-6;
51 
// TODO(user): make SatParameters singleton too, otherwise changing them after
// a constraint was added will have no effect on this class.
//
// NOTE(review): the constructor signature line (presumably
// LinearProgrammingConstraint::LinearProgrammingConstraint(Model* model)) is
// elided in this listing; only the initializer list and body are visible.
    : constraint_manager_(model),
      sat_parameters_(*(model->GetOrCreate<SatParameters>())),
      model_(model),
      time_limit_(model->GetOrCreate<TimeLimit>()),
      integer_trail_(model->GetOrCreate<IntegerTrail>()),
      trail_(model->GetOrCreate<Trail>()),
      model_heuristics_(model->GetOrCreate<SearchHeuristicsVector>()),
      integer_encoder_(model->GetOrCreate<IntegerEncoder>()),
      random_(model->GetOrCreate<ModelRandomGenerator>()),
      implied_bounds_processor_({}, integer_trail_,
                                model->GetOrCreate<ImpliedBounds>()),
      dispatcher_(model->GetOrCreate<LinearProgrammingDispatcher>()),
      expanded_lp_solution_(
      // NOTE(review): the argument of expanded_lp_solution_ and the opening
      // brace of the constructor body are elided in this listing.
  // Tweak the default parameters to make the solve incremental.
  glop::GlopParameters parameters;
  parameters.set_use_dual_simplex(true);
  simplex_.SetParameters(parameters);
  if (sat_parameters_.use_branching_in_lp() ||
      sat_parameters_.search_branching() == SatParameters::LP_SEARCH) {
    // Reduced-cost averages are only needed by the LP-based branching
    // heuristics, so only enable them in that case.
    compute_reduced_cost_averages_ = true;
  }

  // Register our local rev int repository.
  integer_trail_->RegisterReversibleClass(&rc_rev_int_repository_);
}
81 
// NOTE(review): the destructor signature (presumably
// LinearProgrammingConstraint::~LinearProgrammingConstraint()) is elided in
// this listing. On destruction we log the total simplex work performed.
  VLOG(1) << "Total number of simplex iterations: "
          << total_num_simplex_iterations_;
}
86 
// Registers a linear constraint with the manager and eagerly creates the LP
// mirror column of every variable it mentions.
// NOTE(review): the signature line (presumably
// void LinearProgrammingConstraint::AddLinearConstraint(...)) is elided in
// this listing.
    const LinearConstraint& ct) {
  DCHECK(!lp_constraint_is_registered_);
  constraint_manager_.Add(ct);

  // We still create the mirror variable right away though.
  //
  // TODO(user): clean this up? Note that it is important that the variable
  // in lp_data_ never changes though, so we can restart from the current
  // lp solution and be incremental (even if the constraints changed).
  for (const IntegerVariable var : ct.vars) {
    GetOrCreateMirrorVariable(PositiveVariable(var));
  }
}
101 
102 glop::ColIndex LinearProgrammingConstraint::GetOrCreateMirrorVariable(
103  IntegerVariable positive_variable) {
104  DCHECK(VariableIsPositive(positive_variable));
105  const auto it = mirror_lp_variable_.find(positive_variable);
106  if (it == mirror_lp_variable_.end()) {
107  const glop::ColIndex col(integer_variables_.size());
108  implied_bounds_processor_.AddLpVariable(positive_variable);
109  mirror_lp_variable_[positive_variable] = col;
110  integer_variables_.push_back(positive_variable);
111  lp_solution_.push_back(std::numeric_limits<double>::infinity());
112  lp_reduced_cost_.push_back(0.0);
113  (*dispatcher_)[positive_variable] = this;
114 
115  const int index = std::max(positive_variable.value(),
116  NegationOf(positive_variable).value());
117  if (index >= expanded_lp_solution_.size()) {
118  expanded_lp_solution_.resize(index + 1, 0.0);
119  }
120  return col;
121  }
122  return it->second;
123 }
124 
// Adds (ivar, coeff) to the LP objective, normalized on the positive
// representation of the variable.
// NOTE(review): the signature line is elided in this listing.
    IntegerValue coeff) {
  CHECK(!lp_constraint_is_registered_);
  objective_is_defined_ = true;
  IntegerVariable pos_var = VariableIsPositive(ivar) ? ivar : NegationOf(ivar);
  // Working on the negation flips the sign of the coefficient.
  if (ivar != pos_var) coeff = -coeff;

  constraint_manager_.SetObjectiveCoefficient(pos_var, coeff);
  const glop::ColIndex col = GetOrCreateMirrorVariable(pos_var);
  integer_objective_.push_back({col, coeff});
  // Keep track of the largest objective coefficient magnitude (presumably
  // used for overflow/scaling checks elsewhere — TODO confirm).
  objective_infinity_norm_ =
      std::max(objective_infinity_norm_, IntTypeAbs(coeff));
}
138 
// TODO(user): As the search progresses, some variables might get fixed.
// Exploit this to reduce the number of variables in the LP and in the
// ConstraintManager? We might also detect during the search that two
// variables are equivalent.
143 //
144 // TODO(user): On TSP/VRP with a lot of cuts, this can take 20% of the overall
145 // running time. We should be able to almost remove most of this from the
146 // profile by being more incremental (modulo LP scaling).
147 //
148 // TODO(user): A longer term idea for LP with a lot of variables is to not
149 // add all variables to each LP solve and do some "sifting". That can be useful
150 // for TSP for instance where the number of edges is large, but only a small
151 // fraction will be used in the optimal solution.
// Rebuilds lp_data_ (the glop LP) from the constraints currently selected by
// the constraint manager. Returns false if a trivially infeasible constraint
// (lb > ub) is detected.
bool LinearProgrammingConstraint::CreateLpFromConstraintManager() {
  // Fill integer_lp_.
  integer_lp_.clear();
  infinity_norms_.clear();
  const auto& all_constraints = constraint_manager_.AllConstraints();
  for (const auto index : constraint_manager_.LpConstraints()) {
    const LinearConstraint& ct = all_constraints[index].constraint;

    integer_lp_.push_back(LinearConstraintInternal());
    LinearConstraintInternal& new_ct = integer_lp_.back();
    new_ct.lb = ct.lb;
    new_ct.ub = ct.ub;
    const int size = ct.vars.size();
    IntegerValue infinity_norm(0);
    if (ct.lb > ct.ub) {
      LOG(INFO) << "Trivial infeasible bound in an LP constraint";
      return false;
    }
    // The infinity norm of a row includes its finite bounds as well as its
    // coefficients.
    if (ct.lb > kMinIntegerValue) {
      infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.lb));
    }
    if (ct.ub < kMaxIntegerValue) {
      infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.ub));
    }
    for (int i = 0; i < size; ++i) {
      // We only use positive variable inside this class.
      IntegerVariable var = ct.vars[i];
      IntegerValue coeff = ct.coeffs[i];
      if (!VariableIsPositive(var)) {
        var = NegationOf(var);
        coeff = -coeff;
      }
      infinity_norm = std::max(infinity_norm, IntTypeAbs(coeff));
      new_ct.terms.push_back({GetOrCreateMirrorVariable(var), coeff});
    }
    infinity_norms_.push_back(infinity_norm);

    // Important to keep lp_data_ "clean": terms sorted by column.
    std::sort(new_ct.terms.begin(), new_ct.terms.end());
  }

  // Copy the integer_lp_ into lp_data_.
  lp_data_.Clear();
  for (int i = 0; i < integer_variables_.size(); ++i) {
    CHECK_EQ(glop::ColIndex(i), lp_data_.CreateNewVariable());
  }
  for (const auto entry : integer_objective_) {
    lp_data_.SetObjectiveCoefficient(entry.first, ToDouble(entry.second));
  }
  for (const LinearConstraintInternal& ct : integer_lp_) {
    const ConstraintIndex row = lp_data_.CreateNewConstraint();
    lp_data_.SetConstraintBounds(row, ToDouble(ct.lb), ToDouble(ct.ub));
    for (const auto& term : ct.terms) {
      lp_data_.SetCoefficient(row, term.first, ToDouble(term.second));
    }
  }
  lp_data_.NotifyThatColumnsAreClean();

  // We scale the LP using the level zero bounds that we later override
  // with the current ones.
  //
  // TODO(user): As part of the scaling, we may also want to shift the initial
  // variable bounds so that each variable contain the value zero in their
  // domain. Maybe just once and for all at the beginning.
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    const IntegerVariable cp_var = integer_variables_[i];
    const double lb = ToDouble(integer_trail_->LevelZeroLowerBound(cp_var));
    const double ub = ToDouble(integer_trail_->LevelZeroUpperBound(cp_var));
    lp_data_.SetVariableBounds(glop::ColIndex(i), lb, ub);
  }

  // Scaling factors are derived from the level-zero bounds above; the current
  // (possibly tighter) bounds are then re-applied in the scaled space.
  scaler_.Scale(&lp_data_);
  UpdateBoundsOfLpVariables();

  lp_data_.NotifyThatColumnsAreClean();
  lp_data_.AddSlackVariablesWhereNecessary(false);
  VLOG(1) << "LP relaxation: " << lp_data_.GetDimensionString() << ". "
          << constraint_manager_.AllConstraints().size()
          << " Managed constraints.";
  return true;
}
234 
235 LPSolveInfo LinearProgrammingConstraint::SolveLpForBranching() {
236  LPSolveInfo info;
237  glop::BasisState basis_state = simplex_.GetState();
238 
239  const glop::Status status = simplex_.Solve(lp_data_, time_limit_);
240  total_num_simplex_iterations_ += simplex_.GetNumberOfIterations();
241  simplex_.LoadStateForNextSolve(basis_state);
242  if (!status.ok()) {
243  VLOG(1) << "The LP solver encountered an error: " << status.error_message();
244  info.status = glop::ProblemStatus::ABNORMAL;
245  return info;
246  }
247  info.status = simplex_.GetProblemStatus();
248  if (info.status == glop::ProblemStatus::OPTIMAL ||
249  info.status == glop::ProblemStatus::DUAL_FEASIBLE) {
250  // Record the objective bound.
251  info.lp_objective = simplex_.GetObjectiveValue();
252  info.new_obj_bound = IntegerValue(
253  static_cast<int64>(std::ceil(info.lp_objective - kCpEpsilon)));
254  }
255  return info;
256 }
257 
258 void LinearProgrammingConstraint::FillReducedCostReasonIn(
259  const glop::DenseRow& reduced_costs,
260  std::vector<IntegerLiteral>* integer_reason) {
261  integer_reason->clear();
262  const int num_vars = integer_variables_.size();
263  for (int i = 0; i < num_vars; i++) {
264  const double rc = reduced_costs[glop::ColIndex(i)];
265  if (rc > kLpEpsilon) {
266  integer_reason->push_back(
267  integer_trail_->LowerBoundAsLiteral(integer_variables_[i]));
268  } else if (rc < -kLpEpsilon) {
269  integer_reason->push_back(
270  integer_trail_->UpperBoundAsLiteral(integer_variables_[i]));
271  }
272  }
273 
274  integer_trail_->RemoveLevelZeroBounds(integer_reason);
275 }
276 
// Performs strong-branching on positive_var (which must be fractional in the
// cached LP solution): solves the LP with var <= floor(value) and with
// var >= ceil(value), pushes any resulting bound deduction on the trail, and
// possibly improves the objective lower bound. Returns false if nothing was
// deduced (or a conflict/infeasibility was already handled).
bool LinearProgrammingConstraint::BranchOnVar(IntegerVariable positive_var) {
  // From the current LP solution, branch on the given var if fractional.
  DCHECK(lp_solution_is_set_);
  const double current_value = GetSolutionValue(positive_var);
  DCHECK_GT(std::abs(current_value - std::round(current_value)), kCpEpsilon);

  // Used as empty reason in this method.
  integer_reason_.clear();

  bool deductions_were_made = false;

  UpdateBoundsOfLpVariables();

  const IntegerValue current_obj_lb = integer_trail_->LowerBound(objective_cp_);
  // This will try to branch in both direction around the LP value of the
  // given variable and push any deduction done this way.

  const glop::ColIndex lp_var = GetOrCreateMirrorVariable(positive_var);
  const double current_lb = ToDouble(integer_trail_->LowerBound(positive_var));
  const double current_ub = ToDouble(integer_trail_->UpperBound(positive_var));
  // LP bounds are stored in the scaled space, hence the factor below.
  const double factor = scaler_.VariableScalingFactor(lp_var);
  if (current_value < current_lb || current_value > current_ub) {
    return false;
  }

  // Form LP1 var <= floor(current_value)
  const double new_ub = std::floor(current_value);
  lp_data_.SetVariableBounds(lp_var, current_lb * factor, new_ub * factor);

  LPSolveInfo lower_branch_info = SolveLpForBranching();
  if (lower_branch_info.status != glop::ProblemStatus::OPTIMAL &&
      lower_branch_info.status != glop::ProblemStatus::DUAL_FEASIBLE &&
      lower_branch_info.status != glop::ProblemStatus::DUAL_UNBOUNDED) {
    return false;
  }

  if (lower_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
    // Down branch infeasible (dual unbounded == primal infeasible):
    // push the other branch.
    const IntegerLiteral deduction = IntegerLiteral::GreaterOrEqual(
        positive_var, IntegerValue(std::ceil(current_value)));
    if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
      return false;
    }
    deductions_were_made = true;
  } else if (lower_branch_info.new_obj_bound <= current_obj_lb) {
    // This branch cannot improve the objective bound: give up.
    return false;
  }

  // Form LP2 var >= ceil(current_value)
  const double new_lb = std::ceil(current_value);
  lp_data_.SetVariableBounds(lp_var, new_lb * factor, current_ub * factor);

  LPSolveInfo upper_branch_info = SolveLpForBranching();
  if (upper_branch_info.status != glop::ProblemStatus::OPTIMAL &&
      upper_branch_info.status != glop::ProblemStatus::DUAL_FEASIBLE &&
      upper_branch_info.status != glop::ProblemStatus::DUAL_UNBOUNDED) {
    return deductions_were_made;
  }

  if (upper_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
    // Push the other branch if not infeasible.
    if (lower_branch_info.status != glop::ProblemStatus::DUAL_UNBOUNDED) {
      const IntegerLiteral deduction = IntegerLiteral::LowerOrEqual(
          positive_var, IntegerValue(std::floor(current_value)));
      if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
        return deductions_were_made;
      }
      deductions_were_made = true;
    }
  } else if (upper_branch_info.new_obj_bound <= current_obj_lb) {
    return deductions_were_made;
  }

  // Combine the two branch bounds into an objective lower bound valid in
  // both branches (a conflict if both branches are infeasible).
  IntegerValue approximate_obj_lb = kMinIntegerValue;

  if (lower_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED &&
      upper_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
    return integer_trail_->ReportConflict(integer_reason_);
  } else if (lower_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
    approximate_obj_lb = upper_branch_info.new_obj_bound;
  } else if (upper_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
    approximate_obj_lb = lower_branch_info.new_obj_bound;
  } else {
    approximate_obj_lb = std::min(lower_branch_info.new_obj_bound,
                                  upper_branch_info.new_obj_bound);
  }

  // NOTE: On some problems, the approximate_obj_lb could be inexact which add
  // some tolerance to CP-SAT where currently there is none.
  if (approximate_obj_lb <= current_obj_lb) return deductions_were_made;

  // Push the bound to the trail.
  const IntegerLiteral deduction =
      IntegerLiteral::GreaterOrEqual(objective_cp_, approximate_obj_lb);
  if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
    return deductions_were_made;
  }

  return true;
}
377 
// Registers this propagator with the model: builds the initial LP, watches
// every mirrored variable (and the objective upper bound), and hooks up the
// LP-based search heuristics.
// NOTE(review): the signature line (presumably
// void LinearProgrammingConstraint::RegisterWith(Model* model)) is elided in
// this listing.
  DCHECK(!lp_constraint_is_registered_);
  lp_constraint_is_registered_ = true;
  model->GetOrCreate<LinearProgrammingConstraintCollection>()->push_back(this);

  // Note: this sort is not really needed but should lead to better cache
  // locality.
  std::sort(integer_objective_.begin(), integer_objective_.end());

  // Set the LP to its initial content.
  if (!sat_parameters_.add_lp_constraints_lazily()) {
    constraint_manager_.AddAllConstraintsToLp();
  }
  if (!CreateLpFromConstraintManager()) {
    model->GetOrCreate<SatSolver>()->NotifyThatModelIsUnsat();
    return;
  }

  GenericLiteralWatcher* watcher = model->GetOrCreate<GenericLiteralWatcher>();
  const int watcher_id = watcher->Register(this);
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    watcher->WatchIntegerVariable(integer_variables_[i], watcher_id, i);
  }
  if (objective_is_defined_) {
    watcher->WatchUpperBound(objective_cp_, watcher_id);
  }
  watcher->SetPropagatorPriority(watcher_id, 2);
  watcher->AlwaysCallAtLevelZero(watcher_id);

  if (integer_variables_.size() >= 20) {  // Do not use on small subparts.
    auto* container = model->GetOrCreate<SearchHeuristicsVector>();
    container->push_back(HeuristicLPPseudoCostBinary(model));
    container->push_back(HeuristicLPMostInfeasibleBinary(model));
  }

  // Registering it with the trail make sure this class is always in sync when
  // it is used in the decision heuristics.
  integer_trail_->RegisterReversibleClass(this);
  watcher->RegisterReversibleInt(watcher_id, &rev_optimal_constraints_size_);
}
419 
// Backtracking hook. NOTE(review): the signature line (presumably
// void LinearProgrammingConstraint::SetLevel(int level)) is elided in this
// listing. Shrinks the reversible optimal-constraint stack, invalidates the
// cached LP solution when backtracking past its level, and restores the
// level-zero solution when returning to the root.
  optimal_constraints_.resize(rev_optimal_constraints_size_);
  if (lp_solution_is_set_ && level < lp_solution_level_) {
    // The cached solution was computed deeper in the tree than the new level.
    lp_solution_is_set_ = false;
  }

  // Special case for level zero, we "reload" any previously known optimal
  // solution from that level.
  //
  // TODO(user): Keep all optimal solution in the current branch?
  // TODO(user): Still try to add cuts/constraints though!
  if (level == 0 && !level_zero_lp_solution_.empty()) {
    lp_solution_is_set_ = true;
    lp_solution_ = level_zero_lp_solution_;
    lp_solution_level_ = 0;
    for (int i = 0; i < lp_solution_.size(); i++) {
      // Keep the expanded view (variable and its negation) in sync.
      expanded_lp_solution_[integer_variables_[i]] = lp_solution_[i];
      expanded_lp_solution_[NegationOf(integer_variables_[i])] =
          -lp_solution_[i];
    }
  }
}
442 
// Registers a cut generator and makes sure every variable it mentions has an
// LP mirror column. NOTE(review): the signature line (presumably
// void LinearProgrammingConstraint::AddCutGenerator(CutGenerator generator))
// is elided in this listing.
  for (const IntegerVariable var : generator.vars) {
    // Only positive variables are mirrored in the LP.
    GetOrCreateMirrorVariable(VariableIsPositive(var) ? var : NegationOf(var));
  }
  cut_generators_.push_back(std::move(generator));
}
449 
// Incremental propagation: only re-solves the LP when the cached solution is
// missing, possibly improvable at level zero, or broken by a bound change.
// NOTE(review): the signature line (presumably
// bool LinearProgrammingConstraint::IncrementalPropagate(...)) is partially
// elided in this listing.
    const std::vector<int>& watch_indices) {
  // Without a cached LP solution we must do a full propagation.
  if (!lp_solution_is_set_) return Propagate();

  // At level zero, if there is still a chance to add cuts or lazy constraints,
  // we re-run the LP.
  if (trail_->CurrentDecisionLevel() == 0 && !lp_at_level_zero_is_final_) {
    return Propagate();
  }

  // Check whether the change breaks the current LP solution. If it does, call
  // Propagate() on the current LP.
  for (const int index : watch_indices) {
    const double lb =
        ToDouble(integer_trail_->LowerBound(integer_variables_[index]));
    const double ub =
        ToDouble(integer_trail_->UpperBound(integer_variables_[index]));
    const double value = lp_solution_[index];
    if (value < lb - kCpEpsilon || value > ub + kCpEpsilon) return Propagate();
  }

  // TODO(user): The saved lp solution is still valid given the current variable
  // bounds, so the LP optimal didn't change. However we might still want to add
  // new cuts or new lazy constraints?
  //
  // TODO(user): Propagate the last optimal_constraint? Note that we need
  // to be careful since the reversible int in IntegerSumLE are not registered.
  // However, because we delete "optimalconstraints" on backtrack, we might not
  // care.
  return true;
}
481 
482 glop::Fractional LinearProgrammingConstraint::GetVariableValueAtCpScale(
483  glop::ColIndex var) {
484  return scaler_.UnscaleVariableValue(var, simplex_.GetVariableValue(var));
485 }
486 
// Returns the cached LP value of the given mirrored variable.
// NOTE(review): the signature line (presumably
// double LinearProgrammingConstraint::GetSolutionValue(...)) is elided.
    IntegerVariable variable) const {
  // FindOrDie: the variable must already have an LP mirror column.
  return lp_solution_[gtl::FindOrDie(mirror_lp_variable_, variable).value()];
}
491 
// Returns the cached reduced cost of the given mirrored variable.
// NOTE(review): the signature line (presumably
// double LinearProgrammingConstraint::GetSolutionReducedCost(...)) is elided.
    IntegerVariable variable) const {
  // FindOrDie: the variable must already have an LP mirror column.
  return lp_reduced_cost_[gtl::FindOrDie(mirror_lp_variable_, variable)
                              .value()];
}
497 
498 void LinearProgrammingConstraint::UpdateBoundsOfLpVariables() {
499  const int num_vars = integer_variables_.size();
500  for (int i = 0; i < num_vars; i++) {
501  const IntegerVariable cp_var = integer_variables_[i];
502  const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
503  const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
504  const double factor = scaler_.VariableScalingFactor(glop::ColIndex(i));
505  lp_data_.SetVariableBounds(glop::ColIndex(i), lb * factor, ub * factor);
506  }
507 }
508 
// Runs the simplex on lp_data_ and caches the (unscaled) solution on success.
// Returns false only when the solver itself reports an error.
bool LinearProgrammingConstraint::SolveLp() {
  if (trail_->CurrentDecisionLevel() == 0) {
    // A new solve at the root may still add cuts/lazy constraints.
    lp_at_level_zero_is_final_ = false;
  }

  const auto status = simplex_.Solve(lp_data_, time_limit_);
  total_num_simplex_iterations_ += simplex_.GetNumberOfIterations();
  if (!status.ok()) {
    VLOG(1) << "The LP solver encountered an error: " << status.error_message();
    simplex_.ClearStateForNextSolve();
    return false;
  }
  average_degeneracy_.AddData(CalculateDegeneracy());
  if (average_degeneracy_.CurrentAverage() >= 1000.0) {
    VLOG(2) << "High average degeneracy: "
            << average_degeneracy_.CurrentAverage();
  }

  // NOTE(review): the guard opening this scope is elided in this listing
  // (presumably a check that the simplex status is OPTIMAL before caching the
  // solution) — hence the extra closing brace below; confirm against the
  // original source.
    lp_solution_is_set_ = true;
    lp_solution_level_ = trail_->CurrentDecisionLevel();
    const int num_vars = integer_variables_.size();
    for (int i = 0; i < num_vars; i++) {
      // Cache both the compact and the expanded (var / negation) views.
      const glop::Fractional value =
          GetVariableValueAtCpScale(glop::ColIndex(i));
      lp_solution_[i] = value;
      expanded_lp_solution_[integer_variables_[i]] = value;
      expanded_lp_solution_[NegationOf(integer_variables_[i])] = -value;
    }

    if (lp_solution_level_ == 0) {
      // Remembered so that SetLevel(0) can restore it cheaply.
      level_zero_lp_solution_ = lp_solution_;
    }
  }
  return true;
}
545 
546 void LinearProgrammingConstraint::ConvertToLinearConstraint(
547  const gtl::ITIVector<ColIndex, IntegerValue>& dense_vector,
548  IntegerValue upper_bound, LinearConstraint* result) {
549  result->vars.clear();
550  result->coeffs.clear();
551  const int size = dense_vector.size();
552  for (ColIndex col(0); col < size; ++col) {
553  const IntegerValue coeff = dense_vector[col];
554  if (coeff == 0) continue;
555  const IntegerVariable var = integer_variables_[col.value()];
556  result->vars.push_back(var);
557  result->coeffs.push_back(coeff);
558  }
559  result->lb = kMinIntegerValue;
560  result->ub = upper_bound;
561 }
562 
namespace {

// Computes dense_vector += multiplier * terms using overflow-checked
// arithmetic. Returns false in case of overflow.
// NOTE(review): the final parameter line (the dense_vector output pointer) is
// elided in this listing.
bool AddLinearExpressionMultiple(
    IntegerValue multiplier,
    const std::vector<std::pair<ColIndex, IntegerValue>>& terms,
  for (const std::pair<ColIndex, IntegerValue> term : terms) {
    // AddProductTo() reports overflow by returning false.
    if (!AddProductTo(multiplier, term.second, &(*dense_vector)[term.first])) {
      return false;
    }
  }
  return true;
}

}  // namespace
579 
// Builds a cut from an integer linear combination of the LP rows: takes the
// combination, substitutes implied bounds, applies the integer rounding
// heuristic, substitutes the introduced slacks back into LP variables, and
// finally hands the cut to the constraint manager. Returns true iff a cut
// was added.
bool LinearProgrammingConstraint::AddCutFromConstraints(
    const std::string& name,
    const std::vector<std::pair<RowIndex, IntegerValue>>& integer_multipliers) {
  // This is initialized to a valid linear constraint (by taking linear
  // combination of the LP rows) and will be transformed into a cut if
  // possible.
  //
  // TODO(user): For CG cuts, Ideally this linear combination should have only
  // one fractional variable (basis_col). But because of imprecision, we get a
  // bunch of fractional entry with small coefficient (relative to the one of
  // basis_col). We try to handle that in IntegerRoundingCut(), but it might be
  // better to add small multiple of the involved rows to get rid of them.
  IntegerValue cut_ub;
  if (!ComputeNewLinearConstraint(integer_multipliers, &tmp_dense_vector_,
                                  &cut_ub)) {
    VLOG(1) << "Issue, overflow!";
    return false;
  }

  // Important: because we use integer_multipliers below, we cannot just
  // divide by GCD or call PreventOverflow() here.
  ConvertToLinearConstraint(tmp_dense_vector_, cut_ub, &cut_);

  // This should be tight (relative violation check against the LP solution).
  const double norm = ToDouble(ComputeInfinityNorm(cut_));
  if (std::abs(ComputeActivity(cut_, expanded_lp_solution_) -
               ToDouble(cut_.ub)) /
          norm >
      1e-4) {
    VLOG(1) << "Cut not tight " << ComputeActivity(cut_, expanded_lp_solution_)
            << " " << ToDouble(cut_.ub);
    return false;
  }
  CHECK(constraint_manager_.DebugCheckConstraint(cut_));

  // We will create "artificial" variables after this index that will be
  // substituted back into LP variables afterwards. Also note that we only use
  // positive variable indices for these new variables, so that algorithm that
  // take their negation will not mess up the indexing.
  const IntegerVariable first_new_var(expanded_lp_solution_.size());
  CHECK_EQ(first_new_var.value() % 2, 0);

  LinearConstraint copy_in_debug;
  if (DEBUG_MODE) {
    copy_in_debug = cut_;
  }

  // Unlike for the knapsack cuts, it might not be always beneficial to
  // process the implied bounds even though it seems to be better in average.
  //
  // TODO(user): Perform more experiments, in particular with which bound we use
  // and if we complement or not before the MIR rounding. Other solvers seems
  // to try different complementation strategies in a "postprocessing" and we
  // don't. Try this too.
  std::vector<ImpliedBoundsProcessor::SlackInfo> ib_slack_infos;
  std::vector<LinearConstraint> implied_bound_cuts;
  implied_bounds_processor_.ProcessUpperBoundedConstraintWithSlackCreation(
      /*substitute_only_inner_variables=*/false, first_new_var,
      expanded_lp_solution_, &cut_, &ib_slack_infos, &implied_bound_cuts);
  // Any stand-alone implied-bound cuts discovered on the way are added too.
  for (LinearConstraint& ib_cut : implied_bound_cuts) {
    DivideByGCD(&ib_cut);
    CHECK(constraint_manager_.DebugCheckConstraint(ib_cut));
    constraint_manager_.AddCut(ib_cut, "IB", expanded_lp_solution_);
  }
  DCHECK(implied_bounds_processor_.DebugSlack(first_new_var, copy_in_debug,
                                              cut_, ib_slack_infos));

  // Fills data for IntegerRoundingCut().
  //
  // Note(user): we use the current bound here, so the reasoning will only
  // produce locally valid cut if we call this at a non-root node. We could
  // use the level zero bounds if we wanted to generate a globally valid cut
  // at another level. For now this is only called at level zero anyway.
  tmp_lp_values_.clear();
  tmp_var_lbs_.clear();
  tmp_var_ubs_.clear();
  for (const IntegerVariable var : cut_.vars) {
    if (var >= first_new_var) {
      // Artificial variable coming from implied-bound substitution.
      CHECK(VariableIsPositive(var));
      const auto& info =
          ib_slack_infos[(var.value() - first_new_var.value()) / 2];
      tmp_lp_values_.push_back(info.lp_value);
      tmp_var_lbs_.push_back(info.lb);
      tmp_var_ubs_.push_back(info.ub);
    } else {
      tmp_lp_values_.push_back(expanded_lp_solution_[var]);
      tmp_var_lbs_.push_back(integer_trail_->LowerBound(var));
      tmp_var_ubs_.push_back(integer_trail_->UpperBound(var));
    }
  }

  // Add slack.
  // definition: integer_lp_[row] + slack_row == bound;
  const IntegerVariable first_slack(first_new_var +
                                    IntegerVariable(2 * ib_slack_infos.size()));
  tmp_slack_rows_.clear();
  tmp_slack_bounds_.clear();
  for (const auto pair : integer_multipliers) {
    const RowIndex row = pair.first;
    const IntegerValue coeff = pair.second;
    const auto status = simplex_.GetConstraintStatus(row);
    // A FIXED_VALUE row has zero slack; nothing to add.
    if (status == glop::ConstraintStatus::FIXED_VALUE) continue;

    tmp_lp_values_.push_back(0.0);
    cut_.vars.push_back(first_slack +
                        2 * IntegerVariable(tmp_slack_rows_.size()));
    tmp_slack_rows_.push_back(row);
    cut_.coeffs.push_back(coeff);

    const IntegerValue diff(
        CapSub(integer_lp_[row].ub.value(), integer_lp_[row].lb.value()));
    // The slack sign/bounds depend on the sign of the multiplier.
    if (coeff > 0) {
      tmp_slack_bounds_.push_back(integer_lp_[row].ub);
      tmp_var_lbs_.push_back(IntegerValue(0));
      tmp_var_ubs_.push_back(diff);
    } else {
      tmp_slack_bounds_.push_back(integer_lp_[row].lb);
      tmp_var_lbs_.push_back(-diff);
      tmp_var_ubs_.push_back(IntegerValue(0));
    }
  }

  // Get the cut using some integer rounding heuristic.
  RoundingOptions options;
  options.max_scaling = sat_parameters_.max_integer_rounding_scaling();
  integer_rounding_cut_helper_.ComputeCut(options, tmp_lp_values_, tmp_var_lbs_,
                                          tmp_var_ubs_,
                                          &implied_bounds_processor_, &cut_);

  // Compute the activity. Warning: the cut no longer have the same size so we
  // cannot use tmp_lp_values_. Note that the substitution below shouldn't
  // change the activity by definition.
  double activity = 0.0;
  for (int i = 0; i < cut_.vars.size(); ++i) {
    if (cut_.vars[i] < first_new_var) {
      activity +=
          ToDouble(cut_.coeffs[i]) * expanded_lp_solution_[cut_.vars[i]];
    }
  }
  const double kMinViolation = 1e-4;
  const double violation = activity - ToDouble(cut_.ub);
  if (violation < kMinViolation) {
    VLOG(3) << "Bad cut " << activity << " <= " << ToDouble(cut_.ub);
    return false;
  }

  // Substitute any slack left.
  {
    int num_slack = 0;
    tmp_dense_vector_.assign(integer_variables_.size(), IntegerValue(0));
    IntegerValue cut_ub = cut_.ub;
    bool overflow = false;
    for (int i = 0; i < cut_.vars.size(); ++i) {
      const IntegerVariable var = cut_.vars[i];

      // Simple copy for non-slack variables.
      if (var < first_new_var) {
        const glop::ColIndex col =
            gtl::FindOrDie(mirror_lp_variable_, PositiveVariable(var));
        if (VariableIsPositive(var)) {
          tmp_dense_vector_[col] += cut_.coeffs[i];
        } else {
          tmp_dense_vector_[col] -= cut_.coeffs[i];
        }
        continue;
      }

      // Replace slack from bound substitution.
      if (var < first_slack) {
        const IntegerValue multiplier = cut_.coeffs[i];
        const int index = (var.value() - first_new_var.value()) / 2;
        CHECK_LT(index, ib_slack_infos.size());

        std::vector<std::pair<ColIndex, IntegerValue>> terms;
        for (const std::pair<IntegerVariable, IntegerValue>& term :
             ib_slack_infos[index].terms) {
          terms.push_back(
              {gtl::FindOrDie(mirror_lp_variable_,
                              PositiveVariable(term.first)),
               VariableIsPositive(term.first) ? term.second : -term.second});
        }
        if (!AddLinearExpressionMultiple(multiplier, terms,
                                         &tmp_dense_vector_)) {
          overflow = true;
          break;
        }
        if (!AddProductTo(multiplier, -ib_slack_infos[index].offset, &cut_ub)) {
          overflow = true;
          break;
        }
        continue;
      }

      // Replace slack from LP constraints.
      ++num_slack;
      const int slack_index = (var.value() - first_slack.value()) / 2;
      const glop::RowIndex row = tmp_slack_rows_[slack_index];
      const IntegerValue multiplier = -cut_.coeffs[i];
      if (!AddLinearExpressionMultiple(multiplier, integer_lp_[row].terms,
                                       &tmp_dense_vector_)) {
        overflow = true;
        break;
      }

      // Update rhs.
      if (!AddProductTo(multiplier, tmp_slack_bounds_[slack_index], &cut_ub)) {
        overflow = true;
        break;
      }
    }

    if (overflow) {
      VLOG(1) << "Overflow in slack removal.";
      return false;
    }

    VLOG(3) << " num_slack: " << num_slack;
    ConvertToLinearConstraint(tmp_dense_vector_, cut_ub, &cut_);
  }

  // Display some stats used for investigation of cut generation.
  const std::string extra_info = absl::StrCat(
      "num_ib_substitutions=", ib_slack_infos.size(), " num_lifted_booleans=",
      integer_rounding_cut_helper_.NumLiftedBooleans());

  // Sanity check: the substitution above should not change the violation.
  const double new_violation =
      ComputeActivity(cut_, expanded_lp_solution_) - ToDouble(cut_.ub);
  if (std::abs(violation - new_violation) >= 1e-4) {
    VLOG(1) << "Violation discrepancy after slack removal. "
            << " before = " << violation << " after = " << new_violation;
  }

  DivideByGCD(&cut_);
  return constraint_manager_.AddCut(cut_, name, expanded_lp_solution_,
                                    extra_info);
}
816 
// Generates Chvatal-Gomory ("CG") cuts from every row whose basic variable is
// fractional in the current LP solution. Must be called at level zero (the
// CHECK below enforces it).
void LinearProgrammingConstraint::AddCGCuts() {
  CHECK_EQ(trail_->CurrentDecisionLevel(), 0);
  const RowIndex num_rows = lp_data_.num_constraints();
  for (RowIndex row(0); row < num_rows; ++row) {
    ColIndex basis_col = simplex_.GetBasis(row);
    const Fractional lp_value = GetVariableValueAtCpScale(basis_col);

    // TODO(user): We could just look at the diff with std::floor() in the hope
    // that when we are just under an integer, the exact computation below will
    // also be just under it.
    if (std::abs(lp_value - std::round(lp_value)) < 0.01) continue;

    // If this variable is a slack, we ignore it. This is because the
    // corresponding row is not tight under the given lp values.
    if (basis_col >= integer_variables_.size()) continue;

    // The row multipliers come from the row of the basis inverse selecting
    // this basic variable.
    const glop::ScatteredRow& lambda = simplex_.GetUnitRowLeftInverse(row);
    glop::DenseColumn lp_multipliers(num_rows, 0.0);
    double magnitude = 0.0;
    int num_non_zeros = 0;
    for (RowIndex row(0); row < num_rows; ++row) {
      lp_multipliers[row] = lambda.values[glop::RowToColIndex(row)];
      // Drop tiny multipliers, presumably numerical noise.
      if (std::abs(lp_multipliers[row]) < 1e-12) {
        lp_multipliers[row] = 0.0;
        continue;
      }

      // There should be no BASIC status, but they could be imprecision
      // in the GetUnitRowLeftInverse() code? not sure, so better be safe.
      const auto status = simplex_.GetConstraintStatus(row);
      if (status == glop::ConstraintStatus::BASIC) {
        VLOG(1) << "BASIC row not expected! " << lp_multipliers[row];
        lp_multipliers[row] = 0.0;
      }

      magnitude = std::max(magnitude, std::abs(lp_multipliers[row]));
      if (lp_multipliers[row] != 0.0) ++num_non_zeros;
    }
    if (num_non_zeros == 0) continue;

    // Try the multipliers with both signs; either one may yield a cut.
    Fractional scaling;
    for (int i = 0; i < 2; ++i) {
      if (i == 1) {
        // Try other sign.
        //
        // TODO(user): Maybe add an heuristic to know beforehand which sign to
        // use?
        for (RowIndex row(0); row < num_rows; ++row) {
          lp_multipliers[row] = -lp_multipliers[row];
        }
      }

      // TODO(user): We use a lower value here otherwise we might run into
      // overflow while computing the cut. This should be fixable.
      const std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers =
          ScaleLpMultiplier(/*take_objective_into_account=*/false,
                            lp_multipliers, &scaling, /*max_pow=*/52);
      AddCutFromConstraints("CG", integer_multipliers);
    }
  }
}
878 
879 namespace {
880 
881 // For each element of a, adds a random one in b and append the pair to output.
882 void RandomPick(const std::vector<RowIndex>& a, const std::vector<RowIndex>& b,
883  ModelRandomGenerator* random,
884  std::vector<std::pair<RowIndex, RowIndex>>* output) {
885  if (a.empty() || b.empty()) return;
886  for (const RowIndex row : a) {
887  const RowIndex other = b[absl::Uniform<int>(*random, 0, b.size())];
888  if (other != row) {
889  output->push_back({row, other});
890  }
891  }
892 }
893 
894 template <class ListOfTerms>
895 IntegerValue GetCoeff(ColIndex col, const ListOfTerms& terms) {
896  for (const auto& term : terms) {
897  if (term.first == col) return term.second;
898  }
899  return IntegerValue(0);
900 }
901 
902 } // namespace
903 
void LinearProgrammingConstraint::AddMirCuts() {
  // MIR cuts are only generated at the root of the search tree.
  CHECK_EQ(trail_->CurrentDecisionLevel(), 0);

  // Heuristic to generate MIR_n cuts by combining a small number of rows. This
  // works greedily and follow more or less the MIR cut description in the
  // literature. We have a current cut, and we add one more row to it while
  // eliminating a variable of the current cut whose LP value is far from its
  // bound.
  //
  // A notable difference is that we randomize the variable we eliminate and
  // the row we use to do so. We still have weights to indicate our preferred
  // choices. This allows to generate different cuts when called again and
  // again.
  //
  // TODO(user): We could combine n rows to make sure we eliminate n variables
  // far away from their bounds by solving exactly in integer small linear
  // system.
  //
  // dense_cut holds the aggregated cut coefficient per LP column; non_zeros
  // tracks which entries might be non-zero so cleanup stays sparse.
  gtl::ITIVector<ColIndex, IntegerValue> dense_cut(integer_variables_.size(),
                                                   IntegerValue(0));
  SparseBitset<ColIndex> non_zeros(ColIndex(integer_variables_.size()));

  // We compute all the rows that are tight, these will be used as the base row
  // for the MIR_n procedure below.
  const RowIndex num_rows = lp_data_.num_constraints();
  std::vector<std::pair<RowIndex, IntegerValue>> base_rows;
  gtl::ITIVector<RowIndex, double> row_weights(num_rows.value(), 0.0);
  for (RowIndex row(0); row < num_rows; ++row) {
    const auto status = simplex_.GetConstraintStatus(row);
    if (status == glop::ConstraintStatus::BASIC) continue;
    if (status == glop::ConstraintStatus::FREE) continue;

    // NOTE(review): the enclosing condition was lost in extraction (original
    // lines 935-936); presumably rows tight at their upper bound are added
    // with a +1 multiplier -- confirm against the original file.
      base_rows.push_back({row, IntegerValue(1)});
    }
    // NOTE(review): condition lost in extraction (original lines 939-940);
    // presumably rows tight at their lower bound are added with -1.
      base_rows.push_back({row, IntegerValue(-1)});
    }

    // For now, we use the dual values for the row "weights".
    //
    // Note that we use the dual at LP scale so that it make more sense when we
    // compare different rows since the LP has been scaled.
    //
    // TODO(user): In Kati Wolter PhD "Implementation of Cutting Plane
    // Separators for Mixed Integer Programs" which describe SCIP's MIR cuts
    // implementation (or at least an early version of it), a more complex score
    // is used.
    //
    // Note(user): Because we only consider tight rows under the current lp
    // solution (i.e. non-basic rows), most should have a non-zero dual values.
    // But there is some degenerate problem where these rows have a really low
    // weight (or even zero), and having only weight of exactly zero in
    // std::discrete_distribution will result in a crash.
    row_weights[row] = std::max(1e-8, std::abs(simplex_.GetDualValue(row)));
  }

  std::vector<double> weights;
  // NOTE(review): a declaration was lost in extraction here (original line
  // 963); `used_rows` is used below without a visible declaration --
  // presumably a std::vector<bool>. Confirm against the original file.
  std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
  for (const std::pair<RowIndex, IntegerValue>& entry : base_rows) {
    // First try to generate a cut directly from this base row (MIR1).
    //
    // Note(user): We abort on success like it seems to be done in the
    // literature. Note that we don't succeed that often in generating an
    // efficient cut, so I am not sure aborting will make a big difference
    // speedwise. We might generate similar cuts though, but hopefully the cut
    // management can deal with that.
    integer_multipliers = {entry};
    if (AddCutFromConstraints("MIR_1", integer_multipliers)) {
      continue;
    }

    // Cleanup: reset only the entries touched on the previous iteration.
    for (const ColIndex col : non_zeros.PositionsSetAtLeastOnce()) {
      dense_cut[col] = IntegerValue(0);
    }
    non_zeros.SparseClearAll();

    // Copy cut: seed dense_cut with multiplier * base row.
    const IntegerValue multiplier = entry.second;
    for (const std::pair<ColIndex, IntegerValue> term :
         integer_lp_[entry.first].terms) {
      const ColIndex col = term.first;
      const IntegerValue coeff = term.second;
      non_zeros.Set(col);
      dense_cut[col] += coeff * multiplier;
    }

    used_rows.assign(num_rows.value(), false);
    used_rows[entry.first] = true;

    // We will aggregate at most kMaxAggregation more rows.
    //
    // TODO(user): optim + tune.
    const int kMaxAggregation = 5;
    for (int i = 0; i < kMaxAggregation; ++i) {
      // First pick a variable to eliminate. We currently pick a random one with
      // a weight that depend on how far it is from its closest bound.
      IntegerValue max_magnitude(0);
      weights.clear();
      std::vector<ColIndex> col_candidates;
      for (const ColIndex col : non_zeros.PositionsSetAtLeastOnce()) {
        if (dense_cut[col] == 0) continue;

        max_magnitude = std::max(max_magnitude, IntTypeAbs(dense_cut[col]));
        const int col_degree =
            lp_data_.GetSparseColumn(col).num_entries().value();
        if (col_degree <= 1) continue;
        // NOTE(review): a condition was lost in extraction here (original
        // line 1014); the dangling continue/brace below belong to it.
          continue;
        }

        const IntegerVariable var = integer_variables_[col.value()];
        const double lp_value = expanded_lp_solution_[var];
        const double lb = ToDouble(integer_trail_->LowerBound(var));
        const double ub = ToDouble(integer_trail_->UpperBound(var));
        // Distance to the closest of the two bounds.
        const double bound_distance = std::min(ub - lp_value, lp_value - lb);
        if (bound_distance > 1e-2) {
          weights.push_back(bound_distance);
          col_candidates.push_back(col);
        }
      }
      if (col_candidates.empty()) break;

      // Weighted random choice: variables far from their bounds are preferred.
      const ColIndex var_to_eliminate =
          col_candidates[std::discrete_distribution<>(weights.begin(),
                                                      weights.end())(*random_)];

      // What rows can we add to eliminate var_to_eliminate?
      std::vector<RowIndex> possible_rows;
      weights.clear();
      for (const auto entry : lp_data_.GetSparseColumn(var_to_eliminate)) {
        const RowIndex row = entry.row();
        const auto status = simplex_.GetConstraintStatus(row);
        if (status == glop::ConstraintStatus::BASIC) continue;
        if (status == glop::ConstraintStatus::FREE) continue;

        // We dissalow all the rows that contain a variable that we already
        // eliminated (or are about to). This mean that we choose rows that
        // form a "triangular" matrix on the position we choose to eliminate.
        if (used_rows[row]) continue;
        used_rows[row] = true;

        // TODO(user): Instead of using FIXED_VALUE consider also both direction
        // when we almost have an equality? that is if the LP constraints bounds
        // are close from each others (<1e-6 ?). Initial experiments shows it
        // doesn't change much, so I kept this version for now. Note that it
        // might just be better to use the side that constrain the current lp
        // optimal solution (that we get from the status).
        bool add_row = false;
        // NOTE(review): the second operand of this condition was lost in
        // extraction (original line 1057) -- confirm against the original
        // file.
        if (status == glop::ConstraintStatus::FIXED_VALUE ||
          if (entry.coefficient() > 0.0) {
            if (dense_cut[var_to_eliminate] < 0) add_row = true;
          } else {
            if (dense_cut[var_to_eliminate] > 0) add_row = true;
          }
        }
        // NOTE(review): the second operand of this condition was lost in
        // extraction (original line 1065).
        if (status == glop::ConstraintStatus::FIXED_VALUE ||
          if (entry.coefficient() > 0.0) {
            if (dense_cut[var_to_eliminate] > 0) add_row = true;
          } else {
            if (dense_cut[var_to_eliminate] < 0) add_row = true;
          }
        }
        if (add_row) {
          possible_rows.push_back(row);
          weights.push_back(row_weights[row]);
        }
      }
      if (possible_rows.empty()) break;

      // Weighted random choice of the row used to cancel var_to_eliminate.
      const RowIndex row_to_combine =
          possible_rows[std::discrete_distribution<>(weights.begin(),
                                                     weights.end())(*random_)];
      const IntegerValue to_combine_coeff =
          GetCoeff(var_to_eliminate, integer_lp_[row_to_combine].terms);
      CHECK_NE(to_combine_coeff, 0);

      // Multipliers chosen so the eliminated variable cancels out, normalized
      // by their gcd and with mult1 kept positive.
      IntegerValue mult1 = -to_combine_coeff;
      IntegerValue mult2 = dense_cut[var_to_eliminate];
      CHECK_NE(mult2, 0);
      if (mult1 < 0) {
        mult1 = -mult1;
        mult2 = -mult2;
      }

      const IntegerValue gcd = IntegerValue(
          MathUtil::GCD64(std::abs(mult1.value()), std::abs(mult2.value())));
      CHECK_NE(gcd, 0);
      mult1 /= gcd;
      mult2 /= gcd;

      // Overflow detection.
      //
      // TODO(user): do that in the possible_rows selection? only problem is
      // that we do not have the integer coefficient there...
      if (CapAdd(CapProd(max_magnitude.value(), std::abs(mult1.value())),
                 CapProd(infinity_norms_[row_to_combine].value(),
                         std::abs(mult2.value()))) == kint64max) {
        break;
      }

      for (std::pair<RowIndex, IntegerValue>& entry : integer_multipliers) {
        entry.second *= mult1;
      }
      integer_multipliers.push_back({row_to_combine, mult2});

      // TODO(user): Not supper efficient to recombine the rows.
      if (AddCutFromConstraints(absl::StrCat("MIR_", i + 2),
                                integer_multipliers)) {
        break;
      }

      // Minor optim: the computation below is only needed if we do one more
      // iteration.
      if (i + 1 == kMaxAggregation) break;

      // Fold mult2 * row_to_combine into the dense cut scaled by mult1.
      for (ColIndex col : non_zeros.PositionsSetAtLeastOnce()) {
        dense_cut[col] *= mult1;
      }
      for (const std::pair<ColIndex, IntegerValue> term :
           integer_lp_[row_to_combine].terms) {
        const ColIndex col = term.first;
        const IntegerValue coeff = term.second;
        non_zeros.Set(col);
        dense_cut[col] += coeff * mult2;
      }
    }
  }
}
1138 
// Adapts next_simplex_iter_ based on the outcome of the last solve and the
// measured degeneracy, then clamps it to [min_iter, max_iter].
void LinearProgrammingConstraint::UpdateSimplexIterationLimit(
    const int64 min_iter, const int64 max_iter) {
  // Only adapt the limit when linearization is aggressive enough.
  if (sat_parameters_.linearization_level() < 2) return;
  const int64 num_degenerate_columns = CalculateDegeneracy();
  const int64 num_cols = simplex_.GetProblemNumCols().value();
  if (num_cols <= 0) {
    return;
  }
  CHECK_GT(num_cols, 0);
  // Roughly 10x the fraction of degenerate columns; used to shrink the limit.
  const int64 decrease_factor = (10 * num_degenerate_columns) / num_cols;
  // NOTE(review): a condition was lost in extraction here (original line
  // 1149); given the `else if (... OPTIMAL)` below, it presumably tests for
  // the simplex not having finished (e.g. hitting the iteration limit) --
  // confirm against the original file.
    // We reached here probably because we predicted wrong. We use this as a
    // signal to increase the iterations or punish less for degeneracy compare
    // to the other part.
    if (is_degenerate_) {
      next_simplex_iter_ /= std::max(int64{1}, decrease_factor);
    } else {
      next_simplex_iter_ *= 2;
    }
  } else if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
    if (is_degenerate_) {
      next_simplex_iter_ /= std::max(int64{1}, 2 * decrease_factor);
    } else {
      // This is the most common case. We use the size of the problem to
      // determine the limit and ignore the previous limit.
      next_simplex_iter_ = num_cols / 40;
    }
  }
  // Clamp the new limit to the requested range.
  next_simplex_iter_ =
      std::max(min_iter, std::min(max_iter, next_simplex_iter_));
}
1170 
// NOTE(review): the signature line of this function was lost in extraction
// (original line 1171); from the body (SolveLp(), cut rounds, objective
// propagation, bool returns) this is presumably
// `bool LinearProgrammingConstraint::Propagate()` -- confirm against the
// original file.
  UpdateBoundsOfLpVariables();

  // TODO(user): It seems the time we loose by not stopping early might be worth
  // it because we end up with a better explanation at optimality.
  glop::GlopParameters parameters = simplex_.GetParameters();
  if (/* DISABLES CODE */ (false) && objective_is_defined_) {
    // We put a limit on the dual objective since there is no point increasing
    // it past our current objective upper-bound (we will already fail as soon
    // as we pass it). Note that this limit is properly transformed using the
    // objective scaling factor and offset stored in lp_data_.
    //
    // Note that we use a bigger epsilon here to be sure that if we abort
    // because of this, we will report a conflict.
    parameters.set_objective_upper_limit(
        static_cast<double>(integer_trail_->UpperBound(objective_cp_).value() +
                            100.0 * kCpEpsilon));
  }

  // Put an iteration limit on the work we do in the simplex for this call. Note
  // that because we are "incremental", even if we don't solve it this time we
  // will make progress towards a solve in the lower node of the tree search.
  if (trail_->CurrentDecisionLevel() == 0) {
    // TODO(user): Dynamically change the iteration limit for root node as
    // well.
    parameters.set_max_number_of_iterations(2000);
  } else {
    parameters.set_max_number_of_iterations(next_simplex_iter_);
  }
  if (sat_parameters_.use_exact_lp_reason()) {
    // Tighter tolerances so the exact reason computation stays sound.
    parameters.set_change_status_to_imprecise(false);
    parameters.set_primal_feasibility_tolerance(1e-7);
    parameters.set_dual_feasibility_tolerance(1e-7);
  }

  simplex_.SetParameters(parameters);
  // NOTE(review): a statement was lost in extraction here (original line
  // 1207).
  if (!SolveLp()) return true;

  // Add new constraints to the LP and resolve?
  const int max_cuts_rounds =
      trail_->CurrentDecisionLevel() == 0
          ? sat_parameters_.max_cut_rounds_at_level_zero()
          : 1;
  int cuts_round = 0;
  while (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL &&
         cuts_round < max_cuts_rounds) {
    // We wait for the first batch of problem constraints to be added before we
    // begin to generate cuts.
    cuts_round++;
    if (!integer_lp_.empty()) {
      // The "generic" cuts are currently part of this class as they are using
      // data from the current LP.
      //
      // TODO(user): Refactor so that they are just normal cut generators?
      if (trail_->CurrentDecisionLevel() == 0) {
        implied_bounds_processor_.ClearCache();
        if (sat_parameters_.add_mir_cuts()) AddMirCuts();
        if (sat_parameters_.add_cg_cuts()) AddCGCuts();
      }

      // Try to add cuts.
      if (!cut_generators_.empty() &&
          (trail_->CurrentDecisionLevel() == 0 ||
           !sat_parameters_.only_add_cuts_at_level_zero())) {
        for (const CutGenerator& generator : cut_generators_) {
          generator.generate_cuts(expanded_lp_solution_, &constraint_manager_);
        }
      }
    }

    glop::BasisState state = simplex_.GetState();
    if (constraint_manager_.ChangeLp(expanded_lp_solution_, &state)) {
      // The LP changed: warm-start from the saved basis and solve again.
      simplex_.LoadStateForNextSolve(state);
      if (!CreateLpFromConstraintManager()) {
        return integer_trail_->ReportConflict({});
      }
      const double old_obj = simplex_.GetObjectiveValue();
      if (!SolveLp()) return true;
      if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
        VLOG(1) << "Relaxation improvement " << old_obj << " -> "
                << simplex_.GetObjectiveValue()
                << " diff: " << simplex_.GetObjectiveValue() - old_obj
                << " level: " << trail_->CurrentDecisionLevel();
      }
    } else {
      if (trail_->CurrentDecisionLevel() == 0) {
        lp_at_level_zero_is_final_ = true;
      }
      break;
    }
  }

  // A dual-unbounded problem is infeasible. We use the dual ray reason.
  // NOTE(review): the enclosing condition was lost in extraction (original
  // line 1265); presumably it tests for ProblemStatus::DUAL_UNBOUNDED --
  // confirm against the original file.
    if (sat_parameters_.use_exact_lp_reason()) {
      if (!FillExactDualRayReason()) return true;
    } else {
      FillReducedCostReasonIn(simplex_.GetDualRayRowCombination(),
                              &integer_reason_);
    }
    return integer_trail_->ReportConflict(integer_reason_);
  }

  // TODO(user): Update limits for DUAL_UNBOUNDED status as well.
  UpdateSimplexIterationLimit(/*min_iter=*/10, /*max_iter=*/1000);

  // Optimality deductions if problem has an objective.
  if (objective_is_defined_ &&
      // NOTE(review): the remaining operands of this condition were lost in
      // extraction (original lines 1280-1281).
    // Try to filter optimal objective value. Note that GetObjectiveValue()
    // already take care of the scaling so that it returns an objective in the
    // CP world.
    const double relaxed_optimal_objective = simplex_.GetObjectiveValue();
    const IntegerValue approximate_new_lb(
        static_cast<int64>(std::ceil(relaxed_optimal_objective - kCpEpsilon)));

    // TODO(user): Maybe do a bit less computation when we cannot propagate
    // anything.
    if (sat_parameters_.use_exact_lp_reason()) {
      if (!ExactLpReasonning()) return false;

      // Display when the inexact bound would have propagated more.
      const IntegerValue propagated_lb =
          integer_trail_->LowerBound(objective_cp_);
      if (approximate_new_lb > propagated_lb) {
        VLOG(2) << "LP objective [ " << ToDouble(propagated_lb) << ", "
                << ToDouble(integer_trail_->UpperBound(objective_cp_))
                << " ] approx_lb += "
                << ToDouble(approximate_new_lb - propagated_lb) << " gap: "
                << integer_trail_->UpperBound(objective_cp_) - propagated_lb;
      }
    } else {
      FillReducedCostReasonIn(simplex_.GetReducedCosts(), &integer_reason_);
      const double objective_cp_ub =
          ToDouble(integer_trail_->UpperBound(objective_cp_));
      ReducedCostStrengtheningDeductions(objective_cp_ub -
                                         relaxed_optimal_objective);
      if (!deductions_.empty()) {
        deductions_reason_ = integer_reason_;
        deductions_reason_.push_back(
            integer_trail_->UpperBoundAsLiteral(objective_cp_));
      }

      // Push new objective lb.
      if (approximate_new_lb > integer_trail_->LowerBound(objective_cp_)) {
        const IntegerLiteral deduction =
            IntegerLiteral::GreaterOrEqual(objective_cp_, approximate_new_lb);
        if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
          return false;
        }
      }

      // Push reduced cost strengthening bounds.
      if (!deductions_.empty()) {
        // All deductions share one reason; enqueue them with the same index.
        const int trail_index_with_same_reason = integer_trail_->Index();
        for (const IntegerLiteral deduction : deductions_) {
          if (!integer_trail_->Enqueue(deduction, {}, deductions_reason_,
                                       trail_index_with_same_reason)) {
            return false;
          }
        }
      }
    }
  }

  // Copy more info about the current solution.
  if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
    CHECK(lp_solution_is_set_);

    lp_objective_ = simplex_.GetObjectiveValue();
    lp_solution_is_integer_ = true;
    const int num_vars = integer_variables_.size();
    for (int i = 0; i < num_vars; i++) {
      // Unscale reduced costs back to the CP world.
      lp_reduced_cost_[i] = scaler_.UnscaleReducedCost(
          glop::ColIndex(i), simplex_.GetReducedCost(glop::ColIndex(i)));
      if (std::abs(lp_solution_[i] - std::round(lp_solution_[i])) >
          kCpEpsilon) {
        lp_solution_is_integer_ = false;
      }
    }

    if (compute_reduced_cost_averages_) {
      UpdateAverageReducedCosts();
    }
  }

  if (sat_parameters_.use_branching_in_lp() && objective_is_defined_ &&
      trail_->CurrentDecisionLevel() == 0 && !is_degenerate_ &&
      lp_solution_is_set_ && !lp_solution_is_integer_ &&
      sat_parameters_.linearization_level() >= 2 &&
      compute_reduced_cost_averages_ &&
      // NOTE(review): the last operand of this condition was lost in
      // extraction (original line 1364).
    // Only branch every branching_frequency_ calls.
    count_since_last_branching_++;
    if (count_since_last_branching_ < branching_frequency_) {
      return true;
    }
    count_since_last_branching_ = 0;
    bool branching_successful = false;

    // Strong branching on top max_num_branches variable.
    const int max_num_branches = 3;
    const int num_vars = integer_variables_.size();
    std::vector<std::pair<double, IntegerVariable>> branching_vars;
    for (int i = 0; i < num_vars; ++i) {
      const IntegerVariable var = integer_variables_[i];
      const IntegerVariable positive_var = PositiveVariable(var);

      // Skip non fractional variables.
      const double current_value = GetSolutionValue(positive_var);
      if (std::abs(current_value - std::round(current_value)) <= kCpEpsilon) {
        continue;
      }

      // Skip ignored variables.
      if (integer_trail_->IsCurrentlyIgnored(var)) continue;

      // We can use any metric to select a variable to branch on. Reduced cost
      // average is one of the most promissing metric. It captures the history
      // of the objective bound improvement in LP due to changes in the given
      // variable bounds.
      //
      // NOTE: We also experimented using PseudoCosts and most recent reduced
      // cost as metrics but it doesn't give better results on benchmarks.
      const double cost_i = rc_scores_[i];
      // Keep branching_vars sorted by decreasing score, truncated to the top
      // max_num_branches entries.
      std::pair<double, IntegerVariable> branching_var =
          std::make_pair(-cost_i, positive_var);
      auto iterator = std::lower_bound(branching_vars.begin(),
                                       branching_vars.end(), branching_var);

      branching_vars.insert(iterator, branching_var);
      if (branching_vars.size() > max_num_branches) {
        branching_vars.resize(max_num_branches);
      }
    }

    for (const std::pair<double, IntegerVariable>& branching_var :
         branching_vars) {
      const IntegerVariable positive_var = branching_var.second;
      VLOG(2) << "Branching on: " << positive_var;
      if (BranchOnVar(positive_var)) {
        VLOG(2) << "Branching successful.";
        branching_successful = true;
      } else {
        break;
      }
    }
    // Back off when branching did not help.
    if (!branching_successful) {
      branching_frequency_ *= 2;
    }
  }
  return true;
}
1425 
1426 // Returns kMinIntegerValue in case of overflow.
1427 //
1428 // TODO(user): Because of PreventOverflow(), this should actually never happen.
1429 IntegerValue LinearProgrammingConstraint::GetImpliedLowerBound(
1430  const LinearConstraint& terms) const {
1431  IntegerValue lower_bound(0);
1432  const int size = terms.vars.size();
1433  for (int i = 0; i < size; ++i) {
1434  const IntegerVariable var = terms.vars[i];
1435  const IntegerValue coeff = terms.coeffs[i];
1436  CHECK_NE(coeff, 0);
1437  const IntegerValue bound = coeff > 0 ? integer_trail_->LowerBound(var)
1438  : integer_trail_->UpperBound(var);
1439  if (!AddProductTo(bound, coeff, &lower_bound)) return kMinIntegerValue;
1440  }
1441  return lower_bound;
1442 }
1443 
1444 bool LinearProgrammingConstraint::PossibleOverflow(
1445  const LinearConstraint& constraint) {
1446  IntegerValue lower_bound(0);
1447  const int size = constraint.vars.size();
1448  for (int i = 0; i < size; ++i) {
1449  const IntegerVariable var = constraint.vars[i];
1450  const IntegerValue coeff = constraint.coeffs[i];
1451  CHECK_NE(coeff, 0);
1452  const IntegerValue bound = coeff > 0 ? integer_trail_->LowerBound(var)
1453  : integer_trail_->UpperBound(var);
1454  if (!AddProductTo(bound, coeff, &lower_bound)) {
1455  return true;
1456  }
1457  }
1458  const int64 slack = CapAdd(lower_bound.value(), -constraint.ub.value());
1459  if (slack == kint64min || slack == kint64max) {
1460  return true;
1461  }
1462  return false;
1463 }
1464 
1465 namespace {
1466 
1467 absl::int128 FloorRatio128(absl::int128 x, IntegerValue positive_div) {
1468  absl::int128 div128(positive_div.value());
1469  absl::int128 result = x / div128;
1470  if (result * div128 > x) return result - 1;
1471  return result;
1472 }
1473 
1474 } // namespace
1475 
// Divides all coefficients (and adjusts the rhs) of *constraint by a power-of-
// two-free divisor chosen so that any partial sum fits in roughly 2^max_pow,
// keeping the constraint valid (possibly weaker) at the current level.
void LinearProgrammingConstraint::PreventOverflow(LinearConstraint* constraint,
                                                  int max_pow) {
  // Compute the min/max possible partial sum.
  //
  // Note that since we currently only use this cut locally, it is okay to
  // use the current lb/ub here to decide if we have an overflow or not. Below
  // however, we do have to use the level zero lower bound.
  double sum_min = std::min(0.0, ToDouble(-constraint->ub));
  double sum_max = std::max(0.0, ToDouble(-constraint->ub));
  const int size = constraint->vars.size();
  for (int i = 0; i < size; ++i) {
    const IntegerVariable var = constraint->vars[i];
    const double coeff = ToDouble(constraint->coeffs[i]);
    // A term's extreme contributions are attained at the variable's bounds.
    const double prod1 = coeff * ToDouble(integer_trail_->LowerBound(var));
    const double prod2 = coeff * ToDouble(integer_trail_->UpperBound(var));
    sum_min += std::min(0.0, std::min(prod1, prod2));
    sum_max += std::max(0.0, std::max(prod1, prod2));
  }
  const double max_value = std::max(sum_max, -sum_min);

  // Smallest divisor such that max_value / divisor <= 2^max_pow.
  const IntegerValue divisor(std::ceil(std::ldexp(max_value, -max_pow)));
  if (divisor <= 1) return;

  // To be correct, we need to shift all variable so that they are positive.
  //
  // Important: One might be tempted to think that using the current variable
  // bounds is okay here since we only use this to derive cut/constraint that
  // only needs to be locally valid. However, in some corner cases (like when
  // one term become zero), we might loose the fact that we used one of the
  // variable bound to derive the new constraint, so we will miss it in the
  // explanation !!
  //
  // TODO(user): This code is tricky and similar to the one to generate cuts.
  // Test and may reduce the duplication? note however that here we use int128
  // to deal with potential overflow.
  int new_size = 0;
  absl::int128 adjust = 0;
  for (int i = 0; i < size; ++i) {
    const IntegerValue old_coeff = constraint->coeffs[i];
    const IntegerValue new_coeff = FloorRatio(old_coeff, divisor);

    // Compute the rhs adjustement.
    // remainder = old_coeff - new_coeff * divisor, in [0, divisor) since
    // FloorRatio rounds down; it is weighted by the level-zero lower bound.
    const absl::int128 remainder =
        absl::int128(old_coeff.value()) -
        absl::int128(new_coeff.value()) * absl::int128(divisor.value());
    adjust +=
        remainder *
        absl::int128(
            integer_trail_->LevelZeroLowerBound(constraint->vars[i]).value());

    // Drop terms whose coefficient rounded down to zero; the arrays are
    // compacted in place.
    if (new_coeff == 0) continue;
    constraint->vars[new_size] = constraint->vars[i];
    constraint->coeffs[new_size] = new_coeff;
    ++new_size;
  }
  constraint->vars.resize(new_size);
  constraint->coeffs.resize(new_size);

  // New rhs, rounded down at int128 precision to keep the constraint valid.
  constraint->ub = IntegerValue(static_cast<int64>(
      FloorRatio128(absl::int128(constraint->ub.value()) - adjust, divisor)));
}
1537 
1538 // TODO(user): combine this with RelaxLinearReason() to avoid the extra
1539 // magnitude vector and the weird precondition of RelaxLinearReason().
1540 void LinearProgrammingConstraint::SetImpliedLowerBoundReason(
1541  const LinearConstraint& terms, IntegerValue slack) {
1542  integer_reason_.clear();
1543  std::vector<IntegerValue> magnitudes;
1544  const int size = terms.vars.size();
1545  for (int i = 0; i < size; ++i) {
1546  const IntegerVariable var = terms.vars[i];
1547  const IntegerValue coeff = terms.coeffs[i];
1548  CHECK_NE(coeff, 0);
1549  if (coeff > 0) {
1550  magnitudes.push_back(coeff);
1551  integer_reason_.push_back(integer_trail_->LowerBoundAsLiteral(var));
1552  } else {
1553  magnitudes.push_back(-coeff);
1554  integer_reason_.push_back(integer_trail_->UpperBoundAsLiteral(var));
1555  }
1556  }
1557  CHECK_GE(slack, 0);
1558  if (slack > 0) {
1559  integer_trail_->RelaxLinearReason(slack, magnitudes, &integer_reason_);
1560  }
1561  integer_trail_->RemoveLevelZeroBounds(&integer_reason_);
1562 }
1563 
1564 // TODO(user): Provide a sparse interface.
1565 std::vector<std::pair<RowIndex, IntegerValue>>
1566 LinearProgrammingConstraint::ScaleLpMultiplier(
1567  bool take_objective_into_account,
1568  const glop::DenseColumn& dense_lp_multipliers, Fractional* scaling,
1569  int max_pow) const {
1570  double max_sum = 0.0;
1571  std::vector<std::pair<RowIndex, Fractional>> cp_multipliers;
1572  for (RowIndex row(0); row < dense_lp_multipliers.size(); ++row) {
1573  const Fractional lp_multi = dense_lp_multipliers[row];
1574 
1575  // We ignore small values since these are likely errors and will not
1576  // contribute much to the new lp constraint anyway.
1577  if (std::abs(lp_multi) < 1e-12) continue;
1578 
1579  // Remove trivial bad cases.
1580  //
1581  // TODO(user): It might be better (when possible) to use the OPTIMAL row
1582  // status since in most situation we do want the constraint we add to be
1583  // tight under the current LP solution. Only for infeasible problem we might
1584  // not have access to the status.
1585  if (lp_multi > 0.0 && integer_lp_[row].ub >= kMaxIntegerValue) {
1586  continue;
1587  }
1588  if (lp_multi < 0.0 && integer_lp_[row].lb <= kMinIntegerValue) {
1589  continue;
1590  }
1591 
1592  const Fractional cp_multi = scaler_.UnscaleDualValue(row, lp_multi);
1593  cp_multipliers.push_back({row, cp_multi});
1594  max_sum += ToDouble(infinity_norms_[row]) * std::abs(cp_multi);
1595  }
1596 
1597  // This behave exactly like if we had another "objective" constraint with
1598  // an lp_multi of 1.0 and a cp_multi of 1.0.
1599  if (take_objective_into_account) {
1600  max_sum += ToDouble(objective_infinity_norm_);
1601  }
1602 
1603  std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
1604  if (max_sum == 0.0) {
1605  // Empty linear combinaison.
1606  *scaling = 1;
1607  return integer_multipliers;
1608  }
1609 
1610  // We want max_sum * scaling to be <= 2 ^ max_pow and fit on an int64.
1611  // We use a power of 2 as this seems to work better.
1612  const double threshold = std::ldexp(1, max_pow) / max_sum;
1613  *scaling = 1.0;
1614  while (2 * *scaling <= threshold) *scaling *= 2;
1615 
1616  // Scale the multipliers by *scaling.
1617  //
1618  // TODO(user): Maybe use int128 to avoid overflow?
1619  for (const auto entry : cp_multipliers) {
1620  const IntegerValue coeff(std::round(entry.second * (*scaling)));
1621  if (coeff != 0) integer_multipliers.push_back({entry.first, coeff});
1622  }
1623  return integer_multipliers;
1624 }
1625 
// Builds the linear combination of the integer LP rows given by
// integer_multipliers. Returns false on integer overflow.
bool LinearProgrammingConstraint::ComputeNewLinearConstraint(
    const std::vector<std::pair<RowIndex, IntegerValue>>& integer_multipliers,
    // NOTE(review): a parameter declaration was lost in extraction here
    // (original line 1628); `dense_terms` is used below without a visible
    // declaration -- presumably the dense output coefficient vector. Confirm
    // against the original file.
    IntegerValue* upper_bound) const {
  // Initialize the new constraint.
  *upper_bound = 0;
  dense_terms->assign(integer_variables_.size(), IntegerValue(0));

  // Compute the new constraint by taking the linear combination given by
  // integer_multipliers of the integer constraints in integer_lp_.
  const ColIndex num_cols(integer_variables_.size());
  for (const std::pair<RowIndex, IntegerValue> term : integer_multipliers) {
    const RowIndex row = term.first;
    const IntegerValue multiplier = term.second;
    CHECK_LT(row, integer_lp_.size());

    // Update the constraint. Returns false on overflow.
    if (!AddLinearExpressionMultiple(multiplier, integer_lp_[row].terms,
                                     dense_terms)) {
      return false;
    }

    // Update the upper bound: the row bound used depends on the sign of the
    // multiplier.
    const IntegerValue bound =
        multiplier > 0 ? integer_lp_[row].ub : integer_lp_[row].lb;
    if (!AddProductTo(multiplier, bound, upper_bound)) return false;
  }

  return true;
}
1656 
1657 // TODO(user): no need to update the multipliers.
1658 void LinearProgrammingConstraint::AdjustNewLinearConstraint(
1659  std::vector<std::pair<glop::RowIndex, IntegerValue>>* integer_multipliers,
1661  IntegerValue* upper_bound) const {
1662  const IntegerValue kMaxWantedCoeff(1e18);
1663  for (std::pair<RowIndex, IntegerValue>& term : *integer_multipliers) {
1664  const RowIndex row = term.first;
1665  const IntegerValue multiplier = term.second;
1666  if (multiplier == 0) continue;
1667 
1668  // We will only allow change of the form "multiplier += to_add" with to_add
1669  // in [-negative_limit, positive_limit].
1670  IntegerValue negative_limit = kMaxWantedCoeff;
1671  IntegerValue positive_limit = kMaxWantedCoeff;
1672 
1673  // Make sure we never change the sign of the multiplier, except if the
1674  // row is an equality in which case we don't care.
1675  if (integer_lp_[row].ub != integer_lp_[row].lb) {
1676  if (multiplier > 0) {
1677  negative_limit = std::min(negative_limit, multiplier);
1678  } else {
1679  positive_limit = std::min(positive_limit, -multiplier);
1680  }
1681  }
1682 
1683  // Make sure upper_bound + to_add * row_bound never overflow.
1684  const IntegerValue row_bound =
1685  multiplier > 0 ? integer_lp_[row].ub : integer_lp_[row].lb;
1686  if (row_bound != 0) {
1687  const IntegerValue limit1 = FloorRatio(
1688  std::max(IntegerValue(0), kMaxWantedCoeff - IntTypeAbs(*upper_bound)),
1689  IntTypeAbs(row_bound));
1690  const IntegerValue limit2 =
1691  FloorRatio(kMaxWantedCoeff, IntTypeAbs(row_bound));
1692  if (*upper_bound > 0 == row_bound > 0) { // Same sign.
1693  positive_limit = std::min(positive_limit, limit1);
1694  negative_limit = std::min(negative_limit, limit2);
1695  } else {
1696  negative_limit = std::min(negative_limit, limit1);
1697  positive_limit = std::min(positive_limit, limit2);
1698  }
1699  }
1700 
1701  // If we add the row to the dense_terms, diff will indicate by how much
1702  // |upper_bound - ImpliedLB(dense_terms)| will change. That correspond to
1703  // increasing the multiplier by 1.
1704  //
1705  // At this stage, we are not sure computing sum coeff * bound will not
1706  // overflow, so we use floating point numbers. It is fine to do so since
1707  // this is not directly involved in the actual exact constraint generation:
1708  // these variables are just used in an heuristic.
1709  double positive_diff = ToDouble(row_bound);
1710  double negative_diff = ToDouble(row_bound);
1711 
1712  // TODO(user): we could relax a bit some of the condition and allow a sign
1713  // change. It is just trickier to compute the diff when we allow such
1714  // changes.
1715  for (const auto entry : integer_lp_[row].terms) {
1716  const ColIndex col = entry.first;
1717  const IntegerValue coeff = entry.second;
1718  const IntegerValue abs_coef = IntTypeAbs(coeff);
1719  CHECK_NE(coeff, 0);
1720 
1721  const IntegerVariable var = integer_variables_[col.value()];
1722  const IntegerValue lb = integer_trail_->LowerBound(var);
1723  const IntegerValue ub = integer_trail_->UpperBound(var);
1724 
1725  // Moving a variable away from zero seems to improve the bound even
1726  // if it reduces the number of non-zero. Note that this is because of
1727  // this that positive_diff and negative_diff are not the same.
1728  const IntegerValue current = (*dense_terms)[col];
1729  if (current == 0) {
1730  const IntegerValue overflow_limit(
1731  FloorRatio(kMaxWantedCoeff, abs_coef));
1732  positive_limit = std::min(positive_limit, overflow_limit);
1733  negative_limit = std::min(negative_limit, overflow_limit);
1734  if (coeff > 0) {
1735  positive_diff -= ToDouble(coeff) * ToDouble(lb);
1736  negative_diff -= ToDouble(coeff) * ToDouble(ub);
1737  } else {
1738  positive_diff -= ToDouble(coeff) * ToDouble(ub);
1739  negative_diff -= ToDouble(coeff) * ToDouble(lb);
1740  }
1741  continue;
1742  }
1743 
1744  // We don't want to change the sign of current (except if the variable is
1745  // fixed) or to have an overflow.
1746  //
1747  // Corner case:
1748  // - IntTypeAbs(current) can be larger than kMaxWantedCoeff!
1749  // - The code assumes that 2 * kMaxWantedCoeff do not overflow.
1750  const IntegerValue current_magnitude = IntTypeAbs(current);
1751  const IntegerValue other_direction_limit = FloorRatio(
1752  lb == ub
1753  ? kMaxWantedCoeff + std::min(current_magnitude,
1754  kMaxIntegerValue - kMaxWantedCoeff)
1755  : current_magnitude,
1756  abs_coef);
1757  const IntegerValue same_direction_limit(FloorRatio(
1758  std::max(IntegerValue(0), kMaxWantedCoeff - current_magnitude),
1759  abs_coef));
1760  if (current > 0 == coeff > 0) { // Same sign.
1761  negative_limit = std::min(negative_limit, other_direction_limit);
1762  positive_limit = std::min(positive_limit, same_direction_limit);
1763  } else {
1764  negative_limit = std::min(negative_limit, same_direction_limit);
1765  positive_limit = std::min(positive_limit, other_direction_limit);
1766  }
1767 
1768  // This is how diff change.
1769  const IntegerValue implied = current > 0 ? lb : ub;
1770  if (implied != 0) {
1771  positive_diff -= ToDouble(coeff) * ToDouble(implied);
1772  negative_diff -= ToDouble(coeff) * ToDouble(implied);
1773  }
1774  }
1775 
1776  // Only add a multiple of this row if it tighten the final constraint.
1777  // The positive_diff/negative_diff are supposed to be integer modulo the
1778  // double precision, so we only add a multiple if they seems far away from
1779  // zero.
1780  IntegerValue to_add(0);
1781  if (positive_diff <= -1.0 && positive_limit > 0) {
1782  to_add = positive_limit;
1783  }
1784  if (negative_diff >= 1.0 && negative_limit > 0) {
1785  // Pick this if it is better than the positive sign.
1786  if (to_add == 0 ||
1787  std::abs(ToDouble(negative_limit) * negative_diff) >
1788  std::abs(ToDouble(positive_limit) * positive_diff)) {
1789  to_add = -negative_limit;
1790  }
1791  }
1792  if (to_add != 0) {
1793  term.second += to_add;
1794  *upper_bound += to_add * row_bound;
1795  for (const auto entry : integer_lp_[row].terms) {
1796  const ColIndex col = entry.first;
1797  const IntegerValue coeff = entry.second;
1798  (*dense_terms)[col] += to_add * coeff;
1799  }
1800  }
1801  }
1802 }
1803 
// The "exact" computation go as follow:
//
// Given any INTEGER linear combination of the LP constraints, we can create a
// new integer constraint that is valid (its computation must not overflow
// though). Lets call this "linear_combination <= ub". We can then always add to
// it the inequality "objective_terms <= objective_var", so we get:
// ImpliedLB(objective_terms + linear_combination) - ub <= objective_var.
// where ImpliedLB() is computed from the variable current bounds.
//
// Now, if we use for the linear combination and approximation of the optimal
// negated dual LP values (by scaling them and rounding them to integer), we
// will get an EXACT objective lower bound that is more or less the same as the
// inexact bound given by the LP relaxation. This allows to derive exact reasons
// for any propagation done by this constraint.
//
// Returns the result of Propagate() on the derived constraint; a "true" return
// on an early abort just means "no conflict detected here".
// NOTE(review): the "Reasonning" spelling is part of the existing interface.
bool LinearProgrammingConstraint::ExactLpReasonning() {
  // Clear old reason and deductions.
  integer_reason_.clear();
  deductions_.clear();
  deductions_reason_.clear();

  // The row multipliers will be the negation of the LP duals.
  //
  // TODO(user): Provide and use a sparse API in Glop to get the duals.
  const RowIndex num_rows = simplex_.GetProblemNumRows();
  glop::DenseColumn lp_multipliers(num_rows);
  for (RowIndex row(0); row < num_rows; ++row) {
    lp_multipliers[row] = -simplex_.GetDualValue(row);
  }

  // Round the scaled duals to integer multipliers.
  Fractional scaling;
  std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers =
      ScaleLpMultiplier(/*take_objective_into_account=*/true, lp_multipliers,
                        &scaling);

  // Exact integer linear combination of the rows; abort (without conflict) if
  // the computation would overflow.
  IntegerValue rc_ub;
  if (!ComputeNewLinearConstraint(integer_multipliers, &tmp_dense_vector_,
                                  &rc_ub)) {
    VLOG(1) << "Issue while computing the exact LP reason. Aborting.";
    return true;
  }

  // The "objective constraint" behave like if the unscaled cp multiplier was
  // 1.0, so we will multiply it by this number and add it to reduced_costs.
  const IntegerValue obj_scale(std::round(scaling));
  if (obj_scale == 0) {
    VLOG(1) << "Overflow during exact LP reasoning. scaling=" << scaling;
    return true;
  }
  CHECK(AddLinearExpressionMultiple(obj_scale, integer_objective_,
                                    &tmp_dense_vector_));

  // Heuristically tighten the combined constraint (see
  // AdjustNewLinearConstraint()).
  AdjustNewLinearConstraint(&integer_multipliers, &tmp_dense_vector_, &rc_ub);

  // Create the IntegerSumLE that will allow to propagate the objective and more
  // generally do the reduced cost fixing.
  LinearConstraint new_constraint;
  ConvertToLinearConstraint(tmp_dense_vector_, rc_ub, &new_constraint);
  new_constraint.vars.push_back(objective_cp_);
  new_constraint.coeffs.push_back(-obj_scale);
  DivideByGCD(&new_constraint);
  PreventOverflow(&new_constraint);
  DCHECK(!PossibleOverflow(new_constraint));
  DCHECK(constraint_manager_.DebugCheckConstraint(new_constraint));

  IntegerSumLE* cp_constraint =
      new IntegerSumLE({}, new_constraint.vars, new_constraint.coeffs,
                       new_constraint.ub, model_);
  if (trail_->CurrentDecisionLevel() == 0) {
    // Since we will never ask the reason for a constraint at level 0, we just
    // keep the last one.
    optimal_constraints_.clear();
  }
  // NOTE(review): optimal_constraints_ appears to take ownership of the raw
  // `new` above — confirm it stores smart pointers.
  optimal_constraints_.emplace_back(cp_constraint);
  rev_optimal_constraints_size_ = optimal_constraints_.size();
  return cp_constraint->Propagate();
}
1880 
1881 bool LinearProgrammingConstraint::FillExactDualRayReason() {
1882  Fractional scaling;
1883  std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers =
1884  ScaleLpMultiplier(/*take_objective_into_account=*/false,
1885  simplex_.GetDualRay(), &scaling);
1886 
1887  IntegerValue new_constraint_ub;
1888  if (!ComputeNewLinearConstraint(integer_multipliers, &tmp_dense_vector_,
1889  &new_constraint_ub)) {
1890  VLOG(1) << "Isse while computing the exact dual ray reason. Aborting.";
1891  return false;
1892  }
1893 
1894  AdjustNewLinearConstraint(&integer_multipliers, &tmp_dense_vector_,
1895  &new_constraint_ub);
1896 
1897  LinearConstraint new_constraint;
1898  ConvertToLinearConstraint(tmp_dense_vector_, new_constraint_ub,
1899  &new_constraint);
1900  DivideByGCD(&new_constraint);
1901  PreventOverflow(&new_constraint);
1902  DCHECK(!PossibleOverflow(new_constraint));
1903  DCHECK(constraint_manager_.DebugCheckConstraint(new_constraint));
1904 
1905  const IntegerValue implied_lb = GetImpliedLowerBound(new_constraint);
1906  if (implied_lb <= new_constraint.ub) {
1907  VLOG(1) << "LP exact dual ray not infeasible,"
1908  << " implied_lb: " << implied_lb.value() / scaling
1909  << " ub: " << new_constraint.ub.value() / scaling;
1910  return false;
1911  }
1912  const IntegerValue slack = (implied_lb - new_constraint.ub) - 1;
1913  SetImpliedLowerBoundReason(new_constraint, slack);
1914  return true;
1915 }
1916 
1917 int64 LinearProgrammingConstraint::CalculateDegeneracy() {
1918  const glop::ColIndex num_vars = simplex_.GetProblemNumCols();
1919  int num_non_basic_with_zero_rc = 0;
1920  for (glop::ColIndex i(0); i < num_vars; ++i) {
1921  const double rc = simplex_.GetReducedCost(i);
1922  if (rc != 0.0) continue;
1923  if (simplex_.GetVariableStatus(i) == glop::VariableStatus::BASIC) {
1924  continue;
1925  }
1926  num_non_basic_with_zero_rc++;
1927  }
1928  const int64 num_cols = simplex_.GetProblemNumCols().value();
1929  is_degenerate_ = num_non_basic_with_zero_rc >= 0.3 * num_cols;
1930  return num_non_basic_with_zero_rc;
1931 }
1932 
// Reduced-cost strengthening: given how much the CP objective may still change
// (cp_objective_delta), each variable with a nonzero reduced cost bounds how
// far its value can move from the current LP solution. The deduced tighter
// variable bounds are collected in deductions_ (previous content is cleared).
void LinearProgrammingConstraint::ReducedCostStrengtheningDeductions(
    double cp_objective_delta) {
  deductions_.clear();

  // TRICKY: while simplex_.GetObjectiveValue() use the objective scaling factor
  // stored in the lp_data_, all the other functions like GetReducedCost() or
  // GetVariableValue() do not.
  const double lp_objective_delta =
      cp_objective_delta / lp_data_.objective_scaling_factor();
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    const IntegerVariable cp_var = integer_variables_[i];
    const glop::ColIndex lp_var = glop::ColIndex(i);
    const double rc = simplex_.GetReducedCost(lp_var);
    const double value = simplex_.GetVariableValue(lp_var);

    // A zero reduced cost gives no information.
    if (rc == 0.0) continue;
    // LP value the variable could reach before exhausting the objective
    // slack, converted back to the CP variable scale.
    const double lp_other_bound = value + lp_objective_delta / rc;
    const double cp_other_bound =
        scaler_.UnscaleVariableValue(lp_var, lp_other_bound);

    if (rc > kLpEpsilon) {
      // Positive reduced cost: we may derive a tighter upper bound.
      const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
      const double new_ub = std::floor(cp_other_bound + kCpEpsilon);
      if (new_ub < ub) {
        // TODO(user): Because rc > kLpEpsilon, the lower_bound of cp_var
        // will be part of the reason returned by FillReducedCostsReason(), but
        // we actually do not need it here. Same below.
        const IntegerValue new_ub_int(static_cast<IntegerValue>(new_ub));
        deductions_.push_back(IntegerLiteral::LowerOrEqual(cp_var, new_ub_int));
      }
    } else if (rc < -kLpEpsilon) {
      // Negative reduced cost: symmetric case, we may derive a tighter lower
      // bound.
      const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
      const double new_lb = std::ceil(cp_other_bound - kCpEpsilon);
      if (new_lb > lb) {
        const IntegerValue new_lb_int(static_cast<IntegerValue>(new_lb));
        deductions_.push_back(
            IntegerLiteral::GreaterOrEqual(cp_var, new_lb_int));
      }
    }
  }
}
1975 
1976 namespace {
1977 
1978 // Add a cut of the form Sum_{outgoing arcs from S} lp >= rhs_lower_bound.
1979 //
1980 // Note that we used to also add the same cut for the incoming arcs, but because
1981 // of flow conservation on these problems, the outgoing flow is always the same
1982 // as the incoming flow, so adding this extra cut doesn't seem relevant.
1983 void AddOutgoingCut(int num_nodes, int subset_size,
1984  const std::vector<bool>& in_subset,
1985  const std::vector<int>& tails,
1986  const std::vector<int>& heads,
1987  const std::vector<Literal>& literals,
1988  const std::vector<double>& literal_lp_values,
1989  int64 rhs_lower_bound,
1990  const gtl::ITIVector<IntegerVariable, double>& lp_values,
1991  LinearConstraintManager* manager, Model* model) {
1992  // A node is said to be optional if it can be excluded from the subcircuit,
1993  // in which case there is a self-loop on that node.
1994  // If there are optional nodes, use extended formula:
1995  // sum(cut) >= 1 - optional_loop_in - optional_loop_out
1996  // where optional_loop_in's node is in subset, optional_loop_out's is out.
1997  // TODO(user): Favor optional loops fixed to zero at root.
1998  int num_optional_nodes_in = 0;
1999  int num_optional_nodes_out = 0;
2000  int optional_loop_in = -1;
2001  int optional_loop_out = -1;
2002  for (int i = 0; i < tails.size(); ++i) {
2003  if (tails[i] != heads[i]) continue;
2004  if (in_subset[tails[i]]) {
2005  num_optional_nodes_in++;
2006  if (optional_loop_in == -1 ||
2007  literal_lp_values[i] < literal_lp_values[optional_loop_in]) {
2008  optional_loop_in = i;
2009  }
2010  } else {
2011  num_optional_nodes_out++;
2012  if (optional_loop_out == -1 ||
2013  literal_lp_values[i] < literal_lp_values[optional_loop_out]) {
2014  optional_loop_out = i;
2015  }
2016  }
2017  }
2018 
2019  // TODO(user): The lower bound for CVRP is computed assuming all nodes must be
2020  // served, if it is > 1 we lower it to one in the presence of optional nodes.
2021  if (num_optional_nodes_in + num_optional_nodes_out > 0) {
2022  CHECK_GE(rhs_lower_bound, 1);
2023  rhs_lower_bound = 1;
2024  }
2025 
2026  LinearConstraintBuilder outgoing(model, IntegerValue(rhs_lower_bound),
2028  double sum_outgoing = 0.0;
2029 
2030  // Add outgoing arcs, compute outgoing flow.
2031  for (int i = 0; i < tails.size(); ++i) {
2032  if (in_subset[tails[i]] && !in_subset[heads[i]]) {
2033  sum_outgoing += literal_lp_values[i];
2034  CHECK(outgoing.AddLiteralTerm(literals[i], IntegerValue(1)));
2035  }
2036  }
2037 
2038  // Support optional nodes if any.
2039  if (num_optional_nodes_in + num_optional_nodes_out > 0) {
2040  // When all optionals of one side are excluded in lp solution, no cut.
2041  if (num_optional_nodes_in == subset_size &&
2042  (optional_loop_in == -1 ||
2043  literal_lp_values[optional_loop_in] > 1.0 - 1e-6)) {
2044  return;
2045  }
2046  if (num_optional_nodes_out == num_nodes - subset_size &&
2047  (optional_loop_out == -1 ||
2048  literal_lp_values[optional_loop_out] > 1.0 - 1e-6)) {
2049  return;
2050  }
2051 
2052  // There is no mandatory node in subset, add optional_loop_in.
2053  if (num_optional_nodes_in == subset_size) {
2054  CHECK(
2055  outgoing.AddLiteralTerm(literals[optional_loop_in], IntegerValue(1)));
2056  sum_outgoing += literal_lp_values[optional_loop_in];
2057  }
2058 
2059  // There is no mandatory node out of subset, add optional_loop_out.
2060  if (num_optional_nodes_out == num_nodes - subset_size) {
2061  CHECK(outgoing.AddLiteralTerm(literals[optional_loop_out],
2062  IntegerValue(1)));
2063  sum_outgoing += literal_lp_values[optional_loop_out];
2064  }
2065  }
2066 
2067  if (sum_outgoing < rhs_lower_bound - 1e-6) {
2068  manager->AddCut(outgoing.Build(), "Circuit", lp_values);
2069  }
2070 }
2071 
2072 } // namespace
2073 
2074 // We roughly follow the algorithm described in section 6 of "The Traveling
2075 // Salesman Problem, A computational Study", David L. Applegate, Robert E.
2076 // Bixby, Vasek Chvatal, William J. Cook.
2077 //
2078 // Note that this is mainly a "symmetric" case algo, but it does still work for
2079 // the asymmetric case.
2081  int num_nodes, const std::vector<int>& tails, const std::vector<int>& heads,
2082  const std::vector<Literal>& literals,
2083  const gtl::ITIVector<IntegerVariable, double>& lp_values,
2084  absl::Span<const int64> demands, int64 capacity,
2085  LinearConstraintManager* manager, Model* model) {
2086  if (num_nodes <= 2) return;
2087 
2088  // We will collect only the arcs with a positive lp_values to speed up some
2089  // computation below.
2090  struct Arc {
2091  int tail;
2092  int head;
2093  double lp_value;
2094  };
2095  std::vector<Arc> relevant_arcs;
2096 
2097  // Sort the arcs by non-increasing lp_values.
2098  std::vector<double> literal_lp_values(literals.size());
2099  std::vector<std::pair<double, int>> arc_by_decreasing_lp_values;
2100  auto* encoder = model->GetOrCreate<IntegerEncoder>();
2101  for (int i = 0; i < literals.size(); ++i) {
2102  double lp_value;
2103  const IntegerVariable direct_view = encoder->GetLiteralView(literals[i]);
2104  if (direct_view != kNoIntegerVariable) {
2105  lp_value = lp_values[direct_view];
2106  } else {
2107  lp_value =
2108  1.0 - lp_values[encoder->GetLiteralView(literals[i].Negated())];
2109  }
2110  literal_lp_values[i] = lp_value;
2111 
2112  if (lp_value < 1e-6) continue;
2113  relevant_arcs.push_back({tails[i], heads[i], lp_value});
2114  arc_by_decreasing_lp_values.push_back({lp_value, i});
2115  }
2116  std::sort(arc_by_decreasing_lp_values.begin(),
2117  arc_by_decreasing_lp_values.end(),
2118  std::greater<std::pair<double, int>>());
2119 
2120  // We will do a union-find by adding one by one the arc of the lp solution
2121  // in the order above. Every intermediate set during this construction will
2122  // be a candidate for a cut.
2123  //
2124  // In parallel to the union-find, to efficiently reconstruct these sets (at
2125  // most num_nodes), we construct a "decomposition forest" of the different
2126  // connected components. Note that we don't exploit any asymmetric nature of
2127  // the graph here. This is exactly the algo 6.3 in the book above.
2128  int num_components = num_nodes;
2129  std::vector<int> parent(num_nodes);
2130  std::vector<int> root(num_nodes);
2131  for (int i = 0; i < num_nodes; ++i) {
2132  parent[i] = i;
2133  root[i] = i;
2134  }
2135  auto get_root_and_compress_path = [&root](int node) {
2136  int r = node;
2137  while (root[r] != r) r = root[r];
2138  while (root[node] != r) {
2139  const int next = root[node];
2140  root[node] = r;
2141  node = next;
2142  }
2143  return r;
2144  };
2145  for (const auto pair : arc_by_decreasing_lp_values) {
2146  if (num_components == 2) break;
2147  const int tail = get_root_and_compress_path(tails[pair.second]);
2148  const int head = get_root_and_compress_path(heads[pair.second]);
2149  if (tail != head) {
2150  // Update the decomposition forest, note that the number of nodes is
2151  // growing.
2152  const int new_node = parent.size();
2153  parent.push_back(new_node);
2154  parent[head] = new_node;
2155  parent[tail] = new_node;
2156  --num_components;
2157 
2158  // It is important that the union-find representative is the same node.
2159  root.push_back(new_node);
2160  root[head] = new_node;
2161  root[tail] = new_node;
2162  }
2163  }
2164 
2165  // For each node in the decomposition forest, try to add a cut for the set
2166  // formed by the nodes and its children. To do that efficiently, we first
2167  // order the nodes so that for each node in a tree, the set of children forms
2168  // a consecutive span in the pre_order vector. This vector just lists the
2169  // nodes in the "pre-order" graph traversal order. The Spans will point inside
2170  // the pre_order vector, it is why we initialize it once and for all.
2171  int new_size = 0;
2172  std::vector<int> pre_order(num_nodes);
2173  std::vector<absl::Span<const int>> subsets;
2174  {
2175  std::vector<absl::InlinedVector<int, 2>> graph(parent.size());
2176  for (int i = 0; i < parent.size(); ++i) {
2177  if (parent[i] != i) graph[parent[i]].push_back(i);
2178  }
2179  std::vector<int> queue;
2180  std::vector<bool> seen(graph.size(), false);
2181  std::vector<int> start_index(parent.size());
2182  for (int i = num_nodes; i < parent.size(); ++i) {
2183  // Note that because of the way we constructed 'parent', the graph is a
2184  // binary tree. This is not required for the correctness of the algorithm
2185  // here though.
2186  CHECK(graph[i].empty() || graph[i].size() == 2);
2187  if (parent[i] != i) continue;
2188 
2189  // Explore the subtree rooted at node i.
2190  CHECK(!seen[i]);
2191  queue.push_back(i);
2192  while (!queue.empty()) {
2193  const int node = queue.back();
2194  if (seen[node]) {
2195  queue.pop_back();
2196  // All the children of node are in the span [start, end) of the
2197  // pre_order vector.
2198  const int start = start_index[node];
2199  if (new_size - start > 1) {
2200  subsets.emplace_back(&pre_order[start], new_size - start);
2201  }
2202  continue;
2203  }
2204  seen[node] = true;
2205  start_index[node] = new_size;
2206  if (node < num_nodes) pre_order[new_size++] = node;
2207  for (const int child : graph[node]) {
2208  if (!seen[child]) queue.push_back(child);
2209  }
2210  }
2211  }
2212  }
2213 
2214  // Compute the total demands in order to know the minimum incoming/outgoing
2215  // flow.
2216  int64 total_demands = 0;
2217  if (!demands.empty()) {
2218  for (const int64 demand : demands) total_demands += demand;
2219  }
2220 
2221  // Process each subsets and add any violated cut.
2222  CHECK_EQ(pre_order.size(), num_nodes);
2223  std::vector<bool> in_subset(num_nodes, false);
2224  for (const absl::Span<const int> subset : subsets) {
2225  CHECK_GT(subset.size(), 1);
2226  CHECK_LT(subset.size(), num_nodes);
2227 
2228  // These fields will be left untouched if demands.empty().
2229  bool contain_depot = false;
2230  int64 subset_demand = 0;
2231 
2232  // Initialize "in_subset" and the subset demands.
2233  for (const int n : subset) {
2234  in_subset[n] = true;
2235  if (!demands.empty()) {
2236  if (n == 0) contain_depot = true;
2237  subset_demand += demands[n];
2238  }
2239  }
2240 
2241  // Compute a lower bound on the outgoing flow.
2242  //
2243  // TODO(user): This lower bound assume all nodes in subset must be served,
2244  // which is not the case. For TSP we do the correct thing in
2245  // AddOutgoingCut() but not for CVRP... Fix!!
2246  //
2247  // TODO(user): It could be very interesting to see if this "min outgoing
2248  // flow" cannot be automatically infered from the constraint in the
2249  // precedence graph. This might work if we assume that any kind of path
2250  // cumul constraint is encoded with constraints:
2251  // [edge => value_head >= value_tail + edge_weight].
2252  // We could take the minimum incoming edge weight per node in the set, and
2253  // use the cumul variable domain to infer some capacity.
2254  int64 min_outgoing_flow = 1;
2255  if (!demands.empty()) {
2256  min_outgoing_flow =
2257  contain_depot
2258  ? (total_demands - subset_demand + capacity - 1) / capacity
2259  : (subset_demand + capacity - 1) / capacity;
2260  }
2261 
2262  // We still need to serve nodes with a demand of zero, and in the corner
2263  // case where all node in subset have a zero demand, the formula above
2264  // result in a min_outgoing_flow of zero.
2265  min_outgoing_flow = std::max(min_outgoing_flow, int64{1});
2266 
2267  // Compute the current outgoing flow out of the subset.
2268  //
2269  // This can take a significant portion of the running time, it is why it is
2270  // faster to do it only on arcs with non-zero lp values which should be in
2271  // linear number rather than the total number of arc which can be quadratic.
2272  //
2273  // TODO(user): For the symmetric case there is an even faster algo. See if
2274  // it can be generalized to the asymmetric one if become needed.
2275  // Reference is algo 6.4 of the "The Traveling Salesman Problem" book
2276  // mentionned above.
2277  double outgoing_flow = 0.0;
2278  for (const auto arc : relevant_arcs) {
2279  if (in_subset[arc.tail] && !in_subset[arc.head]) {
2280  outgoing_flow += arc.lp_value;
2281  }
2282  }
2283 
2284  // Add a cut if the current outgoing flow is not enough.
2285  if (outgoing_flow < min_outgoing_flow - 1e-6) {
2286  AddOutgoingCut(num_nodes, subset.size(), in_subset, tails, heads,
2287  literals, literal_lp_values,
2288  /*rhs_lower_bound=*/min_outgoing_flow, lp_values, manager,
2289  model);
2290  }
2291 
2292  // Sparse clean up.
2293  for (const int n : subset) in_subset[n] = false;
2294  }
2295 }
2296 
2297 namespace {
2298 
2299 // Returns for each literal its integer view, or the view of its negation.
2300 std::vector<IntegerVariable> GetAssociatedVariables(
2301  const std::vector<Literal>& literals, Model* model) {
2302  auto* encoder = model->GetOrCreate<IntegerEncoder>();
2303  std::vector<IntegerVariable> result;
2304  for (const Literal l : literals) {
2305  const IntegerVariable direct_view = encoder->GetLiteralView(l);
2306  if (direct_view != kNoIntegerVariable) {
2307  result.push_back(direct_view);
2308  } else {
2309  result.push_back(encoder->GetLiteralView(l.Negated()));
2310  DCHECK_NE(result.back(), kNoIntegerVariable);
2311  }
2312  }
2313  return result;
2314 }
2315 
2316 } // namespace
2317 
2318 // We use a basic algorithm to detect components that are not connected to the
2319 // rest of the graph in the LP solution, and add cuts to force some arcs to
2320 // enter and leave this component from outside.
2322  int num_nodes, const std::vector<int>& tails, const std::vector<int>& heads,
2323  const std::vector<Literal>& literals, Model* model) {
2324  CutGenerator result;
2325  result.vars = GetAssociatedVariables(literals, model);
2326  result.generate_cuts =
2327  [num_nodes, tails, heads, literals, model](
2328  const gtl::ITIVector<IntegerVariable, double>& lp_values,
2329  LinearConstraintManager* manager) {
2331  num_nodes, tails, heads, literals, lp_values,
2332  /*demands=*/{}, /*capacity=*/0, manager, model);
2333  };
2334  return result;
2335 }
2336 
2338  const std::vector<int>& tails,
2339  const std::vector<int>& heads,
2340  const std::vector<Literal>& literals,
2341  const std::vector<int64>& demands,
2342  int64 capacity, Model* model) {
2343  CutGenerator result;
2344  result.vars = GetAssociatedVariables(literals, model);
2345  result.generate_cuts =
2346  [num_nodes, tails, heads, demands, capacity, literals, model](
2347  const gtl::ITIVector<IntegerVariable, double>& lp_values,
2348  LinearConstraintManager* manager) {
2349  SeparateSubtourInequalities(num_nodes, tails, heads, literals,
2350  lp_values, demands, capacity, manager,
2351  model);
2352  };
2353  return result;
2354 }
2355 
2356 std::function<LiteralIndex()>
2358  IntegerTrail* integer_trail = integer_trail_;
2359  IntegerEncoder* integer_encoder = model->GetOrCreate<IntegerEncoder>();
2360  // Gather all 0-1 variables that appear in some LP.
2361  std::vector<IntegerVariable> variables;
2362  for (IntegerVariable var : integer_variables_) {
2363  if (integer_trail_->LowerBound(var) == 0 &&
2364  integer_trail_->UpperBound(var) == 1) {
2365  variables.push_back(var);
2366  }
2367  }
2368  VLOG(1) << "HeuristicLPMostInfeasibleBinary has " << variables.size()
2369  << " variables.";
2370 
2371  return [this, variables, integer_trail, integer_encoder]() {
2372  const double kEpsilon = 1e-6;
2373  // Find most fractional value.
2374  IntegerVariable fractional_var = kNoIntegerVariable;
2375  double fractional_distance_best = -1.0;
2376  for (const IntegerVariable var : variables) {
2377  // Skip ignored and fixed variables.
2378  if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2379  const IntegerValue lb = integer_trail_->LowerBound(var);
2380  const IntegerValue ub = integer_trail_->UpperBound(var);
2381  if (lb == ub) continue;
2382 
2383  // Check variable's support is fractional.
2384  const double lp_value = this->GetSolutionValue(var);
2385  const double fractional_distance =
2386  std::min(std::ceil(lp_value - kEpsilon) - lp_value,
2387  lp_value - std::floor(lp_value + kEpsilon));
2388  if (fractional_distance < kEpsilon) continue;
2389 
2390  // Keep variable if it is farther from integrality than the previous.
2391  if (fractional_distance > fractional_distance_best) {
2392  fractional_var = var;
2393  fractional_distance_best = fractional_distance;
2394  }
2395  }
2396 
2397  if (fractional_var != kNoIntegerVariable) {
2398  return integer_encoder
2400  IntegerLiteral::GreaterOrEqual(fractional_var, IntegerValue(1)))
2401  .Index();
2402  }
2403  return kNoLiteralIndex;
2404  };
2405 }
2406 
// Returns a callback implementing a pseudo-cost-like branching heuristic over
// the 0-1 variables of this LP. The closure learns, per variable, the average
// reduced cost observed while the variable sits at 1 with negative reduced
// cost (i.e. the estimated cost of pushing it back to 0), and branches
// (var >= 1) on the unfixed variable with the highest such average.
//
// NOTE(review): this listing is a generated (Doxygen-style) text dump. The
// leading "24xx" tokens are the generator's line numbers, not code, and the
// original line 2408 carrying the qualified function name —
// LinearProgrammingConstraint::HeuristicLPPseudoCostBinary(Model* model),
// per the cross-reference index below — was dropped by the extraction.
// Confirm against the upstream file before editing.
2407 std::function<LiteralIndex()>
2409  // Gather all 0-1 variables that appear in this LP.
2410  std::vector<IntegerVariable> variables;
2411  for (IntegerVariable var : integer_variables_) {
2412  if (integer_trail_->LowerBound(var) == 0 &&
2413  integer_trail_->UpperBound(var) == 1) {
2414  variables.push_back(var);
2415  }
2416  }
2417  VLOG(1) << "HeuristicLPPseudoCostBinary has " << variables.size()
2418  << " variables.";
2419 
2420  // Store average of reduced cost from 1 to 0. The best heuristic only sets
2421  // variables to one and cares about cost to zero, even though classic
2422  // pseudocost will use max_var min(cost_to_one[var], cost_to_zero[var]).
2423  const int num_vars = variables.size();
2424  std::vector<double> cost_to_zero(num_vars, 0.0);
2425  std::vector<int> num_cost_to_zero(num_vars);
2426  int num_calls = 0;
2427 
2428  IntegerEncoder* integer_encoder = model->GetOrCreate<IntegerEncoder>();
// The [=] mutable capture copies all of the state above into the closure, so
// the pseudo-cost statistics persist across calls of the returned callback.
2429  return [=]() mutable {
2430  const double kEpsilon = 1e-6;
2431 
2432  // Every 10000 calls, decay pseudocosts.
2433  num_calls++;
2434  if (num_calls == 10000) {
2435  for (int i = 0; i < num_vars; i++) {
2436  cost_to_zero[i] /= 2;
2437  num_cost_to_zero[i] /= 2;
2438  }
2439  num_calls = 0;
2440  }
2441 
2442  // Accumulate pseudo-costs of all unassigned variables.
2443  for (int i = 0; i < num_vars; i++) {
2444  const IntegerVariable var = variables[i];
2445  // Skip ignored and fixed variables.
2446  if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2447  const IntegerValue lb = integer_trail_->LowerBound(var);
2448  const IntegerValue ub = integer_trail_->UpperBound(var);
2449  if (lb == ub) continue;
2450 
2451  const double rc = this->GetSolutionReducedCost(var);
2452  // Skip reduced costs that are nonzero because of numerical issues.
2453  if (std::abs(rc) < kEpsilon) continue;
2454 
// Only a variable rounding to 1 with a negative reduced cost contributes:
// -rc is the estimated objective degradation of moving it down to 0.
2455  const double value = std::round(this->GetSolutionValue(var));
2456  if (value == 1.0 && rc < 0.0) {
2457  cost_to_zero[i] -= rc;
2458  num_cost_to_zero[i]++;
2459  }
2460  }
2461 
2462  // Select noninstantiated variable with highest pseudo-cost.
2463  int selected_index = -1;
2464  double best_cost = 0.0;
2465  for (int i = 0; i < num_vars; i++) {
2466  const IntegerVariable var = variables[i];
2467  // Skip ignored and fixed variables.
2468  if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2469  if (integer_trail_->IsFixed(var)) continue;
2470 
2471  if (num_cost_to_zero[i] > 0 &&
2472  best_cost < cost_to_zero[i] / num_cost_to_zero[i]) {
2473  best_cost = cost_to_zero[i] / num_cost_to_zero[i];
2474  selected_index = i;
2475  }
2476  }
2477 
// Branch by trying the selected variable at 1 first; if no variable has any
// pseudo-cost data, fall through to the default search heuristic.
2478  if (selected_index >= 0) {
2479  const Literal decision = integer_encoder->GetOrCreateAssociatedLiteral(
2480  IntegerLiteral::GreaterOrEqual(variables[selected_index],
2481  IntegerValue(1)));
2482  return decision.Index();
2483  }
2484 
2485  return kNoLiteralIndex;
2486  };
2487 }
2488 
2489 void LinearProgrammingConstraint::UpdateAverageReducedCosts() {
2490  const int num_vars = integer_variables_.size();
2491  if (sum_cost_down_.size() < num_vars) {
2492  sum_cost_down_.resize(num_vars, 0.0);
2493  num_cost_down_.resize(num_vars, 0);
2494  sum_cost_up_.resize(num_vars, 0.0);
2495  num_cost_up_.resize(num_vars, 0);
2496  rc_scores_.resize(num_vars, 0.0);
2497  }
2498 
2499  // Decay averages.
2500  num_calls_since_reduced_cost_averages_reset_++;
2501  if (num_calls_since_reduced_cost_averages_reset_ == 10000) {
2502  for (int i = 0; i < num_vars; i++) {
2503  sum_cost_up_[i] /= 2;
2504  num_cost_up_[i] /= 2;
2505  sum_cost_down_[i] /= 2;
2506  num_cost_down_[i] /= 2;
2507  }
2508  num_calls_since_reduced_cost_averages_reset_ = 0;
2509  }
2510 
2511  // Accumulate reduced costs of all unassigned variables.
2512  for (int i = 0; i < num_vars; i++) {
2513  const IntegerVariable var = integer_variables_[i];
2514 
2515  // Skip ignored and fixed variables.
2516  if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2517  if (integer_trail_->IsFixed(var)) continue;
2518 
2519  // Skip reduced costs that are zero or close.
2520  const double rc = lp_reduced_cost_[i];
2521  if (std::abs(rc) < kCpEpsilon) continue;
2522 
2523  if (rc < 0.0) {
2524  sum_cost_down_[i] -= rc;
2525  num_cost_down_[i]++;
2526  } else {
2527  sum_cost_up_[i] += rc;
2528  num_cost_up_[i]++;
2529  }
2530  }
2531 
2532  // Tricky, we artificially reset the rc_rev_int_repository_ to level zero
2533  // so that the rev_rc_start_ is zero.
2534  rc_rev_int_repository_.SetLevel(0);
2535  rc_rev_int_repository_.SetLevel(trail_->CurrentDecisionLevel());
2536  rev_rc_start_ = 0;
2537 
2538  // Cache the new score (higher is better) using the average reduced costs
2539  // as a signal.
2540  positions_by_decreasing_rc_score_.clear();
2541  for (int i = 0; i < num_vars; i++) {
2542  // If only one direction exist, we takes its value divided by 2, so that
2543  // such variable should have a smaller cost than the min of the two side
2544  // except if one direction have a really high reduced costs.
2545  const double a_up =
2546  num_cost_up_[i] > 0 ? sum_cost_up_[i] / num_cost_up_[i] : 0.0;
2547  const double a_down =
2548  num_cost_down_[i] > 0 ? sum_cost_down_[i] / num_cost_down_[i] : 0.0;
2549  if (num_cost_down_[i] > 0 && num_cost_up_[i] > 0) {
2550  rc_scores_[i] = std::min(a_up, a_down);
2551  } else {
2552  rc_scores_[i] = 0.5 * (a_down + a_up);
2553  }
2554 
2555  // We ignore scores of zero (i.e. no data) and will follow the default
2556  // search heuristic if all variables are like this.
2557  if (rc_scores_[i] > 0.0) {
2558  positions_by_decreasing_rc_score_.push_back({-rc_scores_[i], i});
2559  }
2560  }
2561  std::sort(positions_by_decreasing_rc_score_.begin(),
2562  positions_by_decreasing_rc_score_.end());
2563 }
2564 
// Returns the reduced-cost-average branching heuristic as a closure that
// simply forwards each call to LPReducedCostAverageDecision() on this object.
//
// NOTE(review): generated listing — original line 2566, carrying the
// qualified function name
// (LinearProgrammingConstraint::LPReducedCostAverageBranching() per the
// cross-reference index below), was dropped by the extraction.
2565 std::function<LiteralIndex()>
2567  return [this]() { return this->LPReducedCostAverageDecision(); };
2568 }
2569 
// Picks the next branching literal using the cached scores built by
// UpdateAverageReducedCosts(): scan candidates by decreasing score, skip
// ignored/fixed variables, then choose which side of the selected variable
// to try first (bound, or the more promising of the floor/ceil split).
//
// NOTE(review): generated listing — the "25xx"/"26xx" prefixes are the
// generator's line numbers, and two hyperlinked lines were dropped by the
// extraction: original line 2597 (the IntegerLiteral argument of the first
// GetOrCreateAssociatedLiteral call; presumably
// IntegerLiteral::GreaterOrEqual(var, ub)) and original line 2609
// (presumably IntegerLiteral::LowerOrEqual(var, lb)). Confirm both against
// the upstream file before editing.
2570 LiteralIndex LinearProgrammingConstraint::LPReducedCostAverageDecision() {
2571  // Select noninstantiated variable with highest positive average reduced cost.
2572  int selected_index = -1;
2573  const int size = positions_by_decreasing_rc_score_.size();
// Save rev_rc_start_ so it is restored on backtrack; entries before it were
// already ignored/fixed when last scanned, so the scan can resume there.
2574  rc_rev_int_repository_.SaveState(&rev_rc_start_);
2575  for (int i = rev_rc_start_; i < size; ++i) {
2576  const int index = positions_by_decreasing_rc_score_[i].second;
2577  const IntegerVariable var = integer_variables_[index];
2578  if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2579  if (integer_trail_->IsFixed(var)) continue;
2580  selected_index = index;
2581  rev_rc_start_ = i;
2582  break;
2583  }
2584 
// No scored, unfixed candidate: defer to the default search heuristic.
2585  if (selected_index == -1) return kNoLiteralIndex;
2586  const IntegerVariable var = integer_variables_[selected_index];
2587 
2588  // If ceil(value) is current upper bound, try var == upper bound first.
2589  // Guarding with >= prevents numerical problems.
2590  // With 0/1 variables, this will tend to try setting to 1 first,
2591  // which produces more shallow trees.
2592  const IntegerValue ub = integer_trail_->UpperBound(var);
2593  const IntegerValue value_ceil(
2594  std::ceil(this->GetSolutionValue(var) - kCpEpsilon));
2595  if (value_ceil >= ub) {
2596  const Literal result = integer_encoder_->GetOrCreateAssociatedLiteral(
2598  CHECK(!trail_->Assignment().LiteralIsAssigned(result));
2599  return result.Index();
2600  }
2601 
2602  // If floor(value) is current lower bound, try var == lower bound first.
2603  // Guarding with <= prevents numerical problems.
2604  const IntegerValue lb = integer_trail_->LowerBound(var);
2605  const IntegerValue value_floor(
2606  std::floor(this->GetSolutionValue(var) + kCpEpsilon));
2607  if (value_floor <= lb) {
2608  const Literal result = integer_encoder_->GetOrCreateAssociatedLiteral(
2610  CHECK(!trail_->Assignment().LiteralIsAssigned(result))
2611  << " " << lb << " " << ub;
2612  return result.Index();
2613  }
2614 
2615  // Here lb < value_floor <= value_ceil < ub.
2616  // Try the most promising split between var <= floor or var >= ceil.
// a_up / a_down are the average reduced costs in each direction (0.0 when
// there is no data for that direction).
2617  const double a_up =
2618  num_cost_up_[selected_index] > 0
2619  ? sum_cost_up_[selected_index] / num_cost_up_[selected_index]
2620  : 0.0;
2621  const double a_down =
2622  num_cost_down_[selected_index] > 0
2623  ? sum_cost_down_[selected_index] / num_cost_down_[selected_index]
2624  : 0.0;
// Branch first toward the direction with the lower expected degradation.
2625  if (a_down < a_up) {
2626  const Literal result = integer_encoder_->GetOrCreateAssociatedLiteral(
2627  IntegerLiteral::LowerOrEqual(var, value_floor));
2628  CHECK(!trail_->Assignment().LiteralIsAssigned(result));
2629  return result.Index();
2630  } else {
2631  const Literal result = integer_encoder_->GetOrCreateAssociatedLiteral(
2632  IntegerLiteral::GreaterOrEqual(var, value_ceil));
2633  CHECK(!trail_->Assignment().LiteralIsAssigned(result));
2634  return result.Index();
2635  }
2636 }
2637 
2638 } // namespace sat
2639 } // namespace operations_research
operations_research::sat::Trail
Definition: sat_base.h:233
operations_research::sat::ImpliedBoundsProcessor::ClearCache
void ClearCache() const
Definition: cuts.h:106
var
IntVar * var
Definition: expr_array.cc:1858
operations_research::glop::LpScalingHelper::UnscaleDualValue
Fractional UnscaleDualValue(RowIndex row, Fractional value) const
Definition: lp_data_utils.cc:108
operations_research::sat::LinearProgrammingConstraint::RegisterWith
void RegisterWith(Model *model)
Definition: linear_programming_constraint.cc:378
tail
int64 tail
Definition: routing_flow.cc:127
operations_research::sat::IntegerTrail::IsFixed
bool IsFixed(IntegerVariable i) const
Definition: integer.h:1225
operations_research::glop::ConstraintStatus::FREE
@ FREE
operations_research::sat::LinearProgrammingConstraint::HeuristicLPMostInfeasibleBinary
std::function< LiteralIndex()> HeuristicLPMostInfeasibleBinary(Model *model)
Definition: linear_programming_constraint.cc:2357
operations_research::sat::IntegerLiteral
Definition: integer.h:164
operations_research::sat::CutGenerator::vars
std::vector< IntegerVariable > vars
Definition: cuts.h:41
operations_research::glop::RevisedSimplex::GetObjectiveValue
Fractional GetObjectiveValue() const
Definition: revised_simplex.cc:419
min
int64 min
Definition: alldiff_cst.cc:138
operations_research::sat::LinearConstraint::ub
IntegerValue ub
Definition: linear_constraint.h:41
integral_types.h
operations_research::glop::DenseRow
StrictITIVector< ColIndex, Fractional > DenseRow
Definition: lp_types.h:299
map_util.h
operations_research::glop::RevisedSimplex::LoadStateForNextSolve
void LoadStateForNextSolve(const BasisState &state)
Definition: revised_simplex.cc:124
operations_research::sat::IntegerTrail::Index
int Index() const
Definition: integer.h:811
operations_research::glop::VariableStatus::BASIC
@ BASIC
operations_research::sat::IntegerLiteral::GreaterOrEqual
static IntegerLiteral GreaterOrEqual(IntegerVariable i, IntegerValue bound)
Definition: integer.h:1197
operations_research::Arc
std::pair< int64, int64 > Arc
Definition: search.cc:3355
operations_research::CapSub
int64 CapSub(int64 x, int64 y)
Definition: saturated_arithmetic.h:154
operations_research::sat::kNoIntegerVariable
const IntegerVariable kNoIntegerVariable(-1)
operations_research::sat::FloorRatio
IntegerValue FloorRatio(IntegerValue dividend, IntegerValue positive_divisor)
Definition: integer.h:101
operations_research::sat::VariableIsPositive
bool VariableIsPositive(IntegerVariable i)
Definition: integer.h:141
max
int64 max
Definition: alldiff_cst.cc:139
operations_research::sat::LinearProgrammingConstraint::SetObjectiveCoefficient
void SetObjectiveCoefficient(IntegerVariable ivar, IntegerValue coeff)
Definition: linear_programming_constraint.cc:125
operations_research::glop::ProblemStatus::ABNORMAL
@ ABNORMAL
bound
int64 bound
Definition: routing_search.cc:972
operations_research::sat::ImpliedBounds
Definition: implied_bounds.h:77
operations_research::CapProd
int64 CapProd(int64 x, int64 y)
Definition: saturated_arithmetic.h:231
operations_research::sat::kNoLiteralIndex
const LiteralIndex kNoLiteralIndex(-1)
operations_research::glop::LpScalingHelper::UnscaleReducedCost
Fractional UnscaleReducedCost(ColIndex col, Fractional value) const
Definition: lp_data_utils.cc:102
operations_research::sat::GenericLiteralWatcher::WatchUpperBound
void WatchUpperBound(IntegerVariable var, int id, int watch_index=-1)
Definition: integer.h:1294
operations_research::glop::LinearProgram::CreateNewConstraint
RowIndex CreateNewConstraint()
Definition: lp_data.cc:189
operations_research::sat::IntegerTrail::UpperBound
IntegerValue UpperBound(IntegerVariable i) const
Definition: integer.h:1221
operations_research::sat::CreateStronglyConnectedGraphCutGenerator
CutGenerator CreateStronglyConnectedGraphCutGenerator(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, Model *model)
Definition: linear_programming_constraint.cc:2321
operations_research::glop::RevisedSimplex::GetProblemStatus
ProblemStatus GetProblemStatus() const
Definition: revised_simplex.cc:415
operations_research::glop::ConstraintStatus::FIXED_VALUE
@ FIXED_VALUE
operations_research::sat::ImpliedBoundsProcessor::AddLpVariable
void AddLpVariable(IntegerVariable var)
Definition: cuts.h:102
operations_research::sat::IntegerTrail::LevelZeroLowerBound
IntegerValue LevelZeroLowerBound(IntegerVariable var) const
Definition: integer.h:1267
operations_research::sat::SeparateSubtourInequalities
void SeparateSubtourInequalities(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, const gtl::ITIVector< IntegerVariable, double > &lp_values, absl::Span< const int64 > demands, int64 capacity, LinearConstraintManager *manager, Model *model)
Definition: linear_programming_constraint.cc:2080
operations_research::glop::kEpsilon
const double kEpsilon
Definition: lp_types.h:86
operations_research::sat::LinearProgrammingConstraintCollection
Definition: linear_programming_constraint.h:463
operations_research::sat::LinearProgrammingConstraintLpSolution
Definition: linear_programming_constraint.h:50
logging.h
operations_research::sat::IncrementalAverage::AddData
void AddData(double new_record)
Definition: sat/util.cc:68
operations_research::sat::LinearProgrammingDispatcher
Definition: linear_programming_constraint.h:456
operations_research::glop::LinearProgram::SetObjectiveCoefficient
void SetObjectiveCoefficient(ColIndex col, Fractional value)
Definition: lp_data.cc:324
operations_research::glop::RevisedSimplex::ClearStateForNextSolve
void ClearStateForNextSolve()
Definition: revised_simplex.cc:119
gtl::ITIVector::clear
void clear()
Definition: int_type_indexed_vector.h:169
operations_research::sat::IntegerTrail::LevelZeroUpperBound
IntegerValue LevelZeroUpperBound(IntegerVariable var) const
Definition: integer.h:1272
operations_research::sat::GenericLiteralWatcher::WatchIntegerVariable
void WatchIntegerVariable(IntegerVariable i, int id, int watch_index=-1)
Definition: integer.h:1300
operations_research::glop::RevisedSimplex::SetParameters
void SetParameters(const GlopParameters &parameters)
Definition: revised_simplex.cc:2917
value
int64 value
Definition: demon_profiler.cc:43
operations_research::sat::LinearProgrammingConstraint::AddLinearConstraint
void AddLinearConstraint(const LinearConstraint &ct)
Definition: linear_programming_constraint.cc:87
operations_research::sat::LinearConstraintManager::AddCut
bool AddCut(LinearConstraint ct, std::string type_name, const gtl::ITIVector< IntegerVariable, double > &lp_solution, std::string extra_info="")
Definition: linear_constraint_manager.cc:204
gtl::ITIVector::resize
void resize(size_type new_size)
Definition: int_type_indexed_vector.h:149
operations_research::sat::SearchHeuristicsVector
Definition: integer.h:258
saturated_arithmetic.h
operations_research
The vehicle routing library lets one model and solve generic vehicle routing problems ranging from th...
Definition: dense_doubly_linked_list.h:21
operations_research::glop::RevisedSimplex::GetConstraintStatus
ConstraintStatus GetConstraintStatus(RowIndex row) const
Definition: revised_simplex.cc:457
operations_research::glop::RevisedSimplex::GetProblemNumRows
RowIndex GetProblemNumRows() const
Definition: revised_simplex.cc:425
operations_research::glop::RevisedSimplex::NotifyThatMatrixIsUnchangedForNextSolve
void NotifyThatMatrixIsUnchangedForNextSolve()
Definition: revised_simplex.cc:130
operations_research::sat::LinearProgrammingConstraint::IncrementalPropagate
bool IncrementalPropagate(const std::vector< int > &watch_indices) override
Definition: linear_programming_constraint.cc:450
operations_research::sat::NegationOf
std::vector< IntegerVariable > NegationOf(const std::vector< IntegerVariable > &vars)
Definition: integer.cc:42
operations_research::sat::IntegerTrail::Enqueue
ABSL_MUST_USE_RESULT bool Enqueue(IntegerLiteral i_lit, absl::Span< const Literal > literal_reason, absl::Span< const IntegerLiteral > integer_reason)
Definition: integer.cc:965
operations_research::sat::IntegerTrail::IsCurrentlyIgnored
bool IsCurrentlyIgnored(IntegerVariable i) const
Definition: integer.h:612
operations_research::sat::LinearConstraintManager::ChangeLp
bool ChangeLp(const gtl::ITIVector< IntegerVariable, double > &lp_solution, glop::BasisState *solution_state)
Definition: linear_constraint_manager.cc:437
kint64min
static const int64 kint64min
Definition: integral_types.h:60
operations_research::sat::GenericLiteralWatcher::Register
int Register(PropagatorInterface *propagator)
Definition: integer.cc:1798
operations_research::sat::IntegerTrail
Definition: integer.h:534
operations_research::RevRepository::SetLevel
void SetLevel(int level) final
Definition: rev.h:134
operations_research::glop::LpScalingHelper::UnscaleVariableValue
Fractional UnscaleVariableValue(ColIndex col, Fractional value) const
Definition: lp_data_utils.cc:96
operations_research::sat::PositiveVariable
IntegerVariable PositiveVariable(IntegerVariable i)
Definition: integer.h:145
int64
int64_t int64
Definition: integral_types.h:34
gtl::ITIVector::size
size_type size() const
Definition: int_type_indexed_vector.h:146
operations_research::sat::IntegerEncoder::GetOrCreateAssociatedLiteral
Literal GetOrCreateAssociatedLiteral(IntegerLiteral i_lit)
Definition: integer.cc:217
operations_research::sat::Model
Class that owns everything related to a particular optimization model.
Definition: sat/model.h:38
operations_research::glop::LinearProgram::SetConstraintBounds
void SetConstraintBounds(RowIndex row, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:307
index
int index
Definition: pack.cc:508
operations_research::sat::LinearProgrammingConstraint::GetSolutionValue
double GetSolutionValue(IntegerVariable variable) const
Definition: linear_programming_constraint.cc:487
operations_research::glop::Fractional
double Fractional
Definition: lp_types.h:77
operations_research::glop::ConstraintStatus::AT_UPPER_BOUND
@ AT_UPPER_BOUND
operations_research::sat::SatSolver
Definition: sat_solver.h:58
gtl::FindOrDie
const Collection::value_type::second_type & FindOrDie(const Collection &collection, const typename Collection::value_type::first_type &key)
Definition: map_util.h:176
operations_research::sat::Trail::CurrentDecisionLevel
int CurrentDecisionLevel() const
Definition: sat_base.h:355
operations_research::sat::IntegerTrail::RegisterReversibleClass
void RegisterReversibleClass(ReversibleInterface *rev)
Definition: integer.h:807
operations_research::glop::RevisedSimplex::GetDualRayRowCombination
const DenseRow & GetDualRayRowCombination() const
Definition: revised_simplex.cc:479
operations_research::glop::BasisState
Definition: revised_simplex.h:132
strongly_connected_components.h
operations_research::sat::LinearConstraintManager::Add
ConstraintIndex Add(LinearConstraint ct, bool *added=nullptr)
Definition: linear_constraint_manager.cc:122
operations_research::sat::Literal
Definition: sat_base.h:64
demand
int64 demand
Definition: resource.cc:123
operations_research::glop::LinearProgram::GetDimensionString
std::string GetDimensionString() const
Definition: lp_data.cc:423
operations_research::glop::RowToColIndex
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:48
operations_research::RevRepository::SaveState
void SaveState(T *object)
Definition: rev.h:61
DEBUG_MODE
const bool DEBUG_MODE
Definition: macros.h:24
operations_research::sat::LinearProgrammingConstraint::LinearProgrammingConstraint
LinearProgrammingConstraint(Model *model)
Definition: linear_programming_constraint.cc:54
operations_research::sat::LinearConstraint
Definition: linear_constraint.h:39
operations_research::glop::RevisedSimplex::GetVariableStatus
VariableStatus GetVariableStatus(ColIndex col) const
Definition: revised_simplex.cc:445
a
int64 a
Definition: constraint_solver/table.cc:42
operations_research::glop::RevisedSimplex::GetState
const BasisState & GetState() const
Definition: revised_simplex.cc:449
operations_research::glop::RevisedSimplex::GetDualRay
const DenseColumn & GetDualRay() const
Definition: revised_simplex.cc:474
operations_research::sat::GenericLiteralWatcher
Definition: integer.h:1056
operations_research::glop::LinearProgram::SetCoefficient
void SetCoefficient(RowIndex row, ColIndex col, Fractional value)
Definition: lp_data.cc:315
operations_research::sat::IntegerEncoder
Definition: integer.h:278
operations_research::glop::RevisedSimplex::GetReducedCost
Fractional GetReducedCost(ColIndex col) const
Definition: revised_simplex.cc:433
operations_research::sat::ImpliedBoundsProcessor::DebugSlack
bool DebugSlack(IntegerVariable first_slack, const LinearConstraint &initial_cut, const LinearConstraint &cut, const std::vector< SlackInfo > &info)
Definition: cuts.cc:1534
operations_research::sat::LinearProgrammingConstraint::LPReducedCostAverageBranching
std::function< LiteralIndex()> LPReducedCostAverageBranching()
Definition: linear_programming_constraint.cc:2566
operations_research::sat::IntTypeAbs
IntType IntTypeAbs(IntType t)
Definition: integer.h:77
operations_research::CapAdd
int64 CapAdd(int64 x, int64 y)
Definition: saturated_arithmetic.h:124
operations_research::TimeLimit
A simple class to enforce both an elapsed time limit and a deterministic time limit in the same threa...
Definition: time_limit.h:105
operations_research::sat::kMaxIntegerValue
constexpr IntegerValue kMaxIntegerValue(std::numeric_limits< IntegerValue::ValueType >::max() - 1)
operations_research::sat::IntegerTrail::RelaxLinearReason
void RelaxLinearReason(IntegerValue slack, absl::Span< const IntegerValue > coeffs, std::vector< IntegerLiteral > *reason) const
Definition: integer.cc:773
mathutil.h
operations_research::sat::ModelRandomGenerator
Definition: sat/util.h:33
operations_research::sat::IntegerTrail::UpperBoundAsLiteral
IntegerLiteral UpperBoundAsLiteral(IntegerVariable i) const
Definition: integer.h:1252
operations_research::sat::IntegerRoundingCutHelper::NumLiftedBooleans
int NumLiftedBooleans() const
Definition: cuts.h:210
int_type_indexed_vector.h
operations_research::glop::LinearProgram::SetVariableBounds
void SetVariableBounds(ColIndex col, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:247
operations_research::sat::Trail::Assignment
const VariablesAssignment & Assignment() const
Definition: sat_base.h:380
operations_research::glop::ConstraintStatus::BASIC
@ BASIC
operations_research::sat::LinearProgrammingConstraint::GetSolutionReducedCost
double GetSolutionReducedCost(IntegerVariable variable) const
Definition: linear_programming_constraint.cc:492
operations_research::sat::LinearProgrammingConstraint::~LinearProgrammingConstraint
~LinearProgrammingConstraint() override
Definition: linear_programming_constraint.cc:82
ct
const Constraint * ct
Definition: demon_profiler.cc:42
operations_research::sat::AddProductTo
bool AddProductTo(IntegerValue a, IntegerValue b, IntegerValue *result)
Definition: integer.h:121
operations_research::sat::LinearConstraintManager::LpConstraints
const std::vector< ConstraintIndex > & LpConstraints() const
Definition: linear_constraint_manager.h:117
operations_research::MathUtil::GCD64
static int64 GCD64(int64 x, int64 y)
Definition: mathutil.h:107
operations_research::glop::LinearProgram::AddSlackVariablesWhereNecessary
void AddSlackVariablesWhereNecessary(bool detect_integer_constraints)
Definition: lp_data.cc:666
operations_research::sat::IncrementalAverage::CurrentAverage
double CurrentAverage() const
Definition: sat/util.h:113
operations_research::sat::LinearProgrammingConstraint::SetLevel
void SetLevel(int level) override
Definition: linear_programming_constraint.cc:420
operations_research::glop::LpScalingHelper::Scale
void Scale(LinearProgram *lp)
Definition: lp_data_utils.cc:76
operations_research::glop::ProblemStatus::DUAL_UNBOUNDED
@ DUAL_UNBOUNDED
operations_research::sat::IntegerTrail::ReportConflict
bool ReportConflict(absl::Span< const Literal > literal_reason, absl::Span< const IntegerLiteral > integer_reason)
Definition: integer.h:784
operations_research::sat::IntegerTrail::LowerBoundAsLiteral
IntegerLiteral LowerBoundAsLiteral(IntegerVariable i) const
Definition: integer.h:1247
operations_research::sat::LinearConstraintManager
Definition: linear_constraint_manager.h:40
implied_bounds.h
gtl::ITIVector::assign
void assign(size_type n, const value_type &val)
Definition: int_type_indexed_vector.h:130
operations_research::sat::IntegerEncoder::GetLiteralView
const IntegerVariable GetLiteralView(Literal lit) const
Definition: integer.h:422
operations_research::glop::RevisedSimplex::GetDualValue
Fractional GetDualValue(RowIndex row) const
Definition: revised_simplex.cc:441
model
GRBmodel * model
Definition: gurobi_interface.cc:195
operations_research::sat::LinearConstraintManager::AddAllConstraintsToLp
void AddAllConstraintsToLp()
Definition: linear_constraint_manager.cc:669
operations_research::glop::RevisedSimplex::Solve
ABSL_MUST_USE_RESULT Status Solve(const LinearProgram &lp, TimeLimit *time_limit)
Definition: revised_simplex.cc:134
operations_research::glop::LinearProgram::num_constraints
RowIndex num_constraints() const
Definition: lp_data.h:208
operations_research::sat::GenericLiteralWatcher::SetPropagatorPriority
void SetPropagatorPriority(int id, int priority)
Definition: integer.cc:1821
operations_research::sat::ToDouble
double ToDouble(IntegerValue value)
Definition: integer.h:69
operations_research::sat::LinearConstraintManager::AllConstraints
const gtl::ITIVector< ConstraintIndex, ConstraintInfo > & AllConstraints() const
Definition: linear_constraint_manager.h:110
operations_research::glop::LinearProgram::objective_scaling_factor
Fractional objective_scaling_factor() const
Definition: lp_data.h:261
operations_research::glop::ProblemStatus::OPTIMAL
@ OPTIMAL
operations_research::sat::IntegerTrail::LowerBound
IntegerValue LowerBound(IntegerVariable i) const
Definition: integer.h:1217
col
ColIndex col
Definition: markowitz.cc:176
operations_research::sat::LinearProgrammingConstraint::AddCutGenerator
void AddCutGenerator(CutGenerator generator)
Definition: linear_programming_constraint.cc:443
operations_research::sat::kMinIntegerValue
constexpr IntegerValue kMinIntegerValue(-kMaxIntegerValue)
stl_util.h
row
RowIndex row
Definition: markowitz.cc:175
operations_research::sat::ComputeActivity
double ComputeActivity(const LinearConstraint &constraint, const gtl::ITIVector< IntegerVariable, double > &values)
Definition: linear_constraint.cc:116
operations_research::glop::RevisedSimplex::GetParameters
const GlopParameters & GetParameters() const
Definition: revised_simplex.h:153
operations_research::glop::DenseColumn
StrictITIVector< RowIndex, Fractional > DenseColumn
Definition: lp_types.h:328
operations_research::sat::ImpliedBoundsProcessor::ProcessUpperBoundedConstraintWithSlackCreation
void ProcessUpperBoundedConstraintWithSlackCreation(bool substitute_only_inner_variables, IntegerVariable first_slack, const gtl::ITIVector< IntegerVariable, double > &lp_values, LinearConstraint *cut, std::vector< SlackInfo > *slack_infos, std::vector< LinearConstraint > *implied_bound_cuts) const
Definition: cuts.cc:1391
operations_research::sat::LinearProgrammingConstraint::Propagate
bool Propagate() override
Definition: linear_programming_constraint.cc:1171
operations_research::glop::ConstraintStatus::AT_LOWER_BOUND
@ AT_LOWER_BOUND
operations_research::glop::LinearProgram::NotifyThatColumnsAreClean
void NotifyThatColumnsAreClean()
Definition: lp_data.h:539
operations_research::glop::RevisedSimplex::GetVariableValue
Fractional GetVariableValue(ColIndex col) const
Definition: revised_simplex.cc:429
operations_research::sat::LinearConstraint::vars
std::vector< IntegerVariable > vars
Definition: linear_constraint.h:42
operations_research::sat::IntegerLiteral::LowerOrEqual
static IntegerLiteral LowerOrEqual(IntegerVariable i, IntegerValue bound)
Definition: integer.h:1203
b
int64 b
Definition: constraint_solver/table.cc:43
operations_research::sat::IntegerRoundingCutHelper::ComputeCut
void ComputeCut(RoundingOptions options, const std::vector< double > &lp_values, const std::vector< IntegerValue > &lower_bounds, const std::vector< IntegerValue > &upper_bounds, ImpliedBoundsProcessor *ib_processor, LinearConstraint *cut)
Definition: cuts.cc:692
operations_research::sat::GenericLiteralWatcher::AlwaysCallAtLevelZero
void AlwaysCallAtLevelZero(int id)
Definition: integer.cc:1833
operations_research::sat::CutGenerator::generate_cuts
std::function< void(const gtl::ITIVector< IntegerVariable, double > &lp_values, LinearConstraintManager *manager)> generate_cuts
Definition: cuts.h:44
operations_research::glop::RevisedSimplex::GetUnitRowLeftInverse
const ScatteredRow & GetUnitRowLeftInverse(RowIndex row)
Definition: revised_simplex.h:223
gtl::ITIVector
Definition: int_type_indexed_vector.h:76
capacity
int64 capacity
Definition: routing_flow.cc:129
operations_research::sat::CutGenerator
Definition: cuts.h:40
next
Block * next
Definition: constraint_solver.cc:667
operations_research::sat::LinearProgrammingConstraint::ConstraintIndex
glop::RowIndex ConstraintIndex
Definition: linear_programming_constraint.h:82
operations_research::glop::RevisedSimplex::GetReducedCosts
const DenseRow & GetReducedCosts() const
Definition: revised_simplex.cc:437
operations_research::sat::CreateCVRPCutGenerator
CutGenerator CreateCVRPCutGenerator(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, const std::vector< int64 > &demands, int64 capacity, Model *model)
Definition: linear_programming_constraint.cc:2337
operations_research::sat::ComputeInfinityNorm
IntegerValue ComputeInfinityNorm(const LinearConstraint &constraint)
Definition: linear_constraint.cc:135
operations_research::glop::ProblemStatus::DUAL_FEASIBLE
@ DUAL_FEASIBLE
gtl::ITIVector::push_back
void push_back(const value_type &x)
Definition: int_type_indexed_vector.h:157
operations_research::glop::SparseVector::num_entries
EntryIndex num_entries() const
Definition: sparse_vector.h:270
operations_research::sat::LinearProgrammingConstraint::HeuristicLPPseudoCostBinary
std::function< LiteralIndex()> HeuristicLPPseudoCostBinary(Model *model)
Definition: linear_programming_constraint.cc:2408
lp_types.h
head
int64 head
Definition: routing_flow.cc:128
operations_research::sat::DivideByGCD
void DivideByGCD(LinearConstraint *constraint)
Definition: linear_constraint.cc:182
gtl::ITIVector::back
reference back()
Definition: int_type_indexed_vector.h:173
preprocessor.h
operations_research::glop::LinearProgram::CreateNewVariable
ColIndex CreateNewVariable()
Definition: lp_data.cc:160
operations_research::sat::VariablesAssignment::LiteralIsAssigned
bool LiteralIsAssigned(Literal literal) const
Definition: sat_base.h:153
operations_research::glop::LinearProgram::GetSparseColumn
const SparseColumn & GetSparseColumn(ColIndex col) const
Definition: lp_data.cc:407
operations_research::sat::IntegerTrail::RemoveLevelZeroBounds
void RemoveLevelZeroBounds(std::vector< IntegerLiteral > *reason) const
Definition: integer.cc:895
operations_research::sat::Literal::Index
LiteralIndex Index() const
Definition: sat_base.h:84
commandlineflags.h
parameters
SatParameters parameters
Definition: cp_model_fz_solver.cc:107
operations_research::glop::RevisedSimplex::GetProblemNumCols
ColIndex GetProblemNumCols() const
Definition: revised_simplex.cc:427
operations_research::glop::LpScalingHelper::VariableScalingFactor
Fractional VariableScalingFactor(ColIndex col) const
Definition: lp_data_utils.cc:90
integer.h
name
const std::string name
Definition: default_search.cc:807
status.h
operations_research::sat::LinearConstraint::coeffs
std::vector< IntegerValue > coeffs
Definition: linear_constraint.h:43
operations_research::sat::LinearConstraintManager::DebugCheckConstraint
bool DebugCheckConstraint(const LinearConstraint &cut)
Definition: linear_constraint_manager.cc:677
parameters.pb.h
linear_programming_constraint.h
operations_research::glop::RevisedSimplex::GetBasis
ColIndex GetBasis(RowIndex row) const
Definition: revised_simplex.cc:484
kint64max
static const int64 kint64max
Definition: integral_types.h:62
operations_research::glop::RevisedSimplex::GetNumberOfIterations
int64 GetNumberOfIterations() const
Definition: revised_simplex.cc:423
operations_research::sat::GenericLiteralWatcher::RegisterReversibleInt
void RegisterReversibleInt(int id, int *rev)
Definition: integer.cc:1842
operations_research::sat::LinearConstraintManager::SetObjectiveCoefficient
void SetObjectiveCoefficient(IntegerVariable var, IntegerValue coeff)
Definition: linear_constraint_manager.cc:298
operations_research::glop::LinearProgram::Clear
void Clear()
Definition: lp_data.cc:132