OR-Tools  9.3
linear_programming_constraint.cc
Go to the documentation of this file.
1// Copyright 2010-2021 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <cmath>
18#include <cstdint>
19#include <cstdlib>
20#include <functional>
21#include <limits>
22#include <memory>
23#include <random>
24#include <string>
25#include <utility>
26#include <vector>
27
28#include "absl/container/flat_hash_map.h"
29#include "absl/container/inlined_vector.h"
30#include "absl/meta/type_traits.h"
31#include "absl/numeric/int128.h"
32#include "absl/random/distributions.h"
33#include "absl/strings/str_cat.h"
34#include "absl/types/span.h"
39#include "ortools/glop/parameters.pb.h"
41#include "ortools/glop/status.h"
47#include "ortools/sat/cuts.h"
49#include "ortools/sat/integer.h"
53#include "ortools/sat/model.h"
55#include "ortools/sat/sat_parameters.pb.h"
57#include "ortools/sat/util.h"
59#include "ortools/util/bitset.h"
60#include "ortools/util/rev.h"
64
65namespace operations_research {
66namespace sat {
67
68using glop::ColIndex;
70using glop::RowIndex;
71
// NOTE(review): the signature line (presumably
// "void ScatteredIntegerVector::ClearAndResize(int size) {") was lost during
// extraction; the body below resets this vector to `size` zero entries.
  if (is_sparse_) {
    // Sparse path: only clear the previously non-zero entries instead of
    // reassigning the whole dense vector.
    for (const glop::ColIndex col : non_zeros_) {
      dense_vector_[col] = IntegerValue(0);
    }
    dense_vector_.resize(size, IntegerValue(0));
  } else {
    dense_vector_.assign(size, IntegerValue(0));
  }
  // Reset the "is zero" bookkeeping for the old non-zeros too.
  for (const glop::ColIndex col : non_zeros_) {
    is_zeros_[col] = true;
  }
  is_zeros_.resize(size, true);
  non_zeros_.clear();
  // An all-zero vector is sparse by definition.
  is_sparse_ = true;
}
88
89bool ScatteredIntegerVector::Add(glop::ColIndex col, IntegerValue value) {
90 const int64_t add = CapAdd(value.value(), dense_vector_[col].value());
93 return false;
94 dense_vector_[col] = IntegerValue(add);
95 if (is_sparse_ && is_zeros_[col]) {
96 is_zeros_[col] = false;
97 non_zeros_.push_back(col);
98 }
99 return true;
100}
101
// NOTE(review): the signature line (presumably
// "bool ScatteredIntegerVector::AddLinearExpressionMultiple(") was lost
// during extraction; only the parameter list survives below.
    IntegerValue multiplier,
    const std::vector<std::pair<glop::ColIndex, IntegerValue>>& terms) {
  // Heuristic: stay on the sparse code path only while the work is below
  // roughly 10% of the dense size.
  const double threshold = 0.1 * static_cast<double>(dense_vector_.size());
  if (is_sparse_ && static_cast<double>(terms.size()) < threshold) {
    for (const std::pair<glop::ColIndex, IntegerValue>& term : terms) {
      if (is_zeros_[term.first]) {
        is_zeros_[term.first] = false;
        non_zeros_.push_back(term.first);
      }
      // AddProductTo() returns false on overflow; abort in that case.
      if (!AddProductTo(multiplier, term.second, &dense_vector_[term.first])) {
        return false;
      }
    }
    // Switch to the dense representation once too many entries are non-zero.
    if (static_cast<double>(non_zeros_.size()) > threshold) {
      is_sparse_ = false;
    }
  } else {
    // Dense path: no bookkeeping, just accumulate.
    is_sparse_ = false;
    for (const std::pair<glop::ColIndex, IntegerValue>& term : terms) {
      if (!AddProductTo(multiplier, term.second, &dense_vector_[term.first])) {
        return false;
      }
    }
  }
  return true;
}
129
// NOTE(review): the signature line (presumably
// "void ScatteredIntegerVector::ConvertToLinearConstraint(") was lost during
// extraction; only the parameter list survives below.
    const std::vector<IntegerVariable>& integer_variables,
    IntegerValue upper_bound, LinearConstraint* result) {
  result->vars.clear();
  result->coeffs.clear();
  if (is_sparse_) {
    // Sort so the output is deterministic and in increasing column order.
    std::sort(non_zeros_.begin(), non_zeros_.end());
    for (const glop::ColIndex col : non_zeros_) {
      const IntegerValue coeff = dense_vector_[col];
      if (coeff == 0) continue;
      result->vars.push_back(integer_variables[col.value()]);
      result->coeffs.push_back(coeff);
    }
  } else {
    // Dense path: scan every column and keep the non-zero coefficients.
    const int size = dense_vector_.size();
    for (glop::ColIndex col(0); col < size; ++col) {
      const IntegerValue coeff = dense_vector_[col];
      if (coeff == 0) continue;
      result->vars.push_back(integer_variables[col.value()]);
      result->coeffs.push_back(coeff);
    }
  }
  // The produced constraint is of the form "expression <= upper_bound".
  result->lb = kMinIntegerValue;
  result->ub = upper_bound;
}
155
// Returns the non-zero (column, coefficient) pairs of this vector, sorted by
// increasing column index, for both representations.
std::vector<std::pair<glop::ColIndex, IntegerValue>>
// NOTE(review): the line carrying the function name (presumably
// "ScatteredIntegerVector::GetTerms() {") was lost during extraction.
  std::vector<std::pair<glop::ColIndex, IntegerValue>> result;
  if (is_sparse_) {
    // Sort so the output is deterministic and in increasing column order.
    std::sort(non_zeros_.begin(), non_zeros_.end());
    for (const glop::ColIndex col : non_zeros_) {
      const IntegerValue coeff = dense_vector_[col];
      if (coeff != 0) result.push_back({col, coeff});
    }
  } else {
    const int size = dense_vector_.size();
    for (glop::ColIndex col(0); col < size; ++col) {
      const IntegerValue coeff = dense_vector_[col];
      if (coeff != 0) result.push_back({col, coeff});
    }
  }
  return result;
}
174
// TODO(user): make SatParameters singleton too, otherwise changing them after
// a constraint was added will have no effect on this class.
// NOTE(review): the constructor signature line (presumably
// "LinearProgrammingConstraint::LinearProgrammingConstraint(Model* model)")
// was lost during extraction; the initializer list below belongs to it.
    : constraint_manager_(model),
      parameters_(*(model->GetOrCreate<SatParameters>())),
      model_(model),
      time_limit_(model->GetOrCreate<TimeLimit>()),
      integer_trail_(model->GetOrCreate<IntegerTrail>()),
      trail_(model->GetOrCreate<Trail>()),
      integer_encoder_(model->GetOrCreate<IntegerEncoder>()),
      random_(model->GetOrCreate<ModelRandomGenerator>()),
      implied_bounds_processor_({}, integer_trail_,
                                model->GetOrCreate<ImpliedBounds>()),
      dispatcher_(model->GetOrCreate<LinearProgrammingDispatcher>()),
      expanded_lp_solution_(
  // NOTE(review): the argument of expanded_lp_solution_'s initializer and
  // the opening brace of the constructor body were lost during extraction.
  // Tweak the default parameters to make the solve incremental.
  glop::GlopParameters parameters;
  parameters.set_use_dual_simplex(true);
  simplex_.SetParameters(parameters);
  if (parameters_.use_branching_in_lp() ||
      parameters_.search_branching() == SatParameters::LP_SEARCH) {
    compute_reduced_cost_averages_ = true;
  }

  // Register our local rev int repository.
  integer_trail_->RegisterReversibleClass(&rc_rev_int_repository_);
}
203
// NOTE(review): the signature line (presumably
// "void LinearProgrammingConstraint::AddLinearConstraint(") was lost during
// extraction; the parameter list below belongs to it.
    const LinearConstraint& ct) {
  DCHECK(!lp_constraint_is_registered_);
  constraint_manager_.Add(ct);

  // We still create the mirror variable right away though.
  //
  // TODO(user): clean this up? Note that it is important that the variable
  // in lp_data_ never changes though, so we can restart from the current
  // lp solution and be incremental (even if the constraints changed).
  for (const IntegerVariable var : ct.vars) {
    // Only positive variables are mirrored into LP columns.
    GetOrCreateMirrorVariable(PositiveVariable(var));
  }
}
218
219glop::ColIndex LinearProgrammingConstraint::GetOrCreateMirrorVariable(
220 IntegerVariable positive_variable) {
221 DCHECK(VariableIsPositive(positive_variable));
222 const auto it = mirror_lp_variable_.find(positive_variable);
223 if (it == mirror_lp_variable_.end()) {
224 const glop::ColIndex col(integer_variables_.size());
225 implied_bounds_processor_.AddLpVariable(positive_variable);
226 mirror_lp_variable_[positive_variable] = col;
227 integer_variables_.push_back(positive_variable);
228 lp_solution_.push_back(std::numeric_limits<double>::infinity());
229 lp_reduced_cost_.push_back(0.0);
230 (*dispatcher_)[positive_variable] = this;
231
232 const int index = std::max(positive_variable.value(),
233 NegationOf(positive_variable).value());
234 if (index >= expanded_lp_solution_.size()) {
235 expanded_lp_solution_.resize(index + 1, 0.0);
236 }
237 return col;
238 }
239 return it->second;
240}
241
// NOTE(review): the signature line (presumably "void
// LinearProgrammingConstraint::SetObjectiveCoefficient(IntegerVariable ivar,")
// was lost during extraction.
    IntegerValue coeff) {
  CHECK(!lp_constraint_is_registered_);
  objective_is_defined_ = true;
  // Normalize to the positive representation of the variable, flipping the
  // coefficient sign when the caller passed the negated variable.
  IntegerVariable pos_var = VariableIsPositive(ivar) ? ivar : NegationOf(ivar);
  if (ivar != pos_var) coeff = -coeff;

  constraint_manager_.SetObjectiveCoefficient(pos_var, coeff);
  const glop::ColIndex col = GetOrCreateMirrorVariable(pos_var);
  integer_objective_.push_back({col, coeff});
  // Track the largest absolute objective coefficient seen so far.
  objective_infinity_norm_ =
      std::max(objective_infinity_norm_, IntTypeAbs(coeff));
}
255
256// TODO(user): As the search progress, some variables might get fixed. Exploit
257// this to reduce the number of variables in the LP and in the
258// ConstraintManager? We might also detect during the search that two variable
259// are equivalent.
260//
261// TODO(user): On TSP/VRP with a lot of cuts, this can take 20% of the overall
262// running time. We should be able to almost remove most of this from the
263// profile by being more incremental (modulo LP scaling).
264//
265// TODO(user): A longer term idea for LP with a lot of variables is to not
266// add all variables to each LP solve and do some "sifting". That can be useful
267// for TSP for instance where the number of edges is large, but only a small
268// fraction will be used in the optimal solution.
// Rebuilds integer_lp_ and lp_data_ from the constraint manager's current
// set of LP constraints, removes level-zero-fixed variables from the
// objective, scales the LP, and refreshes the variable bounds.
// Returns false when a constraint is trivially infeasible (lb > ub).
bool LinearProgrammingConstraint::CreateLpFromConstraintManager() {
  // Fill integer_lp_.
  integer_lp_.clear();
  infinity_norms_.clear();
  const auto& all_constraints = constraint_manager_.AllConstraints();
  for (const auto index : constraint_manager_.LpConstraints()) {
    const LinearConstraint& ct = all_constraints[index].constraint;

    integer_lp_.push_back(LinearConstraintInternal());
    LinearConstraintInternal& new_ct = integer_lp_.back();
    new_ct.lb = ct.lb;
    new_ct.ub = ct.ub;
    const int size = ct.vars.size();
    IntegerValue infinity_norm(0);
    if (ct.lb > ct.ub) {
      VLOG(1) << "Trivial infeasible bound in an LP constraint";
      return false;
    }
    // The infinity norm of a row includes its finite bounds.
    if (ct.lb > kMinIntegerValue) {
      infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.lb));
    }
    if (ct.ub < kMaxIntegerValue) {
      infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.ub));
    }
    for (int i = 0; i < size; ++i) {
      // We only use positive variable inside this class.
      IntegerVariable var = ct.vars[i];
      IntegerValue coeff = ct.coeffs[i];
      if (!VariableIsPositive(var)) {
        var = NegationOf(var);
        coeff = -coeff;
      }
      infinity_norm = std::max(infinity_norm, IntTypeAbs(coeff));
      new_ct.terms.push_back({GetOrCreateMirrorVariable(var), coeff});
    }
    infinity_norms_.push_back(infinity_norm);

    // Important to keep lp_data_ "clean".
    std::sort(new_ct.terms.begin(), new_ct.terms.end());
  }

  // Copy the integer_lp_ into lp_data_.
  lp_data_.Clear();
  for (int i = 0; i < integer_variables_.size(); ++i) {
    CHECK_EQ(glop::ColIndex(i), lp_data_.CreateNewVariable());
  }

  // We remove fixed variables from the objective. This should help the LP
  // scaling, but also our integer reason computation.
  int new_size = 0;
  objective_infinity_norm_ = 0;
  for (const auto& entry : integer_objective_) {
    const IntegerVariable var = integer_variables_[entry.first.value()];
    if (integer_trail_->IsFixedAtLevelZero(var)) {
      // Fold the fixed term into the constant objective offset.
      integer_objective_offset_ +=
          entry.second * integer_trail_->LevelZeroLowerBound(var);
      continue;
    }
    objective_infinity_norm_ =
        std::max(objective_infinity_norm_, IntTypeAbs(entry.second));
    // In-place compaction of integer_objective_.
    integer_objective_[new_size++] = entry;
    lp_data_.SetObjectiveCoefficient(entry.first, ToDouble(entry.second));
  }
  objective_infinity_norm_ =
      std::max(objective_infinity_norm_, IntTypeAbs(integer_objective_offset_));
  integer_objective_.resize(new_size);
  lp_data_.SetObjectiveOffset(ToDouble(integer_objective_offset_));

  // Mirror every integer row into the double LP.
  for (const LinearConstraintInternal& ct : integer_lp_) {
    const ConstraintIndex row = lp_data_.CreateNewConstraint();
    lp_data_.SetConstraintBounds(row, ToDouble(ct.lb), ToDouble(ct.ub));
    for (const auto& term : ct.terms) {
      lp_data_.SetCoefficient(row, term.first, ToDouble(term.second));
    }
  }
  lp_data_.NotifyThatColumnsAreClean();

  // We scale the LP using the level zero bounds that we later override
  // with the current ones.
  //
  // TODO(user): As part of the scaling, we may also want to shift the initial
  // variable bounds so that each variable contain the value zero in their
  // domain. Maybe just once and for all at the beginning.
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    const IntegerVariable cp_var = integer_variables_[i];
    const double lb = ToDouble(integer_trail_->LevelZeroLowerBound(cp_var));
    const double ub = ToDouble(integer_trail_->LevelZeroUpperBound(cp_var));
    lp_data_.SetVariableBounds(glop::ColIndex(i), lb, ub);
  }

  // TODO(user): As we have an idea of the LP optimal after the first solves,
  // maybe we can adapt the scaling accordingly.
  glop::GlopParameters params;
  params.set_cost_scaling(glop::GlopParameters::MEAN_COST_SCALING);
  scaler_.Scale(params, &lp_data_);
  UpdateBoundsOfLpVariables();

  // Set the information for the step to polish the LP basis. All our variables
  // are integer, but for now, we just try to minimize the fractionality of the
  // binary variables.
  if (parameters_.polish_lp_solution()) {
    simplex_.ClearIntegralityScales();
    for (int i = 0; i < num_vars; ++i) {
      const IntegerVariable cp_var = integer_variables_[i];
      const IntegerValue lb = integer_trail_->LevelZeroLowerBound(cp_var);
      const IntegerValue ub = integer_trail_->LevelZeroUpperBound(cp_var);
      // Only level-zero Booleans (domain [0, 1]) get an integrality scale.
      if (lb != 0 || ub != 1) continue;
      simplex_.SetIntegralityScale(
          glop::ColIndex(i),
          1.0 / scaler_.VariableScalingFactor(glop::ColIndex(i)));
    }
  }

  lp_data_.NotifyThatColumnsAreClean();
  VLOG(1) << "LP relaxation: " << lp_data_.GetDimensionString() << ". "
          << constraint_manager_.AllConstraints().size()
          << " Managed constraints.";
  return true;
}
389
390LPSolveInfo LinearProgrammingConstraint::SolveLpForBranching() {
391 LPSolveInfo info;
392 glop::BasisState basis_state = simplex_.GetState();
393
394 const glop::Status status = simplex_.Solve(lp_data_, time_limit_);
395 total_num_simplex_iterations_ += simplex_.GetNumberOfIterations();
396 simplex_.LoadStateForNextSolve(basis_state);
397 if (!status.ok()) {
398 VLOG(1) << "The LP solver encountered an error: " << status.error_message();
399 info.status = glop::ProblemStatus::ABNORMAL;
400 return info;
401 }
402 info.status = simplex_.GetProblemStatus();
403 if (info.status == glop::ProblemStatus::OPTIMAL ||
404 info.status == glop::ProblemStatus::DUAL_FEASIBLE) {
405 // Record the objective bound.
406 info.lp_objective = simplex_.GetObjectiveValue();
407 info.new_obj_bound = IntegerValue(
408 static_cast<int64_t>(std::ceil(info.lp_objective - kCpEpsilon)));
409 }
410 return info;
411}
412
413void LinearProgrammingConstraint::FillReducedCostReasonIn(
414 const glop::DenseRow& reduced_costs,
415 std::vector<IntegerLiteral>* integer_reason) {
416 integer_reason->clear();
417 const int num_vars = integer_variables_.size();
418 for (int i = 0; i < num_vars; i++) {
419 const double rc = reduced_costs[glop::ColIndex(i)];
420 if (rc > kLpEpsilon) {
421 integer_reason->push_back(
422 integer_trail_->LowerBoundAsLiteral(integer_variables_[i]));
423 } else if (rc < -kLpEpsilon) {
424 integer_reason->push_back(
425 integer_trail_->UpperBoundAsLiteral(integer_variables_[i]));
426 }
427 }
428
429 integer_trail_->RemoveLevelZeroBounds(integer_reason);
430}
431
// Probes both branches var <= floor(lp_value) and var >= ceil(lp_value)
// around the fractional LP value of `positive_var`, pushing any bound
// deduction or objective bound that the two exploratory LP solves justify.
// Returns true when the objective bound was successfully pushed; otherwise
// returns whether any deduction was made before an early exit.
bool LinearProgrammingConstraint::BranchOnVar(IntegerVariable positive_var) {
  // From the current LP solution, branch on the given var if fractional.
  DCHECK(lp_solution_is_set_);
  const double current_value = GetSolutionValue(positive_var);
  DCHECK_GT(std::abs(current_value - std::round(current_value)), kCpEpsilon);

  // Used as empty reason in this method.
  integer_reason_.clear();

  bool deductions_were_made = false;

  UpdateBoundsOfLpVariables();

  const IntegerValue current_obj_lb = integer_trail_->LowerBound(objective_cp_);
  // This will try to branch in both direction around the LP value of the
  // given variable and push any deduction done this way.

  const glop::ColIndex lp_var = GetOrCreateMirrorVariable(positive_var);
  const double current_lb = ToDouble(integer_trail_->LowerBound(positive_var));
  const double current_ub = ToDouble(integer_trail_->UpperBound(positive_var));
  const double factor = scaler_.VariableScalingFactor(lp_var);
  // The cached LP value may be stale w.r.t. the current bounds; bail out.
  if (current_value < current_lb || current_value > current_ub) {
    return false;
  }

  // Form LP1 var <= floor(current_value)
  const double new_ub = std::floor(current_value);
  lp_data_.SetVariableBounds(lp_var, current_lb * factor, new_ub * factor);

  LPSolveInfo lower_branch_info = SolveLpForBranching();
  if (lower_branch_info.status != glop::ProblemStatus::OPTIMAL &&
      lower_branch_info.status != glop::ProblemStatus::DUAL_FEASIBLE &&
      lower_branch_info.status != glop::ProblemStatus::DUAL_UNBOUNDED) {
    return false;
  }

  // DUAL_UNBOUNDED means the restricted primal is infeasible, so the other
  // branch must hold.
  if (lower_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
    // Push the other branch.
    const IntegerLiteral deduction = IntegerLiteral::GreaterOrEqual(
        positive_var, IntegerValue(std::ceil(current_value)));
    if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
      return false;
    }
    deductions_were_made = true;
  } else if (lower_branch_info.new_obj_bound <= current_obj_lb) {
    // This branch cannot improve the objective bound; stop early.
    return false;
  }

  // Form LP2 var >= ceil(current_value)
  const double new_lb = std::ceil(current_value);
  lp_data_.SetVariableBounds(lp_var, new_lb * factor, current_ub * factor);

  LPSolveInfo upper_branch_info = SolveLpForBranching();
  if (upper_branch_info.status != glop::ProblemStatus::OPTIMAL &&
      upper_branch_info.status != glop::ProblemStatus::DUAL_FEASIBLE &&
      upper_branch_info.status != glop::ProblemStatus::DUAL_UNBOUNDED) {
    return deductions_were_made;
  }

  if (upper_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
    // Push the other branch if not infeasible.
    if (lower_branch_info.status != glop::ProblemStatus::DUAL_UNBOUNDED) {
      const IntegerLiteral deduction = IntegerLiteral::LowerOrEqual(
          positive_var, IntegerValue(std::floor(current_value)));
      if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
        return deductions_were_made;
      }
      deductions_were_made = true;
    }
  } else if (upper_branch_info.new_obj_bound <= current_obj_lb) {
    return deductions_were_made;
  }

  // Combine both branches into a valid objective lower bound: if one branch
  // is infeasible the other's bound applies, otherwise take the min.
  IntegerValue approximate_obj_lb = kMinIntegerValue;

  if (lower_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED &&
      upper_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
    // Both branches infeasible: the whole node is infeasible.
    return integer_trail_->ReportConflict(integer_reason_);
  } else if (lower_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
    approximate_obj_lb = upper_branch_info.new_obj_bound;
  } else if (upper_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
    approximate_obj_lb = lower_branch_info.new_obj_bound;
  } else {
    approximate_obj_lb = std::min(lower_branch_info.new_obj_bound,
                                  upper_branch_info.new_obj_bound);
  }

  // NOTE: On some problems, the approximate_obj_lb could be inexact which add
  // some tolerance to CP-SAT where currently there is none.
  if (approximate_obj_lb <= current_obj_lb) return deductions_were_made;

  // Push the bound to the trail.
  const IntegerLiteral deduction =
      IntegerLiteral::GreaterOrEqual(objective_cp_, approximate_obj_lb);
  if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
    return deductions_were_made;
  }

  return true;
}
532
// NOTE(review): the signature line (presumably
// "void LinearProgrammingConstraint::RegisterWith(Model* model) {") was lost
// during extraction; the body below registers this propagator with the model.
  DCHECK(!lp_constraint_is_registered_);
  lp_constraint_is_registered_ = true;
  model->GetOrCreate<LinearProgrammingConstraintCollection>()->push_back(this);

  // Note that this is not really needed but should lead to better cache
  // locality.
  std::sort(integer_objective_.begin(), integer_objective_.end());

  // Set the LP to its initial content.
  if (!parameters_.add_lp_constraints_lazily()) {
    constraint_manager_.AddAllConstraintsToLp();
  }
  if (!CreateLpFromConstraintManager()) {
    // A trivially infeasible LP constraint makes the whole model UNSAT.
    model->GetOrCreate<SatSolver>()->NotifyThatModelIsUnsat();
    return;
  }

  GenericLiteralWatcher* watcher = model->GetOrCreate<GenericLiteralWatcher>();
  const int watcher_id = watcher->Register(this);
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    // Wake this propagator up whenever a mirrored variable's bounds change.
    watcher->WatchIntegerVariable(integer_variables_[i], watcher_id, i);
  }
  if (objective_is_defined_) {
    watcher->WatchUpperBound(objective_cp_, watcher_id);
  }
  watcher->SetPropagatorPriority(watcher_id, 2);
  watcher->AlwaysCallAtLevelZero(watcher_id);

  // Registering it with the trail make sure this class is always in sync when
  // it is used in the decision heuristics.
  integer_trail_->RegisterReversibleClass(this);
  watcher->RegisterReversibleInt(watcher_id, &rev_optimal_constraints_size_);
}
568
// NOTE(review): the signature line (presumably
// "void LinearProgrammingConstraint::SetLevel(int level) {") was lost during
// extraction; this is the backtracking hook of the reversible class.
  optimal_constraints_.resize(rev_optimal_constraints_size_);
  // Invalidate the cached LP solution when backtracking above the level at
  // which it was computed.
  if (lp_solution_is_set_ && level < lp_solution_level_) {
    lp_solution_is_set_ = false;
  }

  // Special case for level zero, we "reload" any previously known optimal
  // solution from that level.
  //
  // TODO(user): Keep all optimal solution in the current branch?
  // TODO(user): Still try to add cuts/constraints though!
  if (level == 0 && !level_zero_lp_solution_.empty()) {
    lp_solution_is_set_ = true;
    lp_solution_ = level_zero_lp_solution_;
    lp_solution_level_ = 0;
    for (int i = 0; i < lp_solution_.size(); i++) {
      // Keep the expanded per-IntegerVariable view (including negations)
      // consistent with the reloaded solution.
      expanded_lp_solution_[integer_variables_[i]] = lp_solution_[i];
      expanded_lp_solution_[NegationOf(integer_variables_[i])] =
          -lp_solution_[i];
    }
  }
}
591
// NOTE(review): the signature line (presumably
// "void LinearProgrammingConstraint::AddCutGenerator(CutGenerator generator)")
// was lost during extraction.
  for (const IntegerVariable var : generator.vars) {
    // Make sure every variable used by the generator has an LP column.
    GetOrCreateMirrorVariable(VariableIsPositive(var) ? var : NegationOf(var));
  }
  cut_generators_.push_back(std::move(generator));
}
598
// NOTE(review): the signature line (presumably
// "bool LinearProgrammingConstraint::IncrementalPropagate(") was lost during
// extraction; the parameter list below belongs to it.
    const std::vector<int>& watch_indices) {
  // No cached LP solution: fall back to a full propagation.
  if (!lp_solution_is_set_) return Propagate();

  // At level zero, if there is still a chance to add cuts or lazy constraints,
  // we re-run the LP.
  if (trail_->CurrentDecisionLevel() == 0 && !lp_at_level_zero_is_final_) {
    return Propagate();
  }

  // Check whether the change breaks the current LP solution. If it does, call
  // Propagate() on the current LP.
  for (const int index : watch_indices) {
    const double lb =
        ToDouble(integer_trail_->LowerBound(integer_variables_[index]));
    const double ub =
        ToDouble(integer_trail_->UpperBound(integer_variables_[index]));
    const double value = lp_solution_[index];
    if (value < lb - kCpEpsilon || value > ub + kCpEpsilon) return Propagate();
  }

  // TODO(user): The saved lp solution is still valid given the current variable
  // bounds, so the LP optimal didn't change. However we might still want to add
  // new cuts or new lazy constraints?
  //
  // TODO(user): Propagate the last optimal_constraint? Note that we need
  // to be careful since the reversible int in IntegerSumLE are not registered.
  // However, because we delete "optimalconstraints" on backtrack, we might not
  // care.
  return true;
}
630
631glop::Fractional LinearProgrammingConstraint::GetVariableValueAtCpScale(
632 glop::ColIndex var) {
633 return scaler_.UnscaleVariableValue(var, simplex_.GetVariableValue(var));
634}
635
// NOTE(review): the signature line (presumably
// "double LinearProgrammingConstraint::GetSolutionValue(") was lost during
// extraction.
    IntegerVariable variable) const {
  // FindOrDie: `variable` must already have an LP column.
  return lp_solution_[gtl::FindOrDie(mirror_lp_variable_, variable).value()];
}
640
// NOTE(review): the signature line (presumably
// "double LinearProgrammingConstraint::GetSolutionReducedCost(") was lost
// during extraction.
    IntegerVariable variable) const {
  // FindOrDie: `variable` must already have an LP column.
  return lp_reduced_cost_[gtl::FindOrDie(mirror_lp_variable_, variable)
                              .value()];
}
646
647void LinearProgrammingConstraint::UpdateBoundsOfLpVariables() {
648 const int num_vars = integer_variables_.size();
649 for (int i = 0; i < num_vars; i++) {
650 const IntegerVariable cp_var = integer_variables_[i];
651 const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
652 const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
653 const double factor = scaler_.VariableScalingFactor(glop::ColIndex(i));
654 lp_data_.SetVariableBounds(glop::ColIndex(i), lb * factor, ub * factor);
655 }
656}
657
// Runs the simplex on the current lp_data_, updates solve statistics, and
// caches the (unscaled) solution values when one is available.
// Returns false only when the solver itself reported an error.
bool LinearProgrammingConstraint::SolveLp() {
  if (trail_->CurrentDecisionLevel() == 0) {
    // A new level-zero solve may still add cuts/lazy constraints afterwards.
    lp_at_level_zero_is_final_ = false;
  }

  const auto status = simplex_.Solve(lp_data_, time_limit_);
  total_num_simplex_iterations_ += simplex_.GetNumberOfIterations();
  if (!status.ok()) {
    VLOG(1) << "The LP solver encountered an error: " << status.error_message();
    simplex_.ClearStateForNextSolve();
    return false;
  }
  average_degeneracy_.AddData(CalculateDegeneracy());
  if (average_degeneracy_.CurrentAverage() >= 1000.0) {
    VLOG(2) << "High average degeneracy: "
            << average_degeneracy_.CurrentAverage();
  }

  // Per-status solve counters, resized on demand.
  const int status_as_int = static_cast<int>(simplex_.GetProblemStatus());
  if (status_as_int >= num_solves_by_status_.size()) {
    num_solves_by_status_.resize(status_as_int + 1);
  }
  num_solves_++;
  num_solves_by_status_[status_as_int]++;
  VLOG(2) << "lvl:" << trail_->CurrentDecisionLevel() << " "
          << simplex_.GetProblemStatus()
          << " iter:" << simplex_.GetNumberOfIterations()
          << " obj:" << simplex_.GetObjectiveValue();

  // NOTE(review): a guard line appears to be missing here in the extracted
  // source (most likely `if (simplex_.HasSolution()) {`); the closing brace
  // below this indented section pairs with it.
    lp_solution_is_set_ = true;
    lp_solution_level_ = trail_->CurrentDecisionLevel();
    const int num_vars = integer_variables_.size();
    for (int i = 0; i < num_vars; i++) {
      // Cache the unscaled value, and mirror it (with its negation) in the
      // expanded per-IntegerVariable view.
      const glop::Fractional value =
          GetVariableValueAtCpScale(glop::ColIndex(i));
      lp_solution_[i] = value;
      expanded_lp_solution_[integer_variables_[i]] = value;
      expanded_lp_solution_[NegationOf(integer_variables_[i])] = -value;
    }

    if (lp_solution_level_ == 0) {
      // Remember level-zero solutions so SetLevel(0) can reload them.
      level_zero_lp_solution_ = lp_solution_;
    }
  }
  return true;
}
705
// Builds a tight linear combination of the LP rows given by
// `integer_multipliers`, augments it with implied-bound slacks and row
// slacks, then tries both the cover (knapsack) and the integer rounding
// heuristics to derive cuts. Returns true when at least one cut was added.
bool LinearProgrammingConstraint::AddCutFromConstraints(
    const std::string& name,
    const std::vector<std::pair<RowIndex, IntegerValue>>& integer_multipliers) {
  // This is initialized to a valid linear constraint (by taking linear
  // combination of the LP rows) and will be transformed into a cut if
  // possible.
  //
  // TODO(user): For CG cuts, Ideally this linear combination should have only
  // one fractional variable (basis_col). But because of imprecision, we get a
  // bunch of fractional entry with small coefficient (relative to the one of
  // basis_col). We try to handle that in IntegerRoundingCut(), but it might be
  // better to add small multiple of the involved rows to get rid of them.
  IntegerValue cut_ub;
  if (!ComputeNewLinearConstraint(integer_multipliers, &tmp_scattered_vector_,
                                  &cut_ub)) {
    VLOG(1) << "Issue, overflow!";
    return false;
  }

  // Important: because we use integer_multipliers below, we cannot just
  // divide by GCD or call PreventOverflow() here.
  //
  // TODO(user): the conversion col_index -> IntegerVariable is slow and could
  // in principle be removed. Easy for cuts, but not so much for
  // implied_bounds_processor_. Note that in theory this could allow us to
  // use Literal directly without the need to have an IntegerVariable for them.
  tmp_scattered_vector_.ConvertToLinearConstraint(integer_variables_, cut_ub,
                                                  &cut_);

  // Note that the base constraint we use are currently always tight.
  // It is not a requirement though.
  if (DEBUG_MODE) {
    const double norm = ToDouble(ComputeInfinityNorm(cut_));
    const double activity = ComputeActivity(cut_, expanded_lp_solution_);
    if (std::abs(activity - ToDouble(cut_.ub)) / norm > 1e-4) {
      VLOG(1) << "Cut not tight " << activity << " <= " << ToDouble(cut_.ub);
      return false;
    }
  }
  CHECK(constraint_manager_.DebugCheckConstraint(cut_));

  // We will create "artificial" variables after this index that will be
  // substituted back into LP variables afterwards. Also note that we only use
  // positive variable indices for these new variables, so that algorithm that
  // take their negation will not mess up the indexing.
  const IntegerVariable first_new_var(expanded_lp_solution_.size());
  CHECK_EQ(first_new_var.value() % 2, 0);

  LinearConstraint copy_in_debug;
  if (DEBUG_MODE) {
    copy_in_debug = cut_;
  }

  // Unlike for the knapsack cuts, it might not be always beneficial to
  // process the implied bounds even though it seems to be better in average.
  //
  // TODO(user): Perform more experiments, in particular with which bound we use
  // and if we complement or not before the MIR rounding. Other solvers seems
  // to try different complementation strategies in a "postprocessing" and we
  // don't. Try this too.
  tmp_ib_slack_infos_.clear();
  implied_bounds_processor_.ProcessUpperBoundedConstraintWithSlackCreation(
      /*substitute_only_inner_variables=*/false, first_new_var,
      expanded_lp_solution_, &cut_, &tmp_ib_slack_infos_);
  DCHECK(implied_bounds_processor_.DebugSlack(first_new_var, copy_in_debug,
                                              cut_, tmp_ib_slack_infos_));

  // Fills data for IntegerRoundingCut().
  //
  // Note(user): we use the current bound here, so the reasoning will only
  // produce locally valid cut if we call this at a non-root node. We could
  // use the level zero bounds if we wanted to generate a globally valid cut
  // at another level. For now this is only called at level zero anyway.
  tmp_lp_values_.clear();
  tmp_var_lbs_.clear();
  tmp_var_ubs_.clear();
  for (const IntegerVariable var : cut_.vars) {
    if (var >= first_new_var) {
      // Artificial implied-bound slack: use the info recorded at creation.
      // NOTE(review): one line (a comment or a check) appears to be missing
      // here in the extracted source.
      const auto& info =
          tmp_ib_slack_infos_[(var.value() - first_new_var.value()) / 2];
      tmp_lp_values_.push_back(info.lp_value);
      tmp_var_lbs_.push_back(info.lb);
      tmp_var_ubs_.push_back(info.ub);
    } else {
      // Regular variable: use its LP value and level-zero bounds.
      tmp_lp_values_.push_back(expanded_lp_solution_[var]);
      tmp_var_lbs_.push_back(integer_trail_->LevelZeroLowerBound(var));
      tmp_var_ubs_.push_back(integer_trail_->LevelZeroUpperBound(var));
    }
  }

  // Add slack.
  // definition: integer_lp_[row] + slack_row == bound;
  const IntegerVariable first_slack(
      first_new_var + IntegerVariable(2 * tmp_ib_slack_infos_.size()));
  tmp_slack_rows_.clear();
  tmp_slack_bounds_.clear();
  for (const auto& pair : integer_multipliers) {
    const RowIndex row = pair.first;
    const IntegerValue coeff = pair.second;
    const auto status = simplex_.GetConstraintStatus(row);
    // NOTE(review): a line appears to be missing here in the extracted
    // source (presumably a check or early-skip based on `status`, which is
    // otherwise unused below).

    tmp_lp_values_.push_back(0.0);
    cut_.vars.push_back(first_slack +
                        2 * IntegerVariable(tmp_slack_rows_.size()));
    tmp_slack_rows_.push_back(row);
    cut_.coeffs.push_back(coeff);

    // Slack range is the distance between the row's two bounds; which bound
    // is binding depends on the sign of the multiplier.
    const IntegerValue diff(
        CapSub(integer_lp_[row].ub.value(), integer_lp_[row].lb.value()));
    if (coeff > 0) {
      tmp_slack_bounds_.push_back(integer_lp_[row].ub);
      tmp_var_lbs_.push_back(IntegerValue(0));
      tmp_var_ubs_.push_back(diff);
    } else {
      tmp_slack_bounds_.push_back(integer_lp_[row].lb);
      tmp_var_lbs_.push_back(-diff);
      tmp_var_ubs_.push_back(IntegerValue(0));
    }
  }

  bool at_least_one_added = false;

  // Try cover approach to find cut.
  {
    if (cover_cut_helper_.TrySimpleKnapsack(cut_, tmp_lp_values_, tmp_var_lbs_,
                                            tmp_var_ubs_)) {
      at_least_one_added |= PostprocessAndAddCut(
          absl::StrCat(name, "_K"), cover_cut_helper_.Info(), first_new_var,
          first_slack, tmp_ib_slack_infos_, cover_cut_helper_.mutable_cut());
    }
  }

  // Try integer rounding heuristic to find cut.
  {
    RoundingOptions options;
    options.max_scaling = parameters_.max_integer_rounding_scaling();
    integer_rounding_cut_helper_.ComputeCut(options, tmp_lp_values_,
                                            tmp_var_lbs_, tmp_var_ubs_,
                                            &implied_bounds_processor_, &cut_);
    at_least_one_added |= PostprocessAndAddCut(
        name,
        absl::StrCat("num_lifted_booleans=",
                     integer_rounding_cut_helper_.NumLiftedBooleans()),
        first_new_var, first_slack, tmp_ib_slack_infos_, &cut_);
  }
  return at_least_one_added;
}
855
856bool LinearProgrammingConstraint::PostprocessAndAddCut(
857 const std::string& name, const std::string& info,
858 IntegerVariable first_new_var, IntegerVariable first_slack,
859 const std::vector<ImpliedBoundsProcessor::SlackInfo>& ib_slack_infos,
860 LinearConstraint* cut) {
861 // Compute the activity. Warning: the cut no longer have the same size so we
862 // cannot use tmp_lp_values_. Note that the substitution below shouldn't
863 // change the activity by definition.
864 double activity = 0.0;
865 for (int i = 0; i < cut->vars.size(); ++i) {
866 if (cut->vars[i] < first_new_var) {
867 activity +=
868 ToDouble(cut->coeffs[i]) * expanded_lp_solution_[cut->vars[i]];
869 }
870 }
871 const double kMinViolation = 1e-4;
872 const double violation = activity - ToDouble(cut->ub);
873 if (violation < kMinViolation) {
874 VLOG(3) << "Bad cut " << activity << " <= " << ToDouble(cut->ub);
875 return false;
876 }
877
878 // Substitute any slack left.
879 {
880 int num_slack = 0;
881 tmp_scattered_vector_.ClearAndResize(integer_variables_.size());
882 IntegerValue cut_ub = cut->ub;
883 bool overflow = false;
884 for (int i = 0; i < cut->vars.size(); ++i) {
885 const IntegerVariable var = cut->vars[i];
886
887 // Simple copy for non-slack variables.
888 if (var < first_new_var) {
889 const glop::ColIndex col =
890 gtl::FindOrDie(mirror_lp_variable_, PositiveVariable(var));
891 if (VariableIsPositive(var)) {
892 tmp_scattered_vector_.Add(col, cut->coeffs[i]);
893 } else {
894 tmp_scattered_vector_.Add(col, -cut->coeffs[i]);
895 }
896 continue;
897 }
898
899 // Replace slack from bound substitution.
900 if (var < first_slack) {
901 const IntegerValue multiplier = cut->coeffs[i];
902 const int index = (var.value() - first_new_var.value()) / 2;
903 CHECK_LT(index, ib_slack_infos.size());
904
905 tmp_terms_.clear();
906 for (const std::pair<IntegerVariable, IntegerValue>& term :
907 ib_slack_infos[index].terms) {
908 tmp_terms_.push_back(
909 {gtl::FindOrDie(mirror_lp_variable_,
910 PositiveVariable(term.first)),
911 VariableIsPositive(term.first) ? term.second : -term.second});
912 }
913 if (!tmp_scattered_vector_.AddLinearExpressionMultiple(multiplier,
914 tmp_terms_)) {
915 overflow = true;
916 break;
917 }
918 if (!AddProductTo(multiplier, -ib_slack_infos[index].offset, &cut_ub)) {
919 overflow = true;
920 break;
921 }
922 continue;
923 }
924
925 // Replace slack from LP constraints.
926 ++num_slack;
927 const int slack_index = (var.value() - first_slack.value()) / 2;
928 const glop::RowIndex row = tmp_slack_rows_[slack_index];
929 const IntegerValue multiplier = -cut->coeffs[i];
930 if (!tmp_scattered_vector_.AddLinearExpressionMultiple(
931 multiplier, integer_lp_[row].terms)) {
932 overflow = true;
933 break;
934 }
935
936 // Update rhs.
937 if (!AddProductTo(multiplier, tmp_slack_bounds_[slack_index], &cut_ub)) {
938 overflow = true;
939 break;
940 }
941 }
942
943 if (overflow) {
944 VLOG(1) << "Overflow in slack removal.";
945 return false;
946 }
947
948 VLOG(3) << " num_slack: " << num_slack;
949 tmp_scattered_vector_.ConvertToLinearConstraint(integer_variables_, cut_ub,
950 cut);
951 }
952
953 // Display some stats used for investigation of cut generation.
954 const std::string extra_info =
955 absl::StrCat(info, " num_ib_substitutions=", ib_slack_infos.size());
956
957 const double new_violation =
958 ComputeActivity(*cut, expanded_lp_solution_) - ToDouble(cut_.ub);
959 if (std::abs(violation - new_violation) >= 1e-4) {
960 VLOG(1) << "Violation discrepancy after slack removal. "
961 << " before = " << violation << " after = " << new_violation;
962 }
963
964 DivideByGCD(cut);
965 return constraint_manager_.AddCut(*cut, name, expanded_lp_solution_,
966 extra_info);
967}
968
969// TODO(user): This can be still too slow on some problems like
970// 30_70_45_05_100.mps.gz. Not this actual function, but the set of computation
971// it triggers. We should add heuristics to abort earlier if a cut is not
972// promising. Or only test a few positions and not all rows.
// Tries to generate one Chvátal-Gomory cut per fractional basic variable: the
// corresponding row of the basis inverse (GetUnitRowLeftInverse) provides the
// LP multipliers that are then scaled to integers and aggregated by
// AddCutFromConstraints(). Both multiplier signs are tried.
void LinearProgrammingConstraint::AddCGCuts() {
  const RowIndex num_rows = lp_data_.num_constraints();
  for (RowIndex row(0); row < num_rows; ++row) {
    ColIndex basis_col = simplex_.GetBasis(row);
    const Fractional lp_value = GetVariableValueAtCpScale(basis_col);

    // Only consider fractional basis element. We ignore element that are close
    // to an integer to reduce the amount of positions we try.
    //
    // TODO(user): We could just look at the diff with std::floor() in the hope
    // that when we are just under an integer, the exact computation below will
    // also be just under it.
    if (std::abs(lp_value - std::round(lp_value)) < 0.01) continue;

    // If this variable is a slack, we ignore it. This is because the
    // corresponding row is not tight under the given lp values.
    if (basis_col >= integer_variables_.size()) continue;

    if (time_limit_->LimitReached()) break;

    // TODO(user): Avoid code duplication between the sparse/dense path.
    double magnitude = 0.0;
    tmp_lp_multipliers_.clear();
    const glop::ScatteredRow& lambda = simplex_.GetUnitRowLeftInverse(row);
    if (lambda.non_zeros.empty()) {
      // Dense path: lambda.non_zeros is empty, so scan all rows.
      for (RowIndex row(0); row < num_rows; ++row) {
        const double value = lambda.values[glop::RowToColIndex(row)];
        if (std::abs(value) < kZeroTolerance) continue;

        // There should be no BASIC status, but they could be imprecision
        // in the GetUnitRowLeftInverse() code? not sure, so better be safe.
        const auto status = simplex_.GetConstraintStatus(row);
        // NOTE(review): this listing appears to have dropped a guard line here
        // (presumably "if (status == glop::ConstraintStatus::BASIC) {") which
        // the dangling "continue; }" below closes — confirm against the
        // original file.
          VLOG(1) << "BASIC row not expected! " << value;
          continue;
        }

        magnitude = std::max(magnitude, std::abs(value));
        tmp_lp_multipliers_.push_back({row, value});
      }
    } else {
      // Sparse path: only visit the recorded non-zero positions.
      for (const ColIndex col : lambda.non_zeros) {
        const RowIndex row = glop::ColToRowIndex(col);
        const double value = lambda.values[col];
        if (std::abs(value) < kZeroTolerance) continue;

        const auto status = simplex_.GetConstraintStatus(row);
        // NOTE(review): same dropped guard line as in the dense path above
        // (presumably "if (status == glop::ConstraintStatus::BASIC) {") —
        // confirm against the original file.
          VLOG(1) << "BASIC row not expected! " << value;
          continue;
        }

        magnitude = std::max(magnitude, std::abs(value));
        tmp_lp_multipliers_.push_back({row, value});
      }
    }
    if (tmp_lp_multipliers_.empty()) continue;

    Fractional scaling;
    // Try both signs of the multipliers.
    for (int i = 0; i < 2; ++i) {
      if (i == 1) {
        // Try other sign.
        //
        // TODO(user): Maybe add an heuristic to know beforehand which sign to
        // use?
        for (std::pair<RowIndex, double>& p : tmp_lp_multipliers_) {
          p.second = -p.second;
        }
      }

      // TODO(user): We use a lower value here otherwise we might run into
      // overflow while computing the cut. This should be fixable.
      tmp_integer_multipliers_ =
          ScaleLpMultiplier(/*take_objective_into_account=*/false,
                            tmp_lp_multipliers_, &scaling, /*max_pow=*/52);
      AddCutFromConstraints("CG", tmp_integer_multipliers_);
    }
  }
}
1052
1053namespace {
1054
1055// For each element of a, adds a random one in b and append the pair to output.
1056void RandomPick(const std::vector<RowIndex>& a, const std::vector<RowIndex>& b,
1057 ModelRandomGenerator* random,
1058 std::vector<std::pair<RowIndex, RowIndex>>* output) {
1059 if (a.empty() || b.empty()) return;
1060 for (const RowIndex row : a) {
1061 const RowIndex other = b[absl::Uniform<int>(*random, 0, b.size())];
1062 if (other != row) {
1063 output->push_back({row, other});
1064 }
1065 }
1066}
1067
1068template <class ListOfTerms>
1069IntegerValue GetCoeff(ColIndex col, const ListOfTerms& terms) {
1070 for (const auto& term : terms) {
1071 if (term.first == col) return term.second;
1072 }
1073 return IntegerValue(0);
1074}
1075
1076} // namespace
1077
1078// Because we know the objective is integer, the constraint objective >= lb can
1079// sometime cut the current lp optimal, and it can make a big difference to add
1080// it. Or at least use it when constructing more advanced cuts. See
1081// 'multisetcover_batch_0_case_115_instance_0_small_subset_elements_3_sumreqs
1082// _1295_candidates_41.fzn'
1083//
1084// TODO(user): It might be better to just integrate this with the MIR code so
1085// that we not only consider MIR1 involving the objective but we also consider
1086// combining it with other constraints.
1087void LinearProgrammingConstraint::AddObjectiveCut() {
1088 if (integer_objective_.size() <= 1) return;
1089
1090 // We only try to add such cut if the LB objective is "far" from the current
1091 // objective lower bound. Note that this is in term of the "internal" integer
1092 // objective.
1093 const double obj_lp_value = simplex_.GetObjectiveValue();
1094 const IntegerValue obj_lower_bound =
1095 integer_trail_->LevelZeroLowerBound(objective_cp_);
1096 if (obj_lp_value + 1.0 >= ToDouble(obj_lower_bound)) return;
1097
1098 tmp_lp_values_.clear();
1099 tmp_var_lbs_.clear();
1100 tmp_var_ubs_.clear();
1101
1102 // We negate everything to have a <= base constraint.
1103 LinearConstraint objective_ct;
1104 objective_ct.lb = kMinIntegerValue;
1105 objective_ct.ub = integer_objective_offset_ -
1106 integer_trail_->LevelZeroLowerBound(objective_cp_);
1107 IntegerValue obj_coeff_magnitude(0);
1108 for (const auto& [col, coeff] : integer_objective_) {
1109 const IntegerVariable var = integer_variables_[col.value()];
1110 objective_ct.vars.push_back(var);
1111 tmp_lp_values_.push_back(expanded_lp_solution_[var]);
1112 tmp_var_lbs_.push_back(integer_trail_->LevelZeroLowerBound(var));
1113 tmp_var_ubs_.push_back(integer_trail_->LevelZeroUpperBound(var));
1114 objective_ct.coeffs.push_back(-coeff);
1115 obj_coeff_magnitude = std::max(obj_coeff_magnitude, IntTypeAbs(coeff));
1116 }
1117
1118 // If the magnitude is small enough, just try to add the full objective. Other
1119 // cuts will be derived in subsequent passes. Otherwise, try normal cut
1120 // heuristic that should result in a cut with reasonable coefficients.
1121 if (obj_coeff_magnitude < 1e9) {
1122 const bool added = constraint_manager_.AddCut(objective_ct, "Objective",
1123 expanded_lp_solution_);
1124 if (added) return;
1125 }
1126
1127 // Try knapsack.
1128 {
1129 cut_ = objective_ct;
1130 if (cover_cut_helper_.TrySimpleKnapsack(cut_, tmp_lp_values_, tmp_var_lbs_,
1131 tmp_var_ubs_)) {
1132 constraint_manager_.AddCut(cut_, "Objective_K", expanded_lp_solution_);
1133 }
1134 }
1135
1136 // Try MIR1.
1137 {
1138 cut_ = objective_ct;
1139 RoundingOptions options;
1140 options.max_scaling = parameters_.max_integer_rounding_scaling();
1141 integer_rounding_cut_helper_.ComputeCut(options, tmp_lp_values_,
1142 tmp_var_lbs_, tmp_var_ubs_,
1143 &implied_bounds_processor_, &cut_);
1144
1145 // Note that the cut will not be added if it is not good enough.
1146 constraint_manager_.AddCut(cut_, "Objective_MIR", expanded_lp_solution_);
1147 }
1148}
1149
void LinearProgrammingConstraint::AddMirCuts() {
  // Heuristic to generate MIR_n cuts by combining a small number of rows. This
  // works greedily and follow more or less the MIR cut description in the
  // literature. We have a current cut, and we add one more row to it while
  // eliminating a variable of the current cut whose LP value is far from its
  // bound.
  //
  // A notable difference is that we randomize the variable we eliminate and
  // the row we use to do so. We still have weights to indicate our preferred
  // choices. This allows to generate different cuts when called again and
  // again.
  //
  // TODO(user): We could combine n rows to make sure we eliminate n variables
  // far away from their bounds by solving exactly in integer small linear
  // system.
  // NOTE(review): this listing dropped the declaration line that the
  // continuation below belongs to (presumably something like
  // "absl::StrongVector<ColIndex, IntegerValue> dense_cut(") — confirm
  // against the original file.
      integer_variables_.size(), IntegerValue(0));
  SparseBitset<ColIndex> non_zeros_(ColIndex(integer_variables_.size()));

  // We compute all the rows that are tight, these will be used as the base row
  // for the MIR_n procedure below.
  const RowIndex num_rows = lp_data_.num_constraints();
  std::vector<std::pair<RowIndex, IntegerValue>> base_rows;
  absl::StrongVector<RowIndex, double> row_weights(num_rows.value(), 0.0);
  for (RowIndex row(0); row < num_rows; ++row) {
    const auto status = simplex_.GetConstraintStatus(row);
    if (status == glop::ConstraintStatus::BASIC) continue;
    if (status == glop::ConstraintStatus::FREE) continue;

    // NOTE(review): the condition opening this branch is missing from the
    // listing — presumably a status test (e.g. AT_UPPER_BOUND/FIXED_VALUE)
    // selecting the <= side with multiplier +1; confirm against the original
    // file.
      base_rows.push_back({row, IntegerValue(1)});
    }
    // NOTE(review): the matching condition for the other direction (e.g.
    // AT_LOWER_BOUND/FIXED_VALUE, multiplier -1) is missing here too.
      base_rows.push_back({row, IntegerValue(-1)});
    }

    // For now, we use the dual values for the row "weights".
    //
    // Note that we use the dual at LP scale so that it make more sense when we
    // compare different rows since the LP has been scaled.
    //
    // TODO(user): In Kati Wolter PhD "Implementation of Cutting Plane
    // Separators for Mixed Integer Programs" which describe SCIP's MIR cuts
    // implementation (or at least an early version of it), a more complex score
    // is used.
    //
    // Note(user): Because we only consider tight rows under the current lp
    // solution (i.e. non-basic rows), most should have a non-zero dual values.
    // But there is some degenerate problem where these rows have a really low
    // weight (or even zero), and having only weight of exactly zero in
    // std::discrete_distribution will result in a crash.
    row_weights[row] = std::max(1e-8, std::abs(simplex_.GetDualValue(row)));
  }

  std::vector<double> weights;
  // NOTE(review): a declaration appears to be missing from the listing here —
  // presumably "std::vector<bool> used_rows;" since used_rows is referenced
  // below; confirm against the original file.
  std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
  for (const std::pair<RowIndex, IntegerValue>& entry : base_rows) {
    if (time_limit_->LimitReached()) break;

    // First try to generate a cut directly from this base row (MIR1).
    //
    // Note(user): We abort on success like it seems to be done in the
    // literature. Note that we don't succeed that often in generating an
    // efficient cut, so I am not sure aborting will make a big difference
    // speedwise. We might generate similar cuts though, but hopefully the cut
    // management can deal with that.
    integer_multipliers = {entry};
    if (AddCutFromConstraints("MIR_1", integer_multipliers)) {
      continue;
    }

    // Cleanup.
    for (const ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
      dense_cut[col] = IntegerValue(0);
    }
    non_zeros_.SparseClearAll();

    // Copy cut.
    const IntegerValue multiplier = entry.second;
    for (const std::pair<ColIndex, IntegerValue>& term :
         integer_lp_[entry.first].terms) {
      const ColIndex col = term.first;
      const IntegerValue coeff = term.second;
      non_zeros_.Set(col);
      dense_cut[col] += coeff * multiplier;
    }

    used_rows.assign(num_rows.value(), false);
    used_rows[entry.first] = true;

    // We will aggregate at most kMaxAggregation more rows.
    //
    // TODO(user): optim + tune.
    const int kMaxAggregation = 5;
    for (int i = 0; i < kMaxAggregation; ++i) {
      // First pick a variable to eliminate. We currently pick a random one with
      // a weight that depend on how far it is from its closest bound.
      IntegerValue max_magnitude(0);
      weights.clear();
      std::vector<ColIndex> col_candidates;
      for (const ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
        if (dense_cut[col] == 0) continue;

        max_magnitude = std::max(max_magnitude, IntTypeAbs(dense_cut[col]));
        const int col_degree =
            lp_data_.GetSparseColumn(col).num_entries().value();
        if (col_degree <= 1) continue;
        // NOTE(review): an "if (...) {" line is missing from the listing —
        // the dangling "continue; }" below belongs to it; confirm against the
        // original file.
          continue;
        }

        const IntegerVariable var = integer_variables_[col.value()];
        const double lp_value = expanded_lp_solution_[var];
        const double lb = ToDouble(integer_trail_->LevelZeroLowerBound(var));
        const double ub = ToDouble(integer_trail_->LevelZeroUpperBound(var));
        const double bound_distance = std::min(ub - lp_value, lp_value - lb);
        if (bound_distance > 1e-2) {
          weights.push_back(bound_distance);
          col_candidates.push_back(col);
        }
      }
      if (col_candidates.empty()) break;

      const ColIndex var_to_eliminate =
          col_candidates[std::discrete_distribution<>(weights.begin(),
                                                      weights.end())(*random_)];

      // What rows can we add to eliminate var_to_eliminate?
      std::vector<RowIndex> possible_rows;
      weights.clear();
      for (const auto entry : lp_data_.GetSparseColumn(var_to_eliminate)) {
        const RowIndex row = entry.row();
        const auto status = simplex_.GetConstraintStatus(row);
        if (status == glop::ConstraintStatus::BASIC) continue;
        if (status == glop::ConstraintStatus::FREE) continue;

        // We disallow all the rows that contain a variable that we already
        // eliminated (or are about to). This mean that we choose rows that
        // form a "triangular" matrix on the position we choose to eliminate.
        if (used_rows[row]) continue;
        used_rows[row] = true;

        // TODO(user): Instead of using FIXED_VALUE consider also both direction
        // when we almost have an equality? that is if the LP constraints bounds
        // are close from each others (<1e-6 ?). Initial experiments shows it
        // doesn't change much, so I kept this version for now. Note that it
        // might just be better to use the side that constrain the current lp
        // optimal solution (that we get from the status).
        bool add_row = false;
        // NOTE(review): the status condition opening this branch is missing
        // from the listing (presumably AT_UPPER_BOUND/FIXED_VALUE); confirm
        // against the original file.
        if (entry.coefficient() > 0.0) {
          if (dense_cut[var_to_eliminate] < 0) add_row = true;
        } else {
          if (dense_cut[var_to_eliminate] > 0) add_row = true;
        }
        }
        // NOTE(review): the symmetric status condition (presumably
        // AT_LOWER_BOUND/FIXED_VALUE) is missing here too.
        if (entry.coefficient() > 0.0) {
          if (dense_cut[var_to_eliminate] > 0) add_row = true;
        } else {
          if (dense_cut[var_to_eliminate] < 0) add_row = true;
        }
        }
        if (add_row) {
          possible_rows.push_back(row);
          weights.push_back(row_weights[row]);
        }
      }
      if (possible_rows.empty()) break;

      const RowIndex row_to_combine =
          possible_rows[std::discrete_distribution<>(weights.begin(),
                                                     weights.end())(*random_)];
      const IntegerValue to_combine_coeff =
          GetCoeff(var_to_eliminate, integer_lp_[row_to_combine].terms);
      CHECK_NE(to_combine_coeff, 0);

      // Choose mult1/mult2 so that mult1 * cut + mult2 * row cancels the
      // eliminated variable, normalized by their gcd.
      IntegerValue mult1 = -to_combine_coeff;
      IntegerValue mult2 = dense_cut[var_to_eliminate];
      CHECK_NE(mult2, 0);
      if (mult1 < 0) {
        mult1 = -mult1;
        mult2 = -mult2;
      }

      const IntegerValue gcd = IntegerValue(
          MathUtil::GCD64(std::abs(mult1.value()), std::abs(mult2.value())));
      CHECK_NE(gcd, 0);
      mult1 /= gcd;
      mult2 /= gcd;

      // Overflow detection.
      //
      // TODO(user): do that in the possible_rows selection? only problem is
      // that we do not have the integer coefficient there...
      for (std::pair<RowIndex, IntegerValue>& entry : integer_multipliers) {
        max_magnitude = std::max(max_magnitude, IntTypeAbs(entry.second));
      }
      if (CapAdd(CapProd(max_magnitude.value(), std::abs(mult1.value())),
                 CapProd(infinity_norms_[row_to_combine].value(),
                         std::abs(mult2.value()))) ==
        // NOTE(review): the right-hand operand of this comparison is missing
        // from the listing — presumably
        // "std::numeric_limits<int64_t>::max()) {"; confirm against the
        // original file.
        break;
      }

      for (std::pair<RowIndex, IntegerValue>& entry : integer_multipliers) {
        entry.second *= mult1;
      }
      integer_multipliers.push_back({row_to_combine, mult2});

      // TODO(user): Not supper efficient to recombine the rows.
      if (AddCutFromConstraints(absl::StrCat("MIR_", i + 2),
                                integer_multipliers)) {
        break;
      }

      // Minor optim: the computation below is only needed if we do one more
      // iteration.
      if (i + 1 == kMaxAggregation) break;

      // Fold the chosen row into the dense representation of the current cut.
      for (ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
        dense_cut[col] *= mult1;
      }
      for (const std::pair<ColIndex, IntegerValue>& term :
           integer_lp_[row_to_combine].terms) {
        const ColIndex col = term.first;
        const IntegerValue coeff = term.second;
        non_zeros_.Set(col);
        dense_cut[col] += coeff * mult2;
      }
    }
  }
}
1388
1389void LinearProgrammingConstraint::AddZeroHalfCuts() {
1390 if (time_limit_->LimitReached()) return;
1391
1392 tmp_lp_values_.clear();
1393 tmp_var_lbs_.clear();
1394 tmp_var_ubs_.clear();
1395 for (const IntegerVariable var : integer_variables_) {
1396 tmp_lp_values_.push_back(expanded_lp_solution_[var]);
1397 tmp_var_lbs_.push_back(integer_trail_->LevelZeroLowerBound(var));
1398 tmp_var_ubs_.push_back(integer_trail_->LevelZeroUpperBound(var));
1399 }
1400
1401 // TODO(user): See if it make sense to try to use implied bounds there.
1402 zero_half_cut_helper_.ProcessVariables(tmp_lp_values_, tmp_var_lbs_,
1403 tmp_var_ubs_);
1404 for (glop::RowIndex row(0); row < integer_lp_.size(); ++row) {
1405 // Even though we could use non-tight row, for now we prefer to use tight
1406 // ones.
1407 const auto status = simplex_.GetConstraintStatus(row);
1408 if (status == glop::ConstraintStatus::BASIC) continue;
1409 if (status == glop::ConstraintStatus::FREE) continue;
1410
1411 zero_half_cut_helper_.AddOneConstraint(
1412 row, integer_lp_[row].terms, integer_lp_[row].lb, integer_lp_[row].ub);
1413 }
1414 for (const std::vector<std::pair<RowIndex, IntegerValue>>& multipliers :
1415 zero_half_cut_helper_.InterestingCandidates(random_)) {
1416 if (time_limit_->LimitReached()) break;
1417
1418 // TODO(user): Make sure that if the resulting linear coefficients are not
1419 // too high, we do try a "divisor" of two and thus try a true zero-half cut
1420 // instead of just using our best MIR heuristic (which might still be better
1421 // though).
1422 AddCutFromConstraints("ZERO_HALF", multipliers);
1423 }
1424}
1425
// Adapts next_simplex_iter_ based on the last solve outcome and the degeneracy
// of the problem, then clamps it into [min_iter, max_iter]. Only active at
// linearization level >= 2.
void LinearProgrammingConstraint::UpdateSimplexIterationLimit(
    const int64_t min_iter, const int64_t max_iter) {
  if (parameters_.linearization_level() < 2) return;
  const int64_t num_degenerate_columns = CalculateDegeneracy();
  const int64_t num_cols = simplex_.GetProblemNumCols().value();
  if (num_cols <= 0) {
    return;
  }
  CHECK_GT(num_cols, 0);
  // Ten times the fraction of degenerate columns; used below to shrink the
  // next iteration limit on degenerate problems.
  const int64_t decrease_factor = (10 * num_degenerate_columns) / num_cols;
  // NOTE(review): the opening "if (...) {" of this branch is missing from the
  // listing — given the "else if (... OPTIMAL)" below, it presumably tests for
  // a non-optimal simplex status (e.g. the iteration limit was hit); confirm
  // against the original file.
    // We reached here probably because we predicted wrong. We use this as a
    // signal to increase the iterations or punish less for degeneracy compare
    // to the other part.
    if (is_degenerate_) {
      next_simplex_iter_ /= std::max(int64_t{1}, decrease_factor);
    } else {
      next_simplex_iter_ *= 2;
    }
  } else if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
    if (is_degenerate_) {
      next_simplex_iter_ /= std::max(int64_t{1}, 2 * decrease_factor);
    } else {
      // This is the most common case. We use the size of the problem to
      // determine the limit and ignore the previous limit.
      next_simplex_iter_ = num_cols / 40;
    }
  }
  // Clamp the final value into [min_iter, max_iter].
  next_simplex_iter_ =
      std::max(min_iter, std::min(max_iter, next_simplex_iter_));
}
1457
1459 UpdateBoundsOfLpVariables();
1460
1461 // TODO(user): It seems the time we loose by not stopping early might be worth
1462 // it because we end up with a better explanation at optimality.
1463 glop::GlopParameters parameters = simplex_.GetParameters();
1464 if (/* DISABLES CODE */ (false) && objective_is_defined_) {
1465 // We put a limit on the dual objective since there is no point increasing
1466 // it past our current objective upper-bound (we will already fail as soon
1467 // as we pass it). Note that this limit is properly transformed using the
1468 // objective scaling factor and offset stored in lp_data_.
1469 //
1470 // Note that we use a bigger epsilon here to be sure that if we abort
1471 // because of this, we will report a conflict.
1472 parameters.set_objective_upper_limit(
1473 static_cast<double>(integer_trail_->UpperBound(objective_cp_).value() +
1474 100.0 * kCpEpsilon));
1475 }
1476
1477 // Put an iteration limit on the work we do in the simplex for this call. Note
1478 // that because we are "incremental", even if we don't solve it this time we
1479 // will make progress towards a solve in the lower node of the tree search.
1480 if (trail_->CurrentDecisionLevel() == 0) {
1481 // TODO(user): Dynamically change the iteration limit for root node as
1482 // well.
1483 parameters.set_max_number_of_iterations(2000);
1484 } else {
1485 parameters.set_max_number_of_iterations(next_simplex_iter_);
1486 }
1487 if (parameters_.use_exact_lp_reason()) {
1488 parameters.set_change_status_to_imprecise(false);
1489 parameters.set_primal_feasibility_tolerance(1e-7);
1490 parameters.set_dual_feasibility_tolerance(1e-7);
1491 }
1492
1493 simplex_.SetParameters(parameters);
1495 if (!SolveLp()) return true;
1496
1497 // Add new constraints to the LP and resolve?
1498 const int max_cuts_rounds =
1499 parameters_.cut_level() <= 0
1500 ? 0
1501 : (trail_->CurrentDecisionLevel() == 0
1502 ? parameters_.max_cut_rounds_at_level_zero()
1503 : 1);
1504 int cuts_round = 0;
1505 while (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL &&
1506 cuts_round < max_cuts_rounds) {
1507 // We wait for the first batch of problem constraints to be added before we
1508 // begin to generate cuts. Note that we rely on num_solves_ since on some
1509 // problems there is no other constriants than the cuts.
1510 cuts_round++;
1511 if (num_solves_ > 1) {
1512 // This must be called first.
1513 implied_bounds_processor_.RecomputeCacheAndSeparateSomeImpliedBoundCuts(
1514 expanded_lp_solution_);
1515
1516 // The "generic" cuts are currently part of this class as they are using
1517 // data from the current LP.
1518 //
1519 // TODO(user): Refactor so that they are just normal cut generators?
1520 if (trail_->CurrentDecisionLevel() == 0) {
1521 if (parameters_.add_objective_cut()) AddObjectiveCut();
1522 if (parameters_.add_mir_cuts()) AddMirCuts();
1523 if (parameters_.add_cg_cuts()) AddCGCuts();
1524 if (parameters_.add_zero_half_cuts()) AddZeroHalfCuts();
1525 }
1526
1527 // Try to add cuts.
1528 if (!cut_generators_.empty() &&
1529 (trail_->CurrentDecisionLevel() == 0 ||
1530 !parameters_.only_add_cuts_at_level_zero())) {
1531 for (const CutGenerator& generator : cut_generators_) {
1532 if (!generator.generate_cuts(expanded_lp_solution_,
1533 &constraint_manager_)) {
1534 return false;
1535 }
1536 }
1537 }
1538
1539 implied_bounds_processor_.IbCutPool().TransferToManager(
1540 expanded_lp_solution_, &constraint_manager_);
1541 }
1542
1543 glop::BasisState state = simplex_.GetState();
1544 if (constraint_manager_.ChangeLp(expanded_lp_solution_, &state)) {
1545 simplex_.LoadStateForNextSolve(state);
1546 if (!CreateLpFromConstraintManager()) {
1547 return integer_trail_->ReportConflict({});
1548 }
1549 const double old_obj = simplex_.GetObjectiveValue();
1550 if (!SolveLp()) return true;
1552 VLOG(1) << "Relaxation improvement " << old_obj << " -> "
1553 << simplex_.GetObjectiveValue()
1554 << " diff: " << simplex_.GetObjectiveValue() - old_obj
1555 << " level: " << trail_->CurrentDecisionLevel();
1556 }
1557 } else {
1558 if (trail_->CurrentDecisionLevel() == 0) {
1559 lp_at_level_zero_is_final_ = true;
1560 }
1561 break;
1562 }
1563 }
1564
1565 // A dual-unbounded problem is infeasible. We use the dual ray reason.
1567 if (parameters_.use_exact_lp_reason()) {
1568 if (!FillExactDualRayReason()) return true;
1569 } else {
1570 FillReducedCostReasonIn(simplex_.GetDualRayRowCombination(),
1571 &integer_reason_);
1572 }
1573 return integer_trail_->ReportConflict(integer_reason_);
1574 }
1575
1576 // TODO(user): Update limits for DUAL_UNBOUNDED status as well.
1577 UpdateSimplexIterationLimit(/*min_iter=*/10, /*max_iter=*/1000);
1578
1579 // Optimality deductions if problem has an objective.
1580 if (objective_is_defined_ &&
1583 // TODO(user): Maybe do a bit less computation when we cannot propagate
1584 // anything.
1585 if (parameters_.use_exact_lp_reason()) {
1586 if (!ExactLpReasonning()) return false;
1587
1588 // Display when the inexact bound would have propagated more.
1589 if (VLOG_IS_ON(2)) {
1590 const double relaxed_optimal_objective = simplex_.GetObjectiveValue();
1591 const IntegerValue approximate_new_lb(static_cast<int64_t>(
1592 std::ceil(relaxed_optimal_objective - kCpEpsilon)));
1593 const IntegerValue propagated_lb =
1594 integer_trail_->LowerBound(objective_cp_);
1595 if (approximate_new_lb > propagated_lb) {
1596 VLOG(2) << "LP objective [ " << ToDouble(propagated_lb) << ", "
1597 << ToDouble(integer_trail_->UpperBound(objective_cp_))
1598 << " ] approx_lb += "
1599 << ToDouble(approximate_new_lb - propagated_lb) << " gap: "
1600 << integer_trail_->UpperBound(objective_cp_) - propagated_lb;
1601 }
1602 }
1603 } else {
1604 // Try to filter optimal objective value. Note that GetObjectiveValue()
1605 // already take care of the scaling so that it returns an objective in the
1606 // CP world.
1607 FillReducedCostReasonIn(simplex_.GetReducedCosts(), &integer_reason_);
1608 const double objective_cp_ub =
1609 ToDouble(integer_trail_->UpperBound(objective_cp_));
1610 const double relaxed_optimal_objective = simplex_.GetObjectiveValue();
1611 ReducedCostStrengtheningDeductions(objective_cp_ub -
1612 relaxed_optimal_objective);
1613 if (!deductions_.empty()) {
1614 deductions_reason_ = integer_reason_;
1615 deductions_reason_.push_back(
1616 integer_trail_->UpperBoundAsLiteral(objective_cp_));
1617 }
1618
1619 // Push new objective lb.
1620 const IntegerValue approximate_new_lb(static_cast<int64_t>(
1621 std::ceil(relaxed_optimal_objective - kCpEpsilon)));
1622 if (approximate_new_lb > integer_trail_->LowerBound(objective_cp_)) {
1623 const IntegerLiteral deduction =
1624 IntegerLiteral::GreaterOrEqual(objective_cp_, approximate_new_lb);
1625 if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
1626 return false;
1627 }
1628 }
1629
1630 // Push reduced cost strengthening bounds.
1631 if (!deductions_.empty()) {
1632 const int trail_index_with_same_reason = integer_trail_->Index();
1633 for (const IntegerLiteral deduction : deductions_) {
1634 if (!integer_trail_->Enqueue(deduction, {}, deductions_reason_,
1635 trail_index_with_same_reason)) {
1636 return false;
1637 }
1638 }
1639 }
1640 }
1641 }
1642
1643 // Copy more info about the current solution.
1645 CHECK(lp_solution_is_set_);
1646
1647 lp_objective_ = simplex_.GetObjectiveValue();
1648 lp_solution_is_integer_ = true;
1649 const int num_vars = integer_variables_.size();
1650 for (int i = 0; i < num_vars; i++) {
1651 lp_reduced_cost_[i] = scaler_.UnscaleReducedCost(
1652 glop::ColIndex(i), simplex_.GetReducedCost(glop::ColIndex(i)));
1653 if (std::abs(lp_solution_[i] - std::round(lp_solution_[i])) >
1654 kCpEpsilon) {
1655 lp_solution_is_integer_ = false;
1656 }
1657 }
1658
1659 if (compute_reduced_cost_averages_) {
1660 UpdateAverageReducedCosts();
1661 }
1662 }
1663
1664 if (parameters_.use_branching_in_lp() && objective_is_defined_ &&
1665 trail_->CurrentDecisionLevel() == 0 && !is_degenerate_ &&
1666 lp_solution_is_set_ && !lp_solution_is_integer_ &&
1667 parameters_.linearization_level() >= 2 &&
1668 compute_reduced_cost_averages_ &&
1670 count_since_last_branching_++;
1671 if (count_since_last_branching_ < branching_frequency_) {
1672 return true;
1673 }
1674 count_since_last_branching_ = 0;
1675 bool branching_successful = false;
1676
1677 // Strong branching on top max_num_branches variable.
1678 const int max_num_branches = 3;
1679 const int num_vars = integer_variables_.size();
1680 std::vector<std::pair<double, IntegerVariable>> branching_vars;
1681 for (int i = 0; i < num_vars; ++i) {
1682 const IntegerVariable var = integer_variables_[i];
1683 const IntegerVariable positive_var = PositiveVariable(var);
1684
1685 // Skip non fractional variables.
1686 const double current_value = GetSolutionValue(positive_var);
1687 if (std::abs(current_value - std::round(current_value)) <= kCpEpsilon) {
1688 continue;
1689 }
1690
1691 // Skip ignored variables.
1692 if (integer_trail_->IsCurrentlyIgnored(var)) continue;
1693
1694 // We can use any metric to select a variable to branch on. Reduced cost
1695 // average is one of the most promissing metric. It captures the history
1696 // of the objective bound improvement in LP due to changes in the given
1697 // variable bounds.
1698 //
1699 // NOTE: We also experimented using PseudoCosts and most recent reduced
1700 // cost as metrics but it doesn't give better results on benchmarks.
1701 const double cost_i = rc_scores_[i];
1702 std::pair<double, IntegerVariable> branching_var =
1703 std::make_pair(-cost_i, positive_var);
1704 auto iterator = std::lower_bound(branching_vars.begin(),
1705 branching_vars.end(), branching_var);
1706
1707 branching_vars.insert(iterator, branching_var);
1708 if (branching_vars.size() > max_num_branches) {
1709 branching_vars.resize(max_num_branches);
1710 }
1711 }
1712
1713 for (const std::pair<double, IntegerVariable>& branching_var :
1714 branching_vars) {
1715 const IntegerVariable positive_var = branching_var.second;
1716 VLOG(2) << "Branching on: " << positive_var;
1717 if (BranchOnVar(positive_var)) {
1718 VLOG(2) << "Branching successful.";
1719 branching_successful = true;
1720 } else {
1721 break;
1722 }
1723 }
1724 if (!branching_successful) {
1725 branching_frequency_ *= 2;
1726 }
1727 }
1728 return true;
1729}
1730
1731// Returns kMinIntegerValue in case of overflow.
1732//
1733// TODO(user): Because of PreventOverflow(), this should actually never happen.
1734IntegerValue LinearProgrammingConstraint::GetImpliedLowerBound(
1735 const LinearConstraint& terms) const {
1736 IntegerValue lower_bound(0);
1737 const int size = terms.vars.size();
1738 for (int i = 0; i < size; ++i) {
1739 const IntegerVariable var = terms.vars[i];
1740 const IntegerValue coeff = terms.coeffs[i];
1741 CHECK_NE(coeff, 0);
1742 const IntegerValue bound = coeff > 0 ? integer_trail_->LowerBound(var)
1743 : integer_trail_->UpperBound(var);
1745 }
1746 return lower_bound;
1747}
1748
1749bool LinearProgrammingConstraint::PossibleOverflow(
1750 const LinearConstraint& constraint) {
1751 IntegerValue lower_bound(0);
1752 const int size = constraint.vars.size();
1753 for (int i = 0; i < size; ++i) {
1754 const IntegerVariable var = constraint.vars[i];
1755 const IntegerValue coeff = constraint.coeffs[i];
1756 CHECK_NE(coeff, 0);
1757 const IntegerValue bound = coeff > 0
1758 ? integer_trail_->LevelZeroLowerBound(var)
1759 : integer_trail_->LevelZeroUpperBound(var);
1761 return true;
1762 }
1763 }
1764 const int64_t slack = CapAdd(lower_bound.value(), -constraint.ub.value());
1765 if (slack == std::numeric_limits<int64_t>::min() ||
1767 return true;
1768 }
1769 return false;
1770}
1771
1772namespace {
1773
1774absl::int128 FloorRatio128(absl::int128 x, IntegerValue positive_div) {
1775 absl::int128 div128(positive_div.value());
1776 absl::int128 result = x / div128;
1777 if (result * div128 > x) return result - 1;
1778 return result;
1779}
1780
1781} // namespace
1782
// Divides the coefficients and rhs of *constraint by a common positive
// divisor (rounding in a valid way) so that the maximum possible magnitude of
// any partial sum stays below roughly 2^max_pow.
void LinearProgrammingConstraint::PreventOverflow(LinearConstraint* constraint,
                                                  int max_pow) {
  // First, make all coefficients positive.
  MakeAllCoefficientsPositive(constraint);

  // Compute the min/max possible partial sum. Note that we need to use the
  // level zero bounds here since we might use this cut after backtrack.
  double sum_min = std::min(0.0, ToDouble(-constraint->ub));
  double sum_max = std::max(0.0, ToDouble(-constraint->ub));
  const int size = constraint->vars.size();
  for (int i = 0; i < size; ++i) {
    const IntegerVariable var = constraint->vars[i];
    const double coeff = ToDouble(constraint->coeffs[i]);
    sum_min +=
        coeff *
        std::min(0.0, ToDouble(integer_trail_->LevelZeroLowerBound(var)));
    sum_max +=
        coeff *
        std::max(0.0, ToDouble(integer_trail_->LevelZeroUpperBound(var)));
  }
  const double max_value = std::max({sum_max, -sum_min, sum_max - sum_min});

  // Any divisor >= max_value / 2^max_pow brings the magnitude under control;
  // a divisor <= 1 means nothing needs to be done.
  const IntegerValue divisor(std::ceil(std::ldexp(max_value, -max_pow)));
  if (divisor <= 1) return;

  // To be correct, we need to shift all variables so that they are positive.
  //
  // Important: One might be tempted to think that using the current variable
  // bounds is okay here since we only use this to derive cut/constraint that
  // only needs to be locally valid. However, in some corner cases (like when
  // one term become zero), we might lose the fact that we used one of the
  // variable bounds to derive the new constraint, so we will miss it in the
  // explanation !!
  //
  // TODO(user): This code is tricky and similar to the one to generate cuts.
  // Test and may reduce the duplication? note however that here we use int128
  // to deal with potential overflow.
  int new_size = 0;
  absl::int128 adjust = 0;
  for (int i = 0; i < size; ++i) {
    const IntegerValue old_coeff = constraint->coeffs[i];
    const IntegerValue new_coeff = FloorRatio(old_coeff, divisor);

    // Compute the rhs adjustment: the rounding remainder, weighted by the
    // level-zero lower bound, is transferred to the rhs below.
    const absl::int128 remainder =
        absl::int128(old_coeff.value()) -
        absl::int128(new_coeff.value()) * absl::int128(divisor.value());
    adjust +=
        remainder *
        absl::int128(
            integer_trail_->LevelZeroLowerBound(constraint->vars[i]).value());

    // Terms whose coefficient rounds to zero are dropped (compaction).
    if (new_coeff == 0) continue;
    constraint->vars[new_size] = constraint->vars[i];
    constraint->coeffs[new_size] = new_coeff;
    ++new_size;
  }
  constraint->vars.resize(new_size);
  constraint->coeffs.resize(new_size);

  // New rhs = floor((ub - adjust) / divisor), computed in 128 bits to avoid
  // overflow before the final cast back to 64 bits.
  constraint->ub = IntegerValue(static_cast<int64_t>(
      FloorRatio128(absl::int128(constraint->ub.value()) - adjust, divisor)));
}
1846
1847// TODO(user): combine this with RelaxLinearReason() to avoid the extra
1848// magnitude vector and the weird precondition of RelaxLinearReason().
1849void LinearProgrammingConstraint::SetImpliedLowerBoundReason(
1850 const LinearConstraint& terms, IntegerValue slack) {
1851 integer_reason_.clear();
1852 std::vector<IntegerValue> magnitudes;
1853 const int size = terms.vars.size();
1854 for (int i = 0; i < size; ++i) {
1855 const IntegerVariable var = terms.vars[i];
1856 const IntegerValue coeff = terms.coeffs[i];
1857 CHECK_NE(coeff, 0);
1858 if (coeff > 0) {
1859 magnitudes.push_back(coeff);
1860 integer_reason_.push_back(integer_trail_->LowerBoundAsLiteral(var));
1861 } else {
1862 magnitudes.push_back(-coeff);
1863 integer_reason_.push_back(integer_trail_->UpperBoundAsLiteral(var));
1864 }
1865 }
1866 CHECK_GE(slack, 0);
1867 if (slack > 0) {
1868 integer_trail_->RelaxLinearReason(slack, magnitudes, &integer_reason_);
1869 }
1870 integer_trail_->RemoveLevelZeroBounds(&integer_reason_);
1871}
1872
// Scales the given LP dual multipliers into integer multipliers such that the
// induced integer linear combination of the LP rows cannot overflow. Sets
// *scaling to the power of two that was applied. Returns an empty vector (and
// *scaling == 1.0) when the combination is empty or cannot be scaled up.
std::vector<std::pair<RowIndex, IntegerValue>>
LinearProgrammingConstraint::ScaleLpMultiplier(
    bool take_objective_into_account,
    const std::vector<std::pair<RowIndex, double>>& lp_multipliers,
    Fractional* scaling, int max_pow) const {
  double max_sum = 0.0;
  // NOTE(review): this member is modified inside a const method — presumably
  // declared mutable as a scratch buffer; confirm in the header.
  tmp_cp_multipliers_.clear();
  for (const std::pair<RowIndex, double>& p : lp_multipliers) {
    const RowIndex row = p.first;
    const Fractional lp_multi = p.second;

    // We ignore small values since these are likely errors and will not
    // contribute much to the new lp constraint anyway.
    if (std::abs(lp_multi) < kZeroTolerance) continue;

    // Remove trivial bad cases: a multiplier on the side of an infinite row
    // bound cannot produce a finite constraint.
    //
    // TODO(user): It might be better (when possible) to use the OPTIMAL row
    // status since in most situation we do want the constraint we add to be
    // tight under the current LP solution. Only for infeasible problem we might
    // not have access to the status.
    if (lp_multi > 0.0 && integer_lp_[row].ub >= kMaxIntegerValue) {
      continue;
    }
    if (lp_multi < 0.0 && integer_lp_[row].lb <= kMinIntegerValue) {
      continue;
    }

    // Unscale back to CP space and track the worst-case contribution of this
    // row via its infinity norm.
    const Fractional cp_multi = scaler_.UnscaleDualValue(row, lp_multi);
    tmp_cp_multipliers_.push_back({row, cp_multi});
    max_sum += ToDouble(infinity_norms_[row]) * std::abs(cp_multi);
  }

  // This behaves exactly as if we had another "objective" constraint with
  // an lp_multi of 1.0 and a cp_multi of 1.0.
  if (take_objective_into_account) {
    max_sum += ToDouble(objective_infinity_norm_);
  }

  *scaling = 1.0;
  std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
  if (max_sum == 0.0) {
    // Empty linear combination.
    return integer_multipliers;
  }

  // We want max_sum * scaling to be <= 2 ^ max_pow and fit on an int64_t.
  // We use a power of 2 as this seems to work better.
  const double threshold = std::ldexp(1, max_pow) / max_sum;
  if (threshold < 1.0) {
    // TODO(user): we currently do not support scaling down, so we just abort
    // in this case.
    return integer_multipliers;
  }
  while (2 * *scaling <= threshold) *scaling *= 2;

  // Scale the multipliers by *scaling, rounding to the nearest integer and
  // dropping entries that round to zero.
  //
  // TODO(user): Maybe use int128 to avoid overflow?
  for (const auto& entry : tmp_cp_multipliers_) {
    const IntegerValue coeff(std::round(entry.second * (*scaling)));
    if (coeff != 0) integer_multipliers.push_back({entry.first, coeff});
  }
  return integer_multipliers;
}
1938
1939bool LinearProgrammingConstraint::ComputeNewLinearConstraint(
1940 const std::vector<std::pair<RowIndex, IntegerValue>>& integer_multipliers,
1941 ScatteredIntegerVector* scattered_vector, IntegerValue* upper_bound) const {
1942 // Initialize the new constraint.
1943 *upper_bound = 0;
1944 scattered_vector->ClearAndResize(integer_variables_.size());
1945
1946 // Compute the new constraint by taking the linear combination given by
1947 // integer_multipliers of the integer constraints in integer_lp_.
1948 for (const std::pair<RowIndex, IntegerValue>& term : integer_multipliers) {
1949 const RowIndex row = term.first;
1950 const IntegerValue multiplier = term.second;
1951 CHECK_LT(row, integer_lp_.size());
1952
1953 // Update the constraint.
1954 if (!scattered_vector->AddLinearExpressionMultiple(
1955 multiplier, integer_lp_[row].terms)) {
1956 return false;
1957 }
1958
1959 // Update the upper bound.
1960 const IntegerValue bound =
1961 multiplier > 0 ? integer_lp_[row].ub : integer_lp_[row].lb;
1962 if (!AddProductTo(multiplier, bound, upper_bound)) return false;
1963 }
1964
1965 return true;
1966}
1967
// Heuristically adjusts each multiplier (by at most a sign-preserving delta)
// so that the constraint in scattered_vector/upper_bound becomes tighter,
// while guaranteeing that none of the exact computations can overflow.
//
// TODO(user): no need to update the multipliers.
void LinearProgrammingConstraint::AdjustNewLinearConstraint(
    std::vector<std::pair<glop::RowIndex, IntegerValue>>* integer_multipliers,
    ScatteredIntegerVector* scattered_vector, IntegerValue* upper_bound) const {
  const IntegerValue kMaxWantedCoeff(1e18);
  for (std::pair<RowIndex, IntegerValue>& term : *integer_multipliers) {
    const RowIndex row = term.first;
    const IntegerValue multiplier = term.second;
    if (multiplier == 0) continue;

    // We will only allow change of the form "multiplier += to_add" with to_add
    // in [-negative_limit, positive_limit].
    IntegerValue negative_limit = kMaxWantedCoeff;
    IntegerValue positive_limit = kMaxWantedCoeff;

    // Make sure we never change the sign of the multiplier, except if the
    // row is an equality in which case we don't care.
    if (integer_lp_[row].ub != integer_lp_[row].lb) {
      if (multiplier > 0) {
        negative_limit = std::min(negative_limit, multiplier);
      } else {
        positive_limit = std::min(positive_limit, -multiplier);
      }
    }

    // Make sure upper_bound + to_add * row_bound never overflows.
    const IntegerValue row_bound =
        multiplier > 0 ? integer_lp_[row].ub : integer_lp_[row].lb;
    if (row_bound != 0) {
      const IntegerValue limit1 = FloorRatio(
          std::max(IntegerValue(0), kMaxWantedCoeff - IntTypeAbs(*upper_bound)),
          IntTypeAbs(row_bound));
      const IntegerValue limit2 =
          FloorRatio(kMaxWantedCoeff, IntTypeAbs(row_bound));
      if ((*upper_bound > 0) == (row_bound > 0)) {  // Same sign.
        positive_limit = std::min(positive_limit, limit1);
        negative_limit = std::min(negative_limit, limit2);
      } else {
        negative_limit = std::min(negative_limit, limit1);
        positive_limit = std::min(positive_limit, limit2);
      }
    }

    // If we add the row to the scattered_vector, diff will indicate by how
    // much |upper_bound - ImpliedLB(scattered_vector)| will change. That
    // corresponds to increasing the multiplier by 1.
    //
    // At this stage, we are not sure computing sum coeff * bound will not
    // overflow, so we use floating point numbers. It is fine to do so since
    // this is not directly involved in the actual exact constraint generation:
    // these variables are just used in a heuristic.
    double positive_diff = ToDouble(row_bound);
    double negative_diff = ToDouble(row_bound);

    // TODO(user): we could relax a bit some of the conditions and allow a sign
    // change. It is just trickier to compute the diff when we allow such
    // changes.
    for (const auto& entry : integer_lp_[row].terms) {
      const ColIndex col = entry.first;
      const IntegerValue coeff = entry.second;
      const IntegerValue abs_coef = IntTypeAbs(coeff);
      CHECK_NE(coeff, 0);

      const IntegerVariable var = integer_variables_[col.value()];
      const IntegerValue lb = integer_trail_->LowerBound(var);
      const IntegerValue ub = integer_trail_->UpperBound(var);

      // Moving a variable away from zero seems to improve the bound even
      // if it reduces the number of non-zeros. Note that it is because of
      // this that positive_diff and negative_diff are not the same.
      const IntegerValue current = (*scattered_vector)[col];
      if (current == 0) {
        const IntegerValue overflow_limit(
            FloorRatio(kMaxWantedCoeff, abs_coef));
        positive_limit = std::min(positive_limit, overflow_limit);
        negative_limit = std::min(negative_limit, overflow_limit);
        if (coeff > 0) {
          positive_diff -= ToDouble(coeff) * ToDouble(lb);
          negative_diff -= ToDouble(coeff) * ToDouble(ub);
        } else {
          positive_diff -= ToDouble(coeff) * ToDouble(ub);
          negative_diff -= ToDouble(coeff) * ToDouble(lb);
        }
        continue;
      }

      // We don't want to change the sign of current (except if the variable is
      // fixed) or to have an overflow.
      //
      // Corner cases:
      //  - IntTypeAbs(current) can be larger than kMaxWantedCoeff!
      //  - The code assumes that 2 * kMaxWantedCoeff does not overflow.
      const IntegerValue current_magnitude = IntTypeAbs(current);
      const IntegerValue other_direction_limit = FloorRatio(
          lb == ub
              ? kMaxWantedCoeff + std::min(current_magnitude,
                                           kMaxIntegerValue - kMaxWantedCoeff)
              : current_magnitude,
          abs_coef);
      const IntegerValue same_direction_limit(FloorRatio(
          std::max(IntegerValue(0), kMaxWantedCoeff - current_magnitude),
          abs_coef));
      if ((current > 0) == (coeff > 0)) {  // Same sign.
        negative_limit = std::min(negative_limit, other_direction_limit);
        positive_limit = std::min(positive_limit, same_direction_limit);
      } else {
        negative_limit = std::min(negative_limit, same_direction_limit);
        positive_limit = std::min(positive_limit, other_direction_limit);
      }

      // This is how diff changes.
      const IntegerValue implied = current > 0 ? lb : ub;
      if (implied != 0) {
        positive_diff -= ToDouble(coeff) * ToDouble(implied);
        negative_diff -= ToDouble(coeff) * ToDouble(implied);
      }
    }

    // Only add a multiple of this row if it tightens the final constraint.
    // The positive_diff/negative_diff are supposed to be integer modulo the
    // double precision, so we only add a multiple if they seem far away from
    // zero.
    IntegerValue to_add(0);
    if (positive_diff <= -1.0 && positive_limit > 0) {
      to_add = positive_limit;
    }
    if (negative_diff >= 1.0 && negative_limit > 0) {
      // Pick this if it is better than the positive sign.
      if (to_add == 0 ||
          std::abs(ToDouble(negative_limit) * negative_diff) >
              std::abs(ToDouble(positive_limit) * positive_diff)) {
        to_add = -negative_limit;
      }
    }
    if (to_add != 0) {
      term.second += to_add;
      *upper_bound += to_add * row_bound;

      // TODO(user): we could avoid checking overflow here, but this is likely
      // not in the hot loop.
      CHECK(scattered_vector->AddLinearExpressionMultiple(
          to_add, integer_lp_[row].terms));
    }
  }
}
2113
// The "exact" computation goes as follows:
//
// Given any INTEGER linear combination of the LP constraints, we can create a
// new integer constraint that is valid (its computation must not overflow
// though). Let's call this "linear_combination <= ub". We can then always add
// to it the inequality "objective_terms <= objective_var", so we get:
// ImpliedLB(objective_terms + linear_combination) - ub <= objective_var.
// where ImpliedLB() is computed from the variable current bounds.
//
// Now, if we use for the linear combination an approximation of the optimal
// negated dual LP values (by scaling them and rounding them to integer), we
// will get an EXACT objective lower bound that is more or less the same as the
// inexact bound given by the LP relaxation. This allows to derive exact
// reasons for any propagation done by this constraint.
bool LinearProgrammingConstraint::ExactLpReasonning() {
  // Clear old reason and deductions.
  integer_reason_.clear();
  deductions_.clear();
  deductions_reason_.clear();

  // The row multipliers will be the negation of the LP duals.
  //
  // TODO(user): Provide and use a sparse API in Glop to get the duals.
  const RowIndex num_rows = simplex_.GetProblemNumRows();
  tmp_lp_multipliers_.clear();
  for (RowIndex row(0); row < num_rows; ++row) {
    const double value = -simplex_.GetDualValue(row);
    if (std::abs(value) < kZeroTolerance) continue;
    tmp_lp_multipliers_.push_back({row, value});
  }

  // Round the duals to integer multipliers that cannot overflow.
  Fractional scaling;
  tmp_integer_multipliers_ = ScaleLpMultiplier(
      /*take_objective_into_account=*/true, tmp_lp_multipliers_, &scaling);

  IntegerValue rc_ub;
  if (!ComputeNewLinearConstraint(tmp_integer_multipliers_,
                                  &tmp_scattered_vector_, &rc_ub)) {
    VLOG(1) << "Issue while computing the exact LP reason. Aborting.";
    return true;
  }

  // The "objective constraint" behaves like if the unscaled cp multiplier was
  // 1.0, so we will multiply it by this number and add it to reduced_costs.
  const IntegerValue obj_scale(std::round(scaling));
  if (obj_scale == 0) {
    VLOG(1) << "Overflow during exact LP reasoning. scaling=" << scaling;
    return true;
  }
  CHECK(tmp_scattered_vector_.AddLinearExpressionMultiple(obj_scale,
                                                          integer_objective_));
  CHECK(AddProductTo(-obj_scale, integer_objective_offset_, &rc_ub));
  AdjustNewLinearConstraint(&tmp_integer_multipliers_, &tmp_scattered_vector_,
                            &rc_ub);

  // Create the IntegerSumLE that will allow to propagate the objective and
  // more generally do the reduced cost fixing.
  tmp_scattered_vector_.ConvertToLinearConstraint(integer_variables_, rc_ub,
                                                  &tmp_constraint_);
  tmp_constraint_.vars.push_back(objective_cp_);
  tmp_constraint_.coeffs.push_back(-obj_scale);
  DivideByGCD(&tmp_constraint_);
  PreventOverflow(&tmp_constraint_);
  DCHECK(!PossibleOverflow(tmp_constraint_));
  DCHECK(constraint_manager_.DebugCheckConstraint(tmp_constraint_));

  // Corner case where PreventOverflow() removed all terms: the constraint is
  // then either trivially true or a conflict.
  if (tmp_constraint_.vars.empty()) {
    trail_->MutableConflict()->clear();
    return tmp_constraint_.ub >= 0;
  }

  IntegerSumLE* cp_constraint =
      new IntegerSumLE({}, tmp_constraint_.vars, tmp_constraint_.coeffs,
                       tmp_constraint_.ub, model_);
  if (trail_->CurrentDecisionLevel() == 0) {
    // Since we will never ask the reason for a constraint at level 0, we just
    // keep the last one.
    optimal_constraints_.clear();
  }
  optimal_constraints_.emplace_back(cp_constraint);
  rev_optimal_constraints_size_ = optimal_constraints_.size();
  if (!cp_constraint->PropagateAtLevelZero()) return false;
  return cp_constraint->Propagate();
}
2199
2200bool LinearProgrammingConstraint::FillExactDualRayReason() {
2201 Fractional scaling;
2202 const glop::DenseColumn ray = simplex_.GetDualRay();
2203 tmp_lp_multipliers_.clear();
2204 for (RowIndex row(0); row < ray.size(); ++row) {
2205 const double value = ray[row];
2206 if (std::abs(value) < kZeroTolerance) continue;
2207 tmp_lp_multipliers_.push_back({row, value});
2208 }
2209 tmp_integer_multipliers_ = ScaleLpMultiplier(
2210 /*take_objective_into_account=*/false, tmp_lp_multipliers_, &scaling);
2211
2212 IntegerValue new_constraint_ub;
2213 if (!ComputeNewLinearConstraint(tmp_integer_multipliers_,
2214 &tmp_scattered_vector_, &new_constraint_ub)) {
2215 VLOG(1) << "Isse while computing the exact dual ray reason. Aborting.";
2216 return false;
2217 }
2218
2219 AdjustNewLinearConstraint(&tmp_integer_multipliers_, &tmp_scattered_vector_,
2220 &new_constraint_ub);
2221
2222 tmp_scattered_vector_.ConvertToLinearConstraint(
2223 integer_variables_, new_constraint_ub, &tmp_constraint_);
2224 DivideByGCD(&tmp_constraint_);
2225 PreventOverflow(&tmp_constraint_);
2226 DCHECK(!PossibleOverflow(tmp_constraint_));
2227 DCHECK(constraint_manager_.DebugCheckConstraint(tmp_constraint_));
2228
2229 const IntegerValue implied_lb = GetImpliedLowerBound(tmp_constraint_);
2230 if (implied_lb <= tmp_constraint_.ub) {
2231 VLOG(1) << "LP exact dual ray not infeasible,"
2232 << " implied_lb: " << implied_lb.value() / scaling
2233 << " ub: " << tmp_constraint_.ub.value() / scaling;
2234 return false;
2235 }
2236 const IntegerValue slack = (implied_lb - tmp_constraint_.ub) - 1;
2237 SetImpliedLowerBoundReason(tmp_constraint_, slack);
2238 return true;
2239}
2240
2241int64_t LinearProgrammingConstraint::CalculateDegeneracy() {
2242 const glop::ColIndex num_vars = simplex_.GetProblemNumCols();
2243 int num_non_basic_with_zero_rc = 0;
2244 for (glop::ColIndex i(0); i < num_vars; ++i) {
2245 const double rc = simplex_.GetReducedCost(i);
2246 if (rc != 0.0) continue;
2248 continue;
2249 }
2250 num_non_basic_with_zero_rc++;
2251 }
2252 const int64_t num_cols = simplex_.GetProblemNumCols().value();
2253 is_degenerate_ = num_non_basic_with_zero_rc >= 0.3 * num_cols;
2254 return num_non_basic_with_zero_rc;
2255}
2256
2257void LinearProgrammingConstraint::ReducedCostStrengtheningDeductions(
2258 double cp_objective_delta) {
2259 deductions_.clear();
2260
2261 // TRICKY: while simplex_.GetObjectiveValue() use the objective scaling factor
2262 // stored in the lp_data_, all the other functions like GetReducedCost() or
2263 // GetVariableValue() do not.
2264 const double lp_objective_delta =
2265 cp_objective_delta / lp_data_.objective_scaling_factor();
2266 const int num_vars = integer_variables_.size();
2267 for (int i = 0; i < num_vars; i++) {
2268 const IntegerVariable cp_var = integer_variables_[i];
2269 const glop::ColIndex lp_var = glop::ColIndex(i);
2270 const double rc = simplex_.GetReducedCost(lp_var);
2271 const double value = simplex_.GetVariableValue(lp_var);
2272
2273 if (rc == 0.0) continue;
2274 const double lp_other_bound = value + lp_objective_delta / rc;
2275 const double cp_other_bound =
2276 scaler_.UnscaleVariableValue(lp_var, lp_other_bound);
2277
2278 if (rc > kLpEpsilon) {
2279 const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
2280 const double new_ub = std::floor(cp_other_bound + kCpEpsilon);
2281 if (new_ub < ub) {
2282 // TODO(user): Because rc > kLpEpsilon, the lower_bound of cp_var
2283 // will be part of the reason returned by FillReducedCostsReason(), but
2284 // we actually do not need it here. Same below.
2285 const IntegerValue new_ub_int(static_cast<IntegerValue>(new_ub));
2286 deductions_.push_back(IntegerLiteral::LowerOrEqual(cp_var, new_ub_int));
2287 }
2288 } else if (rc < -kLpEpsilon) {
2289 const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
2290 const double new_lb = std::ceil(cp_other_bound - kCpEpsilon);
2291 if (new_lb > lb) {
2292 const IntegerValue new_lb_int(static_cast<IntegerValue>(new_lb));
2293 deductions_.push_back(
2294 IntegerLiteral::GreaterOrEqual(cp_var, new_lb_int));
2295 }
2296 }
2297 }
2298}
2299
2300namespace {
2301
2302// Add a cut of the form Sum_{outgoing arcs from S} lp >= rhs_lower_bound.
2303//
2304// Note that we used to also add the same cut for the incoming arcs, but because
2305// of flow conservation on these problems, the outgoing flow is always the same
2306// as the incoming flow, so adding this extra cut doesn't seem relevant.
2307void AddOutgoingCut(
2308 int num_nodes, int subset_size, const std::vector<bool>& in_subset,
2309 const std::vector<int>& tails, const std::vector<int>& heads,
2310 const std::vector<Literal>& literals,
2311 const std::vector<double>& literal_lp_values, int64_t rhs_lower_bound,
2313 LinearConstraintManager* manager, Model* model) {
2314 // A node is said to be optional if it can be excluded from the subcircuit,
2315 // in which case there is a self-loop on that node.
2316 // If there are optional nodes, use extended formula:
2317 // sum(cut) >= 1 - optional_loop_in - optional_loop_out
2318 // where optional_loop_in's node is in subset, optional_loop_out's is out.
2319 // TODO(user): Favor optional loops fixed to zero at root.
2320 int num_optional_nodes_in = 0;
2321 int num_optional_nodes_out = 0;
2322 int optional_loop_in = -1;
2323 int optional_loop_out = -1;
2324 for (int i = 0; i < tails.size(); ++i) {
2325 if (tails[i] != heads[i]) continue;
2326 if (in_subset[tails[i]]) {
2327 num_optional_nodes_in++;
2328 if (optional_loop_in == -1 ||
2329 literal_lp_values[i] < literal_lp_values[optional_loop_in]) {
2330 optional_loop_in = i;
2331 }
2332 } else {
2333 num_optional_nodes_out++;
2334 if (optional_loop_out == -1 ||
2335 literal_lp_values[i] < literal_lp_values[optional_loop_out]) {
2336 optional_loop_out = i;
2337 }
2338 }
2339 }
2340
2341 // TODO(user): The lower bound for CVRP is computed assuming all nodes must be
2342 // served, if it is > 1 we lower it to one in the presence of optional nodes.
2343 if (num_optional_nodes_in + num_optional_nodes_out > 0) {
2344 CHECK_GE(rhs_lower_bound, 1);
2345 rhs_lower_bound = 1;
2346 }
2347
2348 LinearConstraintBuilder outgoing(model, IntegerValue(rhs_lower_bound),
2350 double sum_outgoing = 0.0;
2351
2352 // Add outgoing arcs, compute outgoing flow.
2353 for (int i = 0; i < tails.size(); ++i) {
2354 if (in_subset[tails[i]] && !in_subset[heads[i]]) {
2355 sum_outgoing += literal_lp_values[i];
2356 CHECK(outgoing.AddLiteralTerm(literals[i], IntegerValue(1)));
2357 }
2358 }
2359
2360 // Support optional nodes if any.
2361 if (num_optional_nodes_in + num_optional_nodes_out > 0) {
2362 // When all optionals of one side are excluded in lp solution, no cut.
2363 if (num_optional_nodes_in == subset_size &&
2364 (optional_loop_in == -1 ||
2365 literal_lp_values[optional_loop_in] > 1.0 - 1e-6)) {
2366 return;
2367 }
2368 if (num_optional_nodes_out == num_nodes - subset_size &&
2369 (optional_loop_out == -1 ||
2370 literal_lp_values[optional_loop_out] > 1.0 - 1e-6)) {
2371 return;
2372 }
2373
2374 // There is no mandatory node in subset, add optional_loop_in.
2375 if (num_optional_nodes_in == subset_size) {
2376 CHECK(
2377 outgoing.AddLiteralTerm(literals[optional_loop_in], IntegerValue(1)));
2378 sum_outgoing += literal_lp_values[optional_loop_in];
2379 }
2380
2381 // There is no mandatory node out of subset, add optional_loop_out.
2382 if (num_optional_nodes_out == num_nodes - subset_size) {
2383 CHECK(outgoing.AddLiteralTerm(literals[optional_loop_out],
2384 IntegerValue(1)));
2385 sum_outgoing += literal_lp_values[optional_loop_out];
2386 }
2387 }
2388
2389 if (sum_outgoing < rhs_lower_bound - 1e-6) {
2390 manager->AddCut(outgoing.Build(), "Circuit", lp_values);
2391 }
2392}
2393
2394} // namespace
2395
2396// We roughly follow the algorithm described in section 6 of "The Traveling
2397// Salesman Problem, A computational Study", David L. Applegate, Robert E.
2398// Bixby, Vasek Chvatal, William J. Cook.
2399//
2400// Note that this is mainly a "symmetric" case algo, but it does still work for
2401// the asymmetric case.
2403 int num_nodes, const std::vector<int>& tails, const std::vector<int>& heads,
2404 const std::vector<Literal>& literals,
2406 absl::Span<const int64_t> demands, int64_t capacity,
2407 LinearConstraintManager* manager, Model* model) {
2408 if (num_nodes <= 2) return;
2409
2410 // We will collect only the arcs with a positive lp_values to speed up some
2411 // computation below.
2412 struct Arc {
2413 int tail;
2414 int head;
2415 double lp_value;
2416 };
2417 std::vector<Arc> relevant_arcs;
2418
2419 // Sort the arcs by non-increasing lp_values.
2420 std::vector<double> literal_lp_values(literals.size());
2421 std::vector<std::pair<double, int>> arc_by_decreasing_lp_values;
2422 auto* encoder = model->GetOrCreate<IntegerEncoder>();
2423 for (int i = 0; i < literals.size(); ++i) {
2424 double lp_value;
2425 const IntegerVariable direct_view = encoder->GetLiteralView(literals[i]);
2426 if (direct_view != kNoIntegerVariable) {
2427 lp_value = lp_values[direct_view];
2428 } else {
2429 lp_value =
2430 1.0 - lp_values[encoder->GetLiteralView(literals[i].Negated())];
2431 }
2432 literal_lp_values[i] = lp_value;
2433
2434 if (lp_value < 1e-6) continue;
2435 relevant_arcs.push_back({tails[i], heads[i], lp_value});
2436 arc_by_decreasing_lp_values.push_back({lp_value, i});
2437 }
2438 std::sort(arc_by_decreasing_lp_values.begin(),
2439 arc_by_decreasing_lp_values.end(),
2440 std::greater<std::pair<double, int>>());
2441
2442 // We will do a union-find by adding one by one the arc of the lp solution
2443 // in the order above. Every intermediate set during this construction will
2444 // be a candidate for a cut.
2445 //
2446 // In parallel to the union-find, to efficiently reconstruct these sets (at
2447 // most num_nodes), we construct a "decomposition forest" of the different
2448 // connected components. Note that we don't exploit any asymmetric nature of
2449 // the graph here. This is exactly the algo 6.3 in the book above.
2450 int num_components = num_nodes;
2451 std::vector<int> parent(num_nodes);
2452 std::vector<int> root(num_nodes);
2453 for (int i = 0; i < num_nodes; ++i) {
2454 parent[i] = i;
2455 root[i] = i;
2456 }
2457 auto get_root_and_compress_path = [&root](int node) {
2458 int r = node;
2459 while (root[r] != r) r = root[r];
2460 while (root[node] != r) {
2461 const int next = root[node];
2462 root[node] = r;
2463 node = next;
2464 }
2465 return r;
2466 };
2467 for (const auto& pair : arc_by_decreasing_lp_values) {
2468 if (num_components == 2) break;
2469 const int tail = get_root_and_compress_path(tails[pair.second]);
2470 const int head = get_root_and_compress_path(heads[pair.second]);
2471 if (tail != head) {
2472 // Update the decomposition forest, note that the number of nodes is
2473 // growing.
2474 const int new_node = parent.size();
2475 parent.push_back(new_node);
2476 parent[head] = new_node;
2477 parent[tail] = new_node;
2478 --num_components;
2479
2480 // It is important that the union-find representative is the same node.
2481 root.push_back(new_node);
2482 root[head] = new_node;
2483 root[tail] = new_node;
2484 }
2485 }
2486
2487 // For each node in the decomposition forest, try to add a cut for the set
2488 // formed by the nodes and its children. To do that efficiently, we first
2489 // order the nodes so that for each node in a tree, the set of children forms
2490 // a consecutive span in the pre_order vector. This vector just lists the
2491 // nodes in the "pre-order" graph traversal order. The Spans will point inside
2492 // the pre_order vector, it is why we initialize it once and for all.
2493 int new_size = 0;
2494 std::vector<int> pre_order(num_nodes);
2495 std::vector<absl::Span<const int>> subsets;
2496 {
2497 std::vector<absl::InlinedVector<int, 2>> graph(parent.size());
2498 for (int i = 0; i < parent.size(); ++i) {
2499 if (parent[i] != i) graph[parent[i]].push_back(i);
2500 }
2501 std::vector<int> queue;
2502 std::vector<bool> seen(graph.size(), false);
2503 std::vector<int> start_index(parent.size());
2504 for (int i = num_nodes; i < parent.size(); ++i) {
2505 // Note that because of the way we constructed 'parent', the graph is a
2506 // binary tree. This is not required for the correctness of the algorithm
2507 // here though.
2508 CHECK(graph[i].empty() || graph[i].size() == 2);
2509 if (parent[i] != i) continue;
2510
2511 // Explore the subtree rooted at node i.
2512 CHECK(!seen[i]);
2513 queue.push_back(i);
2514 while (!queue.empty()) {
2515 const int node = queue.back();
2516 if (seen[node]) {
2517 queue.pop_back();
2518 // All the children of node are in the span [start, end) of the
2519 // pre_order vector.
2520 const int start = start_index[node];
2521 if (new_size - start > 1) {
2522 subsets.emplace_back(&pre_order[start], new_size - start);
2523 }
2524 continue;
2525 }
2526 seen[node] = true;
2527 start_index[node] = new_size;
2528 if (node < num_nodes) pre_order[new_size++] = node;
2529 for (const int child : graph[node]) {
2530 if (!seen[child]) queue.push_back(child);
2531 }
2532 }
2533 }
2534 }
2535
2536 // Compute the total demands in order to know the minimum incoming/outgoing
2537 // flow.
2538 int64_t total_demands = 0;
2539 if (!demands.empty()) {
2540 for (const int64_t demand : demands) total_demands += demand;
2541 }
2542
2543 // Process each subsets and add any violated cut.
2544 CHECK_EQ(pre_order.size(), num_nodes);
2545 std::vector<bool> in_subset(num_nodes, false);
2546 for (const absl::Span<const int> subset : subsets) {
2547 CHECK_GT(subset.size(), 1);
2548 CHECK_LT(subset.size(), num_nodes);
2549
2550 // These fields will be left untouched if demands.empty().
2551 bool contain_depot = false;
2552 int64_t subset_demand = 0;
2553
2554 // Initialize "in_subset" and the subset demands.
2555 for (const int n : subset) {
2556 in_subset[n] = true;
2557 if (!demands.empty()) {
2558 if (n == 0) contain_depot = true;
2559 subset_demand += demands[n];
2560 }
2561 }
2562
2563 // Compute a lower bound on the outgoing flow.
2564 //
2565 // TODO(user): This lower bound assume all nodes in subset must be served,
2566 // which is not the case. For TSP we do the correct thing in
2567 // AddOutgoingCut() but not for CVRP... Fix!!
2568 //
2569 // TODO(user): It could be very interesting to see if this "min outgoing
2570 // flow" cannot be automatically infered from the constraint in the
2571 // precedence graph. This might work if we assume that any kind of path
2572 // cumul constraint is encoded with constraints:
2573 // [edge => value_head >= value_tail + edge_weight].
2574 // We could take the minimum incoming edge weight per node in the set, and
2575 // use the cumul variable domain to infer some capacity.
2576 int64_t min_outgoing_flow = 1;
2577 if (!demands.empty()) {
2578 min_outgoing_flow =
2579 contain_depot
2580 ? (total_demands - subset_demand + capacity - 1) / capacity
2581 : (subset_demand + capacity - 1) / capacity;
2582 }
2583
2584 // We still need to serve nodes with a demand of zero, and in the corner
2585 // case where all node in subset have a zero demand, the formula above
2586 // result in a min_outgoing_flow of zero.
2587 min_outgoing_flow = std::max(min_outgoing_flow, int64_t{1});
2588
2589 // Compute the current outgoing flow out of the subset.
2590 //
2591 // This can take a significant portion of the running time, it is why it is
2592 // faster to do it only on arcs with non-zero lp values which should be in
2593 // linear number rather than the total number of arc which can be quadratic.
2594 //
2595 // TODO(user): For the symmetric case there is an even faster algo. See if
2596 // it can be generalized to the asymmetric one if become needed.
2597 // Reference is algo 6.4 of the "The Traveling Salesman Problem" book
2598 // mentionned above.
2599 double outgoing_flow = 0.0;
2600 for (const auto arc : relevant_arcs) {
2601 if (in_subset[arc.tail] && !in_subset[arc.head]) {
2602 outgoing_flow += arc.lp_value;
2603 }
2604 }
2605
2606 // Add a cut if the current outgoing flow is not enough.
2607 if (outgoing_flow < min_outgoing_flow - 1e-6) {
2608 AddOutgoingCut(num_nodes, subset.size(), in_subset, tails, heads,
2609 literals, literal_lp_values,
2610 /*rhs_lower_bound=*/min_outgoing_flow, lp_values, manager,
2611 model);
2612 }
2613
2614 // Sparse clean up.
2615 for (const int n : subset) in_subset[n] = false;
2616 }
2617}
2618
2619namespace {
2620
2621// Returns for each literal its integer view, or the view of its negation.
2622std::vector<IntegerVariable> GetAssociatedVariables(
2623 const std::vector<Literal>& literals, Model* model) {
2624 auto* encoder = model->GetOrCreate<IntegerEncoder>();
2625 std::vector<IntegerVariable> result;
2626 for (const Literal l : literals) {
2627 const IntegerVariable direct_view = encoder->GetLiteralView(l);
2628 if (direct_view != kNoIntegerVariable) {
2629 result.push_back(direct_view);
2630 } else {
2631 result.push_back(encoder->GetLiteralView(l.Negated()));
2632 DCHECK_NE(result.back(), kNoIntegerVariable);
2633 }
2634 }
2635 return result;
2636}
2637
2638} // namespace
2639
2640// We use a basic algorithm to detect components that are not connected to the
2641// rest of the graph in the LP solution, and add cuts to force some arcs to
2642// enter and leave this component from outside.
2644 int num_nodes, const std::vector<int>& tails, const std::vector<int>& heads,
2645 const std::vector<Literal>& literals, Model* model) {
2646 CutGenerator result;
2647 result.vars = GetAssociatedVariables(literals, model);
2648 result.generate_cuts =
2649 [num_nodes, tails, heads, literals, model](
2651 LinearConstraintManager* manager) {
2653 num_nodes, tails, heads, literals, lp_values,
2654 /*demands=*/{}, /*capacity=*/0, manager, model);
2655 return true;
2656 };
2657 return result;
2658}
2659
2661 const std::vector<int>& tails,
2662 const std::vector<int>& heads,
2663 const std::vector<Literal>& literals,
2664 const std::vector<int64_t>& demands,
2665 int64_t capacity, Model* model) {
2666 CutGenerator result;
2667 result.vars = GetAssociatedVariables(literals, model);
2668 result.generate_cuts =
2669 [num_nodes, tails, heads, demands, capacity, literals, model](
2671 LinearConstraintManager* manager) {
2672 SeparateSubtourInequalities(num_nodes, tails, heads, literals,
2673 lp_values, demands, capacity, manager,
2674 model);
2675 return true;
2676 };
2677 return result;
2678}
2679
2680std::function<IntegerLiteral()>
2682 // Gather all 0-1 variables that appear in this LP.
2683 std::vector<IntegerVariable> variables;
2684 for (IntegerVariable var : integer_variables_) {
2685 if (integer_trail_->LowerBound(var) == 0 &&
2686 integer_trail_->UpperBound(var) == 1) {
2687 variables.push_back(var);
2688 }
2689 }
2690 VLOG(1) << "HeuristicLPMostInfeasibleBinary has " << variables.size()
2691 << " variables.";
2692
2693 return [this, variables]() {
2694 const double kEpsilon = 1e-6;
2695 // Find most fractional value.
2696 IntegerVariable fractional_var = kNoIntegerVariable;
2697 double fractional_distance_best = -1.0;
2698 for (const IntegerVariable var : variables) {
2699 // Skip ignored and fixed variables.
2700 if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2701 const IntegerValue lb = integer_trail_->LowerBound(var);
2702 const IntegerValue ub = integer_trail_->UpperBound(var);
2703 if (lb == ub) continue;
2704
2705 // Check variable's support is fractional.
2706 const double lp_value = this->GetSolutionValue(var);
2707 const double fractional_distance =
2708 std::min(std::ceil(lp_value - kEpsilon) - lp_value,
2709 lp_value - std::floor(lp_value + kEpsilon));
2710 if (fractional_distance < kEpsilon) continue;
2711
2712 // Keep variable if it is farther from integrality than the previous.
2713 if (fractional_distance > fractional_distance_best) {
2714 fractional_var = var;
2715 fractional_distance_best = fractional_distance;
2716 }
2717 }
2718
2719 if (fractional_var != kNoIntegerVariable) {
2720 IntegerLiteral::GreaterOrEqual(fractional_var, IntegerValue(1));
2721 }
2722 return IntegerLiteral();
2723 };
2724}
2725
2726std::function<IntegerLiteral()>
2728 // Gather all 0-1 variables that appear in this LP.
2729 std::vector<IntegerVariable> variables;
2730 for (IntegerVariable var : integer_variables_) {
2731 if (integer_trail_->LowerBound(var) == 0 &&
2732 integer_trail_->UpperBound(var) == 1) {
2733 variables.push_back(var);
2734 }
2735 }
2736 VLOG(1) << "HeuristicLpReducedCostBinary has " << variables.size()
2737 << " variables.";
2738
2739 // Store average of reduced cost from 1 to 0. The best heuristic only sets
2740 // variables to one and cares about cost to zero, even though classic
2741 // pseudocost will use max_var min(cost_to_one[var], cost_to_zero[var]).
2742 const int num_vars = variables.size();
2743 std::vector<double> cost_to_zero(num_vars, 0.0);
2744 std::vector<int> num_cost_to_zero(num_vars);
2745 int num_calls = 0;
2746
2747 return [=]() mutable {
2748 const double kEpsilon = 1e-6;
2749
2750 // Every 10000 calls, decay pseudocosts.
2751 num_calls++;
2752 if (num_calls == 10000) {
2753 for (int i = 0; i < num_vars; i++) {
2754 cost_to_zero[i] /= 2;
2755 num_cost_to_zero[i] /= 2;
2756 }
2757 num_calls = 0;
2758 }
2759
2760 // Accumulate pseudo-costs of all unassigned variables.
2761 for (int i = 0; i < num_vars; i++) {
2762 const IntegerVariable var = variables[i];
2763 // Skip ignored and fixed variables.
2764 if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2765 const IntegerValue lb = integer_trail_->LowerBound(var);
2766 const IntegerValue ub = integer_trail_->UpperBound(var);
2767 if (lb == ub) continue;
2768
2769 const double rc = this->GetSolutionReducedCost(var);
2770 // Skip reduced costs that are nonzero because of numerical issues.
2771 if (std::abs(rc) < kEpsilon) continue;
2772
2773 const double value = std::round(this->GetSolutionValue(var));
2774 if (value == 1.0 && rc < 0.0) {
2775 cost_to_zero[i] -= rc;
2776 num_cost_to_zero[i]++;
2777 }
2778 }
2779
2780 // Select noninstantiated variable with highest pseudo-cost.
2781 int selected_index = -1;
2782 double best_cost = 0.0;
2783 for (int i = 0; i < num_vars; i++) {
2784 const IntegerVariable var = variables[i];
2785 // Skip ignored and fixed variables.
2786 if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2787 if (integer_trail_->IsFixed(var)) continue;
2788
2789 if (num_cost_to_zero[i] > 0 &&
2790 best_cost < cost_to_zero[i] / num_cost_to_zero[i]) {
2791 best_cost = cost_to_zero[i] / num_cost_to_zero[i];
2792 selected_index = i;
2793 }
2794 }
2795
2796 if (selected_index >= 0) {
2797 return IntegerLiteral::GreaterOrEqual(variables[selected_index],
2798 IntegerValue(1));
2799 }
2800 return IntegerLiteral();
2801 };
2802}
2803
2804void LinearProgrammingConstraint::UpdateAverageReducedCosts() {
2805 const int num_vars = integer_variables_.size();
2806 if (sum_cost_down_.size() < num_vars) {
2807 sum_cost_down_.resize(num_vars, 0.0);
2808 num_cost_down_.resize(num_vars, 0);
2809 sum_cost_up_.resize(num_vars, 0.0);
2810 num_cost_up_.resize(num_vars, 0);
2811 rc_scores_.resize(num_vars, 0.0);
2812 }
2813
2814 // Decay averages.
2815 num_calls_since_reduced_cost_averages_reset_++;
2816 if (num_calls_since_reduced_cost_averages_reset_ == 10000) {
2817 for (int i = 0; i < num_vars; i++) {
2818 sum_cost_up_[i] /= 2;
2819 num_cost_up_[i] /= 2;
2820 sum_cost_down_[i] /= 2;
2821 num_cost_down_[i] /= 2;
2822 }
2823 num_calls_since_reduced_cost_averages_reset_ = 0;
2824 }
2825
2826 // Accumulate reduced costs of all unassigned variables.
2827 for (int i = 0; i < num_vars; i++) {
2828 const IntegerVariable var = integer_variables_[i];
2829
2830 // Skip ignored and fixed variables.
2831 if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2832 if (integer_trail_->IsFixed(var)) continue;
2833
2834 // Skip reduced costs that are zero or close.
2835 const double rc = lp_reduced_cost_[i];
2836 if (std::abs(rc) < kCpEpsilon) continue;
2837
2838 if (rc < 0.0) {
2839 sum_cost_down_[i] -= rc;
2840 num_cost_down_[i]++;
2841 } else {
2842 sum_cost_up_[i] += rc;
2843 num_cost_up_[i]++;
2844 }
2845 }
2846
2847 // Tricky, we artificially reset the rc_rev_int_repository_ to level zero
2848 // so that the rev_rc_start_ is zero.
2849 rc_rev_int_repository_.SetLevel(0);
2850 rc_rev_int_repository_.SetLevel(trail_->CurrentDecisionLevel());
2851 rev_rc_start_ = 0;
2852
2853 // Cache the new score (higher is better) using the average reduced costs
2854 // as a signal.
2855 positions_by_decreasing_rc_score_.clear();
2856 for (int i = 0; i < num_vars; i++) {
2857 // If only one direction exist, we takes its value divided by 2, so that
2858 // such variable should have a smaller cost than the min of the two side
2859 // except if one direction have a really high reduced costs.
2860 const double a_up =
2861 num_cost_up_[i] > 0 ? sum_cost_up_[i] / num_cost_up_[i] : 0.0;
2862 const double a_down =
2863 num_cost_down_[i] > 0 ? sum_cost_down_[i] / num_cost_down_[i] : 0.0;
2864 if (num_cost_down_[i] > 0 && num_cost_up_[i] > 0) {
2865 rc_scores_[i] = std::min(a_up, a_down);
2866 } else {
2867 rc_scores_[i] = 0.5 * (a_down + a_up);
2868 }
2869
2870 // We ignore scores of zero (i.e. no data) and will follow the default
2871 // search heuristic if all variables are like this.
2872 if (rc_scores_[i] > 0.0) {
2873 positions_by_decreasing_rc_score_.push_back({-rc_scores_[i], i});
2874 }
2875 }
2876 std::sort(positions_by_decreasing_rc_score_.begin(),
2877 positions_by_decreasing_rc_score_.end());
2878}
2879
2880// TODO(user): Remove duplication with HeuristicLpReducedCostBinary().
2881std::function<IntegerLiteral()>
2883 return [this]() { return this->LPReducedCostAverageDecision(); };
2884}
2885
2886IntegerLiteral LinearProgrammingConstraint::LPReducedCostAverageDecision() {
2887 // Select noninstantiated variable with highest positive average reduced cost.
2888 int selected_index = -1;
2889 const int size = positions_by_decreasing_rc_score_.size();
2890 rc_rev_int_repository_.SaveState(&rev_rc_start_);
2891 for (int i = rev_rc_start_; i < size; ++i) {
2892 const int index = positions_by_decreasing_rc_score_[i].second;
2893 const IntegerVariable var = integer_variables_[index];
2894 if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2895 if (integer_trail_->IsFixed(var)) continue;
2896 selected_index = index;
2897 rev_rc_start_ = i;
2898 break;
2899 }
2900
2901 if (selected_index == -1) return IntegerLiteral();
2902 const IntegerVariable var = integer_variables_[selected_index];
2903
2904 // If ceil(value) is current upper bound, try var == upper bound first.
2905 // Guarding with >= prevents numerical problems.
2906 // With 0/1 variables, this will tend to try setting to 1 first,
2907 // which produces more shallow trees.
2908 const IntegerValue ub = integer_trail_->UpperBound(var);
2909 const IntegerValue value_ceil(
2910 std::ceil(this->GetSolutionValue(var) - kCpEpsilon));
2911 if (value_ceil >= ub) {
2913 }
2914
2915 // If floor(value) is current lower bound, try var == lower bound first.
2916 // Guarding with <= prevents numerical problems.
2917 const IntegerValue lb = integer_trail_->LowerBound(var);
2918 const IntegerValue value_floor(
2919 std::floor(this->GetSolutionValue(var) + kCpEpsilon));
2920 if (value_floor <= lb) {
2922 }
2923
2924 // Here lb < value_floor <= value_ceil < ub.
2925 // Try the most promising split between var <= floor or var >= ceil.
2926 const double a_up =
2927 num_cost_up_[selected_index] > 0
2928 ? sum_cost_up_[selected_index] / num_cost_up_[selected_index]
2929 : 0.0;
2930 const double a_down =
2931 num_cost_down_[selected_index] > 0
2932 ? sum_cost_down_[selected_index] / num_cost_down_[selected_index]
2933 : 0.0;
2934 if (a_down < a_up) {
2935 return IntegerLiteral::LowerOrEqual(var, value_floor);
2936 } else {
2937 return IntegerLiteral::GreaterOrEqual(var, value_ceil);
2938 }
2939}
2940
2942 std::string result = "LP statistics:\n";
2943 absl::StrAppend(&result, " final dimension: ", DimensionString(), "\n");
2944 absl::StrAppend(&result, " total number of simplex iterations: ",
2945 total_num_simplex_iterations_, "\n");
2946 absl::StrAppend(&result, " num solves: \n");
2947 for (int i = 0; i < num_solves_by_status_.size(); ++i) {
2948 if (num_solves_by_status_[i] == 0) continue;
2949 absl::StrAppend(&result, " - #",
2951 num_solves_by_status_[i], "\n");
2952 }
2953 absl::StrAppend(&result, constraint_manager_.Statistics());
2954 return result;
2955}
2956
2957} // namespace sat
2958} // namespace operations_research
int64_t max
Definition: alldiff_cst.cc:140
int64_t min
Definition: alldiff_cst.cc:139
#define CHECK(condition)
Definition: base/logging.h:495
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:892
#define CHECK_LT(val1, val2)
Definition: base/logging.h:706
#define CHECK_EQ(val1, val2)
Definition: base/logging.h:703
#define CHECK_GE(val1, val2)
Definition: base/logging.h:707
#define CHECK_GT(val1, val2)
Definition: base/logging.h:708
#define CHECK_NE(val1, val2)
Definition: base/logging.h:704
#define DCHECK_GT(val1, val2)
Definition: base/logging.h:896
#define DCHECK(condition)
Definition: base/logging.h:890
#define VLOG(verboselevel)
Definition: base/logging.h:984
void assign(size_type n, const value_type &val)
void resize(size_type new_size)
size_type size() const
void push_back(const value_type &x)
static int64_t GCD64(int64_t x, int64_t y)
Definition: mathutil.h:107
void SetLevel(int level) final
Definition: rev.h:134
void SaveState(T *object)
Definition: rev.h:61
A simple class to enforce both an elapsed time limit and a deterministic time limit in the same threa...
Definition: time_limit.h:106
bool LimitReached()
Returns true when the external limit is true, or the deterministic time is over the deterministic lim...
Definition: time_limit.h:546
void SetVariableBounds(ColIndex col, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:249
void SetObjectiveOffset(Fractional objective_offset)
Definition: lp_data.cc:331
void SetCoefficient(RowIndex row, ColIndex col, Fractional value)
Definition: lp_data.cc:317
void SetConstraintBounds(RowIndex row, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:309
void SetObjectiveCoefficient(ColIndex col, Fractional value)
Definition: lp_data.cc:326
std::string GetDimensionString() const
Definition: lp_data.cc:425
Fractional objective_scaling_factor() const
Definition: lp_data.h:261
const SparseColumn & GetSparseColumn(ColIndex col) const
Definition: lp_data.cc:409
Fractional VariableScalingFactor(ColIndex col) const
Fractional UnscaleVariableValue(ColIndex col, Fractional value) const
Fractional UnscaleReducedCost(ColIndex col, Fractional value) const
Fractional UnscaleDualValue(RowIndex row, Fractional value) const
const GlopParameters & GetParameters() const
const DenseRow & GetDualRayRowCombination() const
Fractional GetVariableValue(ColIndex col) const
void SetIntegralityScale(ColIndex col, Fractional scale)
VariableStatus GetVariableStatus(ColIndex col) const
Fractional GetReducedCost(ColIndex col) const
const DenseColumn & GetDualRay() const
ABSL_MUST_USE_RESULT Status Solve(const LinearProgram &lp, TimeLimit *time_limit)
const ScatteredRow & GetUnitRowLeftInverse(RowIndex row)
Fractional GetDualValue(RowIndex row) const
ConstraintStatus GetConstraintStatus(RowIndex row) const
void LoadStateForNextSolve(const BasisState &state)
ColIndex GetBasis(RowIndex row) const
void SetParameters(const GlopParameters &parameters)
LinearConstraint * mutable_cut()
Definition: cuts.h:259
bool TrySimpleKnapsack(const LinearConstraint base_ct, const std::vector< double > &lp_values, const std::vector< IntegerValue > &lower_bounds, const std::vector< IntegerValue > &upper_bounds)
Definition: cuts.cc:1185
void WatchIntegerVariable(IntegerVariable i, int id, int watch_index=-1)
Definition: integer.h:1593
void WatchUpperBound(IntegerVariable var, int id, int watch_index=-1)
Definition: integer.h:1587
void SetPropagatorPriority(int id, int priority)
Definition: integer.cc:2051
int Register(PropagatorInterface *propagator)
Definition: integer.cc:2028
void AddLpVariable(IntegerVariable var)
Definition: cuts.h:118
void ProcessUpperBoundedConstraintWithSlackCreation(bool substitute_only_inner_variables, IntegerVariable first_slack, const absl::StrongVector< IntegerVariable, double > &lp_values, LinearConstraint *cut, std::vector< SlackInfo > *slack_infos)
Definition: cuts.cc:1613
bool DebugSlack(IntegerVariable first_slack, const LinearConstraint &initial_cut, const LinearConstraint &cut, const std::vector< SlackInfo > &info)
Definition: cuts.cc:1746
void RecomputeCacheAndSeparateSomeImpliedBoundCuts(const absl::StrongVector< IntegerVariable, double > &lp_values)
Definition: cuts.cc:1603
const IntegerVariable GetLiteralView(Literal lit) const
Definition: integer.h:499
void ComputeCut(RoundingOptions options, const std::vector< double > &lp_values, const std::vector< IntegerValue > &lower_bounds, const std::vector< IntegerValue > &upper_bounds, ImpliedBoundsProcessor *ib_processor, LinearConstraint *cut)
Definition: cuts.cc:731
ABSL_MUST_USE_RESULT bool Enqueue(IntegerLiteral i_lit, absl::Span< const Literal > literal_reason, absl::Span< const IntegerLiteral > integer_reason)
Definition: integer.cc:1048
bool IsCurrentlyIgnored(IntegerVariable i) const
Definition: integer.h:705
bool IsFixed(IntegerVariable i) const
Definition: integer.h:1453
IntegerLiteral LowerBoundAsLiteral(IntegerVariable i) const
Definition: integer.h:1477
bool ReportConflict(absl::Span< const Literal > literal_reason, absl::Span< const IntegerLiteral > integer_reason)
Definition: integer.h:924
IntegerValue UpperBound(IntegerVariable i) const
Definition: integer.h:1449
IntegerValue LevelZeroUpperBound(IntegerVariable var) const
Definition: integer.h:1534
IntegerValue LevelZeroLowerBound(IntegerVariable var) const
Definition: integer.h:1529
void RelaxLinearReason(IntegerValue slack, absl::Span< const IntegerValue > coeffs, std::vector< IntegerLiteral > *reason) const
Definition: integer.cc:826
IntegerValue LowerBound(IntegerVariable i) const
Definition: integer.h:1445
IntegerLiteral UpperBoundAsLiteral(IntegerVariable i) const
Definition: integer.h:1482
bool IsFixedAtLevelZero(IntegerVariable var) const
Definition: integer.h:1539
void RemoveLevelZeroBounds(std::vector< IntegerLiteral > *reason) const
Definition: integer.cc:960
void RegisterReversibleClass(ReversibleInterface *rev)
Definition: integer.h:947
bool ChangeLp(const absl::StrongVector< IntegerVariable, double > &lp_solution, glop::BasisState *solution_state)
void SetObjectiveCoefficient(IntegerVariable var, IntegerValue coeff)
ConstraintIndex Add(LinearConstraint ct, bool *added=nullptr)
const std::vector< ConstraintIndex > & LpConstraints() const
bool AddCut(LinearConstraint ct, std::string type_name, const absl::StrongVector< IntegerVariable, double > &lp_solution, std::string extra_info="")
const absl::StrongVector< ConstraintIndex, ConstraintInfo > & AllConstraints() const
std::function< IntegerLiteral()> HeuristicLpReducedCostBinary(Model *model)
bool IncrementalPropagate(const std::vector< int > &watch_indices) override
std::function< IntegerLiteral()> HeuristicLpMostInfeasibleBinary(Model *model)
void SetObjectiveCoefficient(IntegerVariable ivar, IntegerValue coeff)
Class that owns everything related to a particular optimization model.
Definition: sat/model.h:42
void ConvertToLinearConstraint(const std::vector< IntegerVariable > &integer_variables, IntegerValue upper_bound, LinearConstraint *result)
bool Add(glop::ColIndex col, IntegerValue value)
bool AddLinearExpressionMultiple(IntegerValue multiplier, const std::vector< std::pair< glop::ColIndex, IntegerValue > > &terms)
std::vector< std::pair< glop::ColIndex, IntegerValue > > GetTerms()
void TransferToManager(const absl::StrongVector< IntegerVariable, double > &lp_solution, LinearConstraintManager *manager)
std::vector< Literal > * MutableConflict()
Definition: sat_base.h:364
void ProcessVariables(const std::vector< double > &lp_values, const std::vector< IntegerValue > &lower_bounds, const std::vector< IntegerValue > &upper_bounds)
void AddOneConstraint(glop::RowIndex, const std::vector< std::pair< glop::ColIndex, IntegerValue > > &terms, IntegerValue lb, IntegerValue ub)
std::vector< std::vector< std::pair< glop::RowIndex, IntegerValue > > > InterestingCandidates(ModelRandomGenerator *random)
int64_t b
int64_t a
Block * next
SatParameters parameters
const std::string name
const Constraint * ct
int64_t value
IntVar * var
Definition: expr_array.cc:1874
absl::Status status
Definition: g_gurobi.cc:35
double upper_bound
double lower_bound
GRBmodel * model
int arc
int index
const bool DEBUG_MODE
Definition: macros.h:24
ColIndex col
Definition: markowitz.cc:183
RowIndex row
Definition: markowitz.cc:182
const Collection::value_type::second_type & FindOrDie(const Collection &collection, const typename Collection::value_type::first_type &key)
Definition: map_util.h:206
StrictITIVector< ColIndex, Fractional > DenseRow
Definition: lp_types.h:303
std::string GetProblemStatusString(ProblemStatus problem_status)
Definition: lp_types.cc:19
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:49
RowIndex ColToRowIndex(ColIndex col)
Definition: lp_types.h:52
const double kEpsilon
Definition: lp_types.h:87
StrictITIVector< RowIndex, Fractional > DenseColumn
Definition: lp_types.h:332
IntegerValue FloorRatio(IntegerValue dividend, IntegerValue positive_divisor)
Definition: integer.h:98
CutGenerator CreateCVRPCutGenerator(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, const std::vector< int64_t > &demands, int64_t capacity, Model *model)
bool AddProductTo(IntegerValue a, IntegerValue b, IntegerValue *result)
Definition: integer.h:121
constexpr IntegerValue kMaxIntegerValue(std::numeric_limits< IntegerValue::ValueType >::max() - 1)
IntType IntTypeAbs(IntType t)
Definition: integer.h:85
constexpr IntegerValue kMinIntegerValue(-kMaxIntegerValue.value())
const IntegerVariable kNoIntegerVariable(-1)
void MakeAllCoefficientsPositive(LinearConstraint *constraint)
IntegerVariable PositiveVariable(IntegerVariable i)
Definition: integer.h:149
std::vector< IntegerVariable > NegationOf(const std::vector< IntegerVariable > &vars)
Definition: integer.cc:47
IntegerValue ComputeInfinityNorm(const LinearConstraint &constraint)
void SeparateSubtourInequalities(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, const absl::StrongVector< IntegerVariable, double > &lp_values, absl::Span< const int64_t > demands, int64_t capacity, LinearConstraintManager *manager, Model *model)
bool VariableIsPositive(IntegerVariable i)
Definition: integer.h:145
void DivideByGCD(LinearConstraint *constraint)
CutGenerator CreateStronglyConnectedGraphCutGenerator(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, Model *model)
double ComputeActivity(const LinearConstraint &constraint, const absl::StrongVector< IntegerVariable, double > &values)
double ToDouble(IntegerValue value)
Definition: integer.h:77
Collection of objects used to extend the Constraint Solver library.
int64_t CapAdd(int64_t x, int64_t y)
int64_t CapSub(int64_t x, int64_t y)
std::pair< int64_t, int64_t > Arc
Definition: search.cc:3434
int64_t CapProd(int64_t x, int64_t y)
int64_t demand
Definition: resource.cc:125
int64_t bound
int64_t capacity
int64_t tail
int64_t head
int64_t start
std::vector< IntegerVariable > vars
Definition: cuts.h:48
std::function< bool(const absl::StrongVector< IntegerVariable, double > &lp_values, LinearConstraintManager *manager)> generate_cuts
Definition: cuts.h:52
static IntegerLiteral LowerOrEqual(IntegerVariable i, IntegerValue bound)
Definition: integer.h:1393
static IntegerLiteral GreaterOrEqual(IntegerVariable i, IntegerValue bound)
Definition: integer.h:1387
const double coeff
#define VLOG_IS_ON(verboselevel)
Definition: vlog_is_on.h:44