// OR-Tools 9.2 — linear_programming_constraint.cc
// (Header of a doxygen source listing; the embedded per-line numbers below
// are extraction artifacts, not part of the original source.)
1// Copyright 2010-2021 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <cmath>
18#include <cstdint>
19#include <iterator>
20#include <limits>
21#include <string>
22#include <utility>
23#include <vector>
24
25#include "absl/container/flat_hash_map.h"
26#include "absl/numeric/int128.h"
36#include "ortools/glop/status.h"
40#include "ortools/sat/integer.h"
43
44namespace operations_research {
45namespace sat {
46
47using glop::ColIndex;
49using glop::RowIndex;
50
// ScatteredIntegerVector::ClearAndResize — resets this hybrid sparse/dense
// vector to `size` zero entries and returns to the (empty) sparse state.
// NOTE(review): the doxygen extraction dropped listing line 51, which held the
// signature — presumably `void ScatteredIntegerVector::ClearAndResize(int size)`;
// confirm against the original source.
 52 if (is_sparse_) {
// Sparse mode: only the recorded non-zero columns need to be zeroed.
 53 for (const glop::ColIndex col : non_zeros_) {
 54 dense_vector_[col] = IntegerValue(0);
 55 }
 56 dense_vector_.resize(size, IntegerValue(0));
 57 } else {
// Dense mode: rewrite the whole vector in one pass.
 58 dense_vector_.assign(size, IntegerValue(0));
 59 }
// Restore the is_zeros_ markers for the previously non-zero columns, then
// clear the non-zero list so the vector is back in sparse representation.
 60 for (const glop::ColIndex col : non_zeros_) {
 61 is_zeros_[col] = true;
 62 }
 63 is_zeros_.resize(size, true);
 64 non_zeros_.clear();
 65 is_sparse_ = true;
 66}
67
68bool ScatteredIntegerVector::Add(glop::ColIndex col, IntegerValue value) {
69 const int64_t add = CapAdd(value.value(), dense_vector_[col].value());
72 return false;
73 dense_vector_[col] = IntegerValue(add);
74 if (is_sparse_ && is_zeros_[col]) {
75 is_zeros_[col] = false;
76 non_zeros_.push_back(col);
77 }
78 return true;
79}
80
// ScatteredIntegerVector::AddLinearExpressionMultiple — adds
// `multiplier * terms` to this vector. Returns false on overflow (detected by
// AddProductTo), in which case the vector content is unspecified.
// NOTE(review): the extraction dropped listing line 81 with the start of the
// signature — presumably `bool ScatteredIntegerVector::AddLinearExpressionMultiple(`;
// confirm against the original source.
 82 IntegerValue multiplier,
 83 const std::vector<std::pair<glop::ColIndex, IntegerValue>>& terms) {
// Heuristic: stay sparse only while the touched entries remain below ~10% of
// the vector size; otherwise switch to the dense representation for speed.
 84 const double threshold = 0.1 * static_cast<double>(dense_vector_.size());
 85 if (is_sparse_ && static_cast<double>(terms.size()) < threshold) {
 86 for (const std::pair<glop::ColIndex, IntegerValue> term : terms) {
 87 if (is_zeros_[term.first]) {
 88 is_zeros_[term.first] = false;
 89 non_zeros_.push_back(term.first);
 90 }
 91 if (!AddProductTo(multiplier, term.second, &dense_vector_[term.first])) {
 92 return false;
 93 }
 94 }
// Too many non-zeros accumulated: fall back to dense mode permanently.
 95 if (static_cast<double>(non_zeros_.size()) > threshold) {
 96 is_sparse_ = false;
 97 }
 98 } else {
 99 is_sparse_ = false;
 100 for (const std::pair<glop::ColIndex, IntegerValue> term : terms) {
 101 if (!AddProductTo(multiplier, term.second, &dense_vector_[term.first])) {
 102 return false;
 103 }
 104 }
 105 }
 106 return true;
 107}
108
// ScatteredIntegerVector::ConvertToLinearConstraint — writes this vector as a
// LinearConstraint `sum coeff * var <= upper_bound` (lb = kMinIntegerValue),
// mapping each LP column back to its CP IntegerVariable and skipping zero
// coefficients.
// NOTE(review): the extraction dropped listing line 109 with the start of the
// signature — presumably
// `void ScatteredIntegerVector::ConvertToLinearConstraint(`; confirm.
 110 const std::vector<IntegerVariable>& integer_variables,
 111 IntegerValue upper_bound, LinearConstraint* result) {
 112 result->vars.clear();
 113 result->coeffs.clear();
 114 if (is_sparse_) {
// Sorting makes the output deterministic and ordered by column.
 115 std::sort(non_zeros_.begin(), non_zeros_.end());
 116 for (const glop::ColIndex col : non_zeros_) {
 117 const IntegerValue coeff = dense_vector_[col];
 118 if (coeff == 0) continue;
 119 result->vars.push_back(integer_variables[col.value()]);
 120 result->coeffs.push_back(coeff);
 121 }
 122 } else {
// Dense mode: scan every column.
 123 const int size = dense_vector_.size();
 124 for (glop::ColIndex col(0); col < size; ++col) {
 125 const IntegerValue coeff = dense_vector_[col];
 126 if (coeff == 0) continue;
 127 result->vars.push_back(integer_variables[col.value()]);
 128 result->coeffs.push_back(coeff);
 129 }
 130 }
 131 result->lb = kMinIntegerValue;
 132 result->ub = upper_bound;
 133}
134
// ScatteredIntegerVector::GetTerms — returns the non-zero (column, coeff)
// pairs of this vector, sorted by column in the sparse case.
// NOTE(review): the extraction dropped listing line 136 which held the
// function name — presumably `ScatteredIntegerVector::GetTerms() {`; confirm.
135std::vector<std::pair<glop::ColIndex, IntegerValue>>
 137 std::vector<std::pair<glop::ColIndex, IntegerValue>> result;
 138 if (is_sparse_) {
 139 std::sort(non_zeros_.begin(), non_zeros_.end());
 140 for (const glop::ColIndex col : non_zeros_) {
 141 const IntegerValue coeff = dense_vector_[col];
 142 if (coeff != 0) result.push_back({col, coeff});
 143 }
 144 } else {
// Dense scan: collect every non-zero entry.
 145 const int size = dense_vector_.size();
 146 for (glop::ColIndex col(0); col < size; ++col) {
 147 const IntegerValue coeff = dense_vector_[col];
 148 if (coeff != 0) result.push_back({col, coeff});
 149 }
 150 }
 151 return result;
 152}
153
154// TODO(user): make SatParameters singleton too, otherwise changing them after
155// a constraint was added will have no effect on this class.
// Constructor: wires this propagator to the model singletons (trail, encoder,
// time limit, dispatcher, ...) and configures the simplex for incrementality.
// NOTE(review): the extraction dropped listing lines 156, 169 and 171 — the
// constructor signature (presumably
// `LinearProgrammingConstraint::LinearProgrammingConstraint(Model* model)`),
// the argument of expanded_lp_solution_'s initializer, and the declaration of
// the local glop parameters (presumably `glop::GlopParameters parameters;`).
// Confirm against the original source.
 157 : constraint_manager_(model),
 158 parameters_(*(model->GetOrCreate<SatParameters>())),
 159 model_(model),
 160 time_limit_(model->GetOrCreate<TimeLimit>()),
 161 integer_trail_(model->GetOrCreate<IntegerTrail>()),
 162 trail_(model->GetOrCreate<Trail>()),
 163 integer_encoder_(model->GetOrCreate<IntegerEncoder>()),
 164 random_(model->GetOrCreate<ModelRandomGenerator>()),
 165 implied_bounds_processor_({}, integer_trail_,
 166 model->GetOrCreate<ImpliedBounds>()),
 167 dispatcher_(model->GetOrCreate<LinearProgrammingDispatcher>()),
 168 expanded_lp_solution_(
 170 // Tweak the default parameters to make the solve incremental.
 172 parameters.set_use_dual_simplex(true);
 173 simplex_.SetParameters(parameters);
// Reduced-cost averages are only needed by the LP-based branching heuristics.
 174 if (parameters_.use_branching_in_lp() ||
 175 parameters_.search_branching() == SatParameters::LP_SEARCH) {
 176 compute_reduced_cost_averages_ = true;
 177 }
 178
 179 // Register our local rev int repository.
 180 integer_trail_->RegisterReversibleClass(&rc_rev_int_repository_);
 181}
182
// LinearProgrammingConstraint::AddLinearConstraint — hands the constraint to
// the manager and eagerly creates an LP mirror column for each variable.
// NOTE(review): the extraction dropped listing line 183 with the start of the
// signature — presumably
// `void LinearProgrammingConstraint::AddLinearConstraint(`; confirm.
 184 const LinearConstraint& ct) {
// Must be called before RegisterWith(): the LP column set is frozen then.
 185 DCHECK(!lp_constraint_is_registered_);
 186 constraint_manager_.Add(ct);
 187
 188 // We still create the mirror variable right away though.
 189 //
 190 // TODO(user): clean this up? Note that it is important that the variable
 191 // in lp_data_ never changes though, so we can restart from the current
 192 // lp solution and be incremental (even if the constraints changed).
 193 for (const IntegerVariable var : ct.vars) {
 194 GetOrCreateMirrorVariable(PositiveVariable(var));
 195 }
 196}
197
198glop::ColIndex LinearProgrammingConstraint::GetOrCreateMirrorVariable(
199 IntegerVariable positive_variable) {
200 DCHECK(VariableIsPositive(positive_variable));
201 const auto it = mirror_lp_variable_.find(positive_variable);
202 if (it == mirror_lp_variable_.end()) {
203 const glop::ColIndex col(integer_variables_.size());
204 implied_bounds_processor_.AddLpVariable(positive_variable);
205 mirror_lp_variable_[positive_variable] = col;
206 integer_variables_.push_back(positive_variable);
207 lp_solution_.push_back(std::numeric_limits<double>::infinity());
208 lp_reduced_cost_.push_back(0.0);
209 (*dispatcher_)[positive_variable] = this;
210
211 const int index = std::max(positive_variable.value(),
212 NegationOf(positive_variable).value());
213 if (index >= expanded_lp_solution_.size()) {
214 expanded_lp_solution_.resize(index + 1, 0.0);
215 }
216 return col;
217 }
218 return it->second;
219}
220
// LinearProgrammingConstraint::SetObjectiveCoefficient — records an objective
// term, normalizing the variable to its positive form (negating the
// coefficient when needed) and tracking the objective infinity norm.
// NOTE(review): the extraction dropped listing line 221 with the start of the
// signature — presumably `void LinearProgrammingConstraint::
// SetObjectiveCoefficient(IntegerVariable ivar,`; confirm.
 222 IntegerValue coeff) {
// The objective must be fully defined before RegisterWith().
 223 CHECK(!lp_constraint_is_registered_);
 224 objective_is_defined_ = true;
 225 IntegerVariable pos_var = VariableIsPositive(ivar) ? ivar : NegationOf(ivar);
 226 if (ivar != pos_var) coeff = -coeff;
 227
 228 constraint_manager_.SetObjectiveCoefficient(pos_var, coeff);
 229 const glop::ColIndex col = GetOrCreateMirrorVariable(pos_var);
 230 integer_objective_.push_back({col, coeff});
 231 objective_infinity_norm_ =
 232 std::max(objective_infinity_norm_, IntTypeAbs(coeff));
 233}
234
235// TODO(user): As the search progress, some variables might get fixed. Exploit
236// this to reduce the number of variables in the LP and in the
237// ConstraintManager? We might also detect during the search that two variable
238// are equivalent.
239//
240// TODO(user): On TSP/VRP with a lot of cuts, this can take 20% of the overall
241// running time. We should be able to almost remove most of this from the
242// profile by being more incremental (modulo LP scaling).
243//
244// TODO(user): A longer term idea for LP with a lot of variables is to not
245// add all variables to each LP solve and do some "sifting". That can be useful
246// for TSP for instance where the number of edges is large, but only a small
247// fraction will be used in the optimal solution.
// Rebuilds lp_data_ from the constraint manager's current LP constraint set:
// copies constraints into integer_lp_ (normalizing variables to positive
// form), prunes fixed variables from the objective, sets variable bounds from
// the level-zero domains, scales the LP, and configures basis polishing.
// Returns false if a constraint is trivially infeasible (lb > ub).
248bool LinearProgrammingConstraint::CreateLpFromConstraintManager() {
 249 // Fill integer_lp_.
 250 integer_lp_.clear();
 251 infinity_norms_.clear();
 252 const auto& all_constraints = constraint_manager_.AllConstraints();
 253 for (const auto index : constraint_manager_.LpConstraints()) {
 254 const LinearConstraint& ct = all_constraints[index].constraint;
 255
 256 integer_lp_.push_back(LinearConstraintInternal());
 257 LinearConstraintInternal& new_ct = integer_lp_.back();
 258 new_ct.lb = ct.lb;
 259 new_ct.ub = ct.ub;
 260 const int size = ct.vars.size();
 261 IntegerValue infinity_norm(0);
 262 if (ct.lb > ct.ub) {
 263 VLOG(1) << "Trivial infeasible bound in an LP constraint";
 264 return false;
 265 }
// Only finite bounds participate in the per-constraint infinity norm.
 266 if (ct.lb > kMinIntegerValue) {
 267 infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.lb));
 268 }
 269 if (ct.ub < kMaxIntegerValue) {
 270 infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.ub));
 271 }
 272 for (int i = 0; i < size; ++i) {
 273 // We only use positive variable inside this class.
 274 IntegerVariable var = ct.vars[i];
 275 IntegerValue coeff = ct.coeffs[i];
 276 if (!VariableIsPositive(var)) {
 277 var = NegationOf(var);
 278 coeff = -coeff;
 279 }
 280 infinity_norm = std::max(infinity_norm, IntTypeAbs(coeff));
 281 new_ct.terms.push_back({GetOrCreateMirrorVariable(var), coeff});
 282 }
 283 infinity_norms_.push_back(infinity_norm);
 284
 285 // Important to keep lp_data_ "clean".
 286 std::sort(new_ct.terms.begin(), new_ct.terms.end());
 287 }
 288
 289 // Copy the integer_lp_ into lp_data_.
 290 lp_data_.Clear();
 291 for (int i = 0; i < integer_variables_.size(); ++i) {
 292 CHECK_EQ(glop::ColIndex(i), lp_data_.CreateNewVariable());
 293 }
 294
 295 // We remove fixed variables from the objective. This should help the LP
 296 // scaling, but also our integer reason computation.
 297 int new_size = 0;
 298 objective_infinity_norm_ = 0;
 299 for (const auto entry : integer_objective_) {
 300 const IntegerVariable var = integer_variables_[entry.first.value()];
 301 if (integer_trail_->IsFixedAtLevelZero(var)) {
// Fold the fixed term into the constant objective offset.
 302 integer_objective_offset_ +=
 303 entry.second * integer_trail_->LevelZeroLowerBound(var);
 304 continue;
 305 }
 306 objective_infinity_norm_ =
 307 std::max(objective_infinity_norm_, IntTypeAbs(entry.second));
// In-place compaction of integer_objective_ (resized below).
 308 integer_objective_[new_size++] = entry;
 309 lp_data_.SetObjectiveCoefficient(entry.first, ToDouble(entry.second));
 310 }
 311 objective_infinity_norm_ =
 312 std::max(objective_infinity_norm_, IntTypeAbs(integer_objective_offset_));
 313 integer_objective_.resize(new_size);
 314 lp_data_.SetObjectiveOffset(ToDouble(integer_objective_offset_));
 315
 316 for (const LinearConstraintInternal& ct : integer_lp_) {
 317 const ConstraintIndex row = lp_data_.CreateNewConstraint();
 318 lp_data_.SetConstraintBounds(row, ToDouble(ct.lb), ToDouble(ct.ub));
 319 for (const auto& term : ct.terms) {
 320 lp_data_.SetCoefficient(row, term.first, ToDouble(term.second));
 321 }
 322 }
 323 lp_data_.NotifyThatColumnsAreClean();
 324
 325 // We scale the LP using the level zero bounds that we later override
 326 // with the current ones.
 327 //
 328 // TODO(user): As part of the scaling, we may also want to shift the initial
 329 // variable bounds so that each variable contain the value zero in their
 330 // domain. Maybe just once and for all at the beginning.
 331 const int num_vars = integer_variables_.size();
 332 for (int i = 0; i < num_vars; i++) {
 333 const IntegerVariable cp_var = integer_variables_[i];
 334 const double lb = ToDouble(integer_trail_->LevelZeroLowerBound(cp_var));
 335 const double ub = ToDouble(integer_trail_->LevelZeroUpperBound(cp_var));
 336 lp_data_.SetVariableBounds(glop::ColIndex(i), lb, ub);
 337 }
 338
 339 // TODO(user): As we have an idea of the LP optimal after the first solves,
 340 // maybe we can adapt the scaling accordingly.
 341 glop::GlopParameters params;
 342 params.set_cost_scaling(glop::GlopParameters::MEAN_COST_SCALING);
 343 scaler_.Scale(params, &lp_data_);
// Now override the (scaled) bounds with the current search-level bounds.
 344 UpdateBoundsOfLpVariables();
 345
 346 // Set the information for the step to polish the LP basis. All our variables
 347 // are integer, but for now, we just try to minimize the fractionality of the
 348 // binary variables.
 349 if (parameters_.polish_lp_solution()) {
 350 simplex_.ClearIntegralityScales();
 351 for (int i = 0; i < num_vars; ++i) {
 352 const IntegerVariable cp_var = integer_variables_[i];
 353 const IntegerValue lb = integer_trail_->LevelZeroLowerBound(cp_var);
 354 const IntegerValue ub = integer_trail_->LevelZeroUpperBound(cp_var);
 355 if (lb != 0 || ub != 1) continue;
 356 simplex_.SetIntegralityScale(
 357 glop::ColIndex(i),
 358 1.0 / scaler_.VariableScalingFactor(glop::ColIndex(i)));
 359 }
 360 }
 361
 362 lp_data_.NotifyThatColumnsAreClean();
 363 VLOG(1) << "LP relaxation: " << lp_data_.GetDimensionString() << ". "
 364 << constraint_manager_.AllConstraints().size()
 365 << " Managed constraints.";
 366 return true;
 367}
368
369LPSolveInfo LinearProgrammingConstraint::SolveLpForBranching() {
370 LPSolveInfo info;
371 glop::BasisState basis_state = simplex_.GetState();
372
373 const glop::Status status = simplex_.Solve(lp_data_, time_limit_);
374 total_num_simplex_iterations_ += simplex_.GetNumberOfIterations();
375 simplex_.LoadStateForNextSolve(basis_state);
376 if (!status.ok()) {
377 VLOG(1) << "The LP solver encountered an error: " << status.error_message();
378 info.status = glop::ProblemStatus::ABNORMAL;
379 return info;
380 }
381 info.status = simplex_.GetProblemStatus();
382 if (info.status == glop::ProblemStatus::OPTIMAL ||
383 info.status == glop::ProblemStatus::DUAL_FEASIBLE) {
384 // Record the objective bound.
385 info.lp_objective = simplex_.GetObjectiveValue();
386 info.new_obj_bound = IntegerValue(
387 static_cast<int64_t>(std::ceil(info.lp_objective - kCpEpsilon)));
388 }
389 return info;
390}
391
392void LinearProgrammingConstraint::FillReducedCostReasonIn(
393 const glop::DenseRow& reduced_costs,
394 std::vector<IntegerLiteral>* integer_reason) {
395 integer_reason->clear();
396 const int num_vars = integer_variables_.size();
397 for (int i = 0; i < num_vars; i++) {
398 const double rc = reduced_costs[glop::ColIndex(i)];
399 if (rc > kLpEpsilon) {
400 integer_reason->push_back(
401 integer_trail_->LowerBoundAsLiteral(integer_variables_[i]));
402 } else if (rc < -kLpEpsilon) {
403 integer_reason->push_back(
404 integer_trail_->UpperBoundAsLiteral(integer_variables_[i]));
405 }
406 }
407
408 integer_trail_->RemoveLevelZeroBounds(integer_reason);
409}
410
// Strong-branching style probe on `positive_var` (which must be fractional in
// the current LP solution): solves the LP with var <= floor(value) and with
// var >= ceil(value). An infeasible (DUAL_UNBOUNDED) branch lets us push the
// opposite bound; otherwise the min of the two dual bounds may improve the
// objective lower bound. Returns true iff a deduction was pushed (or a
// conflict reported).
411bool LinearProgrammingConstraint::BranchOnVar(IntegerVariable positive_var) {
 412 // From the current LP solution, branch on the given var if fractional.
 413 DCHECK(lp_solution_is_set_);
 414 const double current_value = GetSolutionValue(positive_var);
 415 DCHECK_GT(std::abs(current_value - std::round(current_value)), kCpEpsilon);
 416
 417 // Used as empty reason in this method.
 418 integer_reason_.clear();
 419
 420 bool deductions_were_made = false;
 421
 422 UpdateBoundsOfLpVariables();
 423
 424 const IntegerValue current_obj_lb = integer_trail_->LowerBound(objective_cp_);
 425 // This will try to branch in both direction around the LP value of the
 426 // given variable and push any deduction done this way.
 427
 428 const glop::ColIndex lp_var = GetOrCreateMirrorVariable(positive_var);
 429 const double current_lb = ToDouble(integer_trail_->LowerBound(positive_var));
 430 const double current_ub = ToDouble(integer_trail_->UpperBound(positive_var));
// The LP stores scaled bounds, hence the multiplications by `factor` below.
 431 const double factor = scaler_.VariableScalingFactor(lp_var);
 432 if (current_value < current_lb || current_value > current_ub) {
 433 return false;
 434 }
 435
 436 // Form LP1 var <= floor(current_value)
 437 const double new_ub = std::floor(current_value);
 438 lp_data_.SetVariableBounds(lp_var, current_lb * factor, new_ub * factor);
 439
 440 LPSolveInfo lower_branch_info = SolveLpForBranching();
// Only these three statuses give us usable information; anything else aborts.
 441 if (lower_branch_info.status != glop::ProblemStatus::OPTIMAL &&
 442 lower_branch_info.status != glop::ProblemStatus::DUAL_FEASIBLE &&
 443 lower_branch_info.status != glop::ProblemStatus::DUAL_UNBOUNDED) {
 444 return false;
 445 }
 446
// DUAL_UNBOUNDED == primal infeasible: the "down" branch is impossible.
 447 if (lower_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
 448 // Push the other branch.
 449 const IntegerLiteral deduction = IntegerLiteral::GreaterOrEqual(
 450 positive_var, IntegerValue(std::ceil(current_value)));
 451 if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
 452 return false;
 453 }
 454 deductions_were_made = true;
 455 } else if (lower_branch_info.new_obj_bound <= current_obj_lb) {
// No objective improvement possible from this probe; stop early.
 456 return false;
 457 }
 458
 459 // Form LP2 var >= ceil(current_value)
 460 const double new_lb = std::ceil(current_value);
 461 lp_data_.SetVariableBounds(lp_var, new_lb * factor, current_ub * factor);
 462
 463 LPSolveInfo upper_branch_info = SolveLpForBranching();
 464 if (upper_branch_info.status != glop::ProblemStatus::OPTIMAL &&
 465 upper_branch_info.status != glop::ProblemStatus::DUAL_FEASIBLE &&
 466 upper_branch_info.status != glop::ProblemStatus::DUAL_UNBOUNDED) {
 467 return deductions_were_made;
 468 }
 469
 470 if (upper_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
 471 // Push the other branch if not infeasible.
 472 if (lower_branch_info.status != glop::ProblemStatus::DUAL_UNBOUNDED) {
 473 const IntegerLiteral deduction = IntegerLiteral::LowerOrEqual(
 474 positive_var, IntegerValue(std::floor(current_value)));
 475 if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
 476 return deductions_were_made;
 477 }
 478 deductions_were_made = true;
 479 }
 480 } else if (upper_branch_info.new_obj_bound <= current_obj_lb) {
 481 return deductions_were_made;
 482 }
 483
// Combine the two branch bounds into an objective lower bound: both branches
// infeasible is a conflict; one infeasible leaves the other's bound; else the
// min of the two bounds is valid.
 484 IntegerValue approximate_obj_lb = kMinIntegerValue;
 485
 486 if (lower_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED &&
 487 upper_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
 488 return integer_trail_->ReportConflict(integer_reason_);
 489 } else if (lower_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
 490 approximate_obj_lb = upper_branch_info.new_obj_bound;
 491 } else if (upper_branch_info.status == glop::ProblemStatus::DUAL_UNBOUNDED) {
 492 approximate_obj_lb = lower_branch_info.new_obj_bound;
 493 } else {
 494 approximate_obj_lb = std::min(lower_branch_info.new_obj_bound,
 495 upper_branch_info.new_obj_bound);
 496 }
 497
 498 // NOTE: On some problems, the approximate_obj_lb could be inexact which add
 499 // some tolerance to CP-SAT where currently there is none.
 500 if (approximate_obj_lb <= current_obj_lb) return deductions_were_made;
 501
 502 // Push the bound to the trail.
 503 const IntegerLiteral deduction =
 504 IntegerLiteral::GreaterOrEqual(objective_cp_, approximate_obj_lb);
 505 if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
 506 return deductions_were_made;
 507 }
 508
 509 return true;
 510}
511
// LinearProgrammingConstraint::RegisterWith — finalizes the LP (optionally
// adding all constraints eagerly), then hooks this propagator into the
// watcher and the trail.
// NOTE(review): the extraction dropped listing line 512 with the signature —
// presumably `void LinearProgrammingConstraint::RegisterWith(Model* model) {`;
// confirm against the original source.
 513 DCHECK(!lp_constraint_is_registered_);
 514 lp_constraint_is_registered_ = true;
 515 model->GetOrCreate<LinearProgrammingConstraintCollection>()->push_back(this);
 516
 517 // Note that this is not really needed but should lead to better cache
 518 // locality.
 519 std::sort(integer_objective_.begin(), integer_objective_.end());
 520
 521 // Set the LP to its initial content.
 522 if (!parameters_.add_lp_constraints_lazily()) {
 523 constraint_manager_.AddAllConstraintsToLp();
 524 }
// A trivially infeasible LP makes the whole model UNSAT.
 525 if (!CreateLpFromConstraintManager()) {
 526 model->GetOrCreate<SatSolver>()->NotifyThatModelIsUnsat();
 527 return;
 528 }
 529
 530 GenericLiteralWatcher* watcher = model->GetOrCreate<GenericLiteralWatcher>();
 531 const int watcher_id = watcher->Register(this);
 532 const int num_vars = integer_variables_.size();
 533 for (int i = 0; i < num_vars; i++) {
 534 watcher->WatchIntegerVariable(integer_variables_[i], watcher_id, i);
 535 }
 536 if (objective_is_defined_) {
 537 watcher->WatchUpperBound(objective_cp_, watcher_id);
 538 }
// Low priority (2): the LP is expensive, run it after cheaper propagators.
 539 watcher->SetPropagatorPriority(watcher_id, 2);
 540 watcher->AlwaysCallAtLevelZero(watcher_id);
 541
 542 // Registering it with the trail make sure this class is always in sync when
 543 // it is used in the decision heuristics.
 544 integer_trail_->RegisterReversibleClass(this);
 545 watcher->RegisterReversibleInt(watcher_id, &rev_optimal_constraints_size_);
 546}
547
// LinearProgrammingConstraint::SetLevel — backtracking hook: shrinks the
// stored optimal constraints to their reversible size, invalidates the cached
// LP solution if we backtracked past the level it was computed at, and at
// level zero reloads the last known level-zero LP solution.
// NOTE(review): the extraction dropped listing line 548 with the signature —
// presumably `void LinearProgrammingConstraint::SetLevel(int level) {`;
// confirm against the original source.
 549 optimal_constraints_.resize(rev_optimal_constraints_size_);
 550 if (lp_solution_is_set_ && level < lp_solution_level_) {
 551 lp_solution_is_set_ = false;
 552 }
 553
 554 // Special case for level zero, we "reload" any previously known optimal
 555 // solution from that level.
 556 //
 557 // TODO(user): Keep all optimal solution in the current branch?
 558 // TODO(user): Still try to add cuts/constraints though!
 559 if (level == 0 && !level_zero_lp_solution_.empty()) {
 560 lp_solution_is_set_ = true;
 561 lp_solution_ = level_zero_lp_solution_;
 562 lp_solution_level_ = 0;
// Keep the expanded (per IntegerVariable) view consistent with lp_solution_.
 563 for (int i = 0; i < lp_solution_.size(); i++) {
 564 expanded_lp_solution_[integer_variables_[i]] = lp_solution_[i];
 565 expanded_lp_solution_[NegationOf(integer_variables_[i])] =
 566 -lp_solution_[i];
 567 }
 568 }
 569}
570
// LinearProgrammingConstraint::AddCutGenerator — registers a cut generator,
// making sure every variable it mentions has an LP mirror column first.
// NOTE(review): the extraction dropped listing line 571 with the signature —
// presumably
// `void LinearProgrammingConstraint::AddCutGenerator(CutGenerator generator) {`;
// confirm against the original source.
 572 for (const IntegerVariable var : generator.vars) {
 573 GetOrCreateMirrorVariable(VariableIsPositive(var) ? var : NegationOf(var));
 574 }
 575 cut_generators_.push_back(std::move(generator));
 576}
577
// LinearProgrammingConstraint::IncrementalPropagate — cheap filter in front
// of Propagate(): re-solves the LP only when there is no cached solution,
// when cuts may still be added at level zero, or when a watched bound change
// makes the cached LP solution infeasible.
// NOTE(review): the extraction dropped listing line 578 with the start of the
// signature — presumably
// `bool LinearProgrammingConstraint::IncrementalPropagate(`; confirm.
 579 const std::vector<int>& watch_indices) {
 580 if (!lp_solution_is_set_) return Propagate();
 581
 582 // At level zero, if there is still a chance to add cuts or lazy constraints,
 583 // we re-run the LP.
 584 if (trail_->CurrentDecisionLevel() == 0 && !lp_at_level_zero_is_final_) {
 585 return Propagate();
 586 }
 587
 588 // Check whether the change breaks the current LP solution. If it does, call
 589 // Propagate() on the current LP.
 590 for (const int index : watch_indices) {
 591 const double lb =
 592 ToDouble(integer_trail_->LowerBound(integer_variables_[index]));
 593 const double ub =
 594 ToDouble(integer_trail_->UpperBound(integer_variables_[index]));
 595 const double value = lp_solution_[index];
 596 if (value < lb - kCpEpsilon || value > ub + kCpEpsilon) return Propagate();
 597 }
 598
 599 // TODO(user): The saved lp solution is still valid given the current variable
 600 // bounds, so the LP optimal didn't change. However we might still want to add
 601 // new cuts or new lazy constraints?
 602 //
 603 // TODO(user): Propagate the last optimal_constraint? Note that we need
 604 // to be careful since the reversible int in IntegerSumLE are not registered.
 605 // However, because we delete "optimalconstraints" on backtrack, we might not
 606 // care.
 607 return true;
 608}
609
610glop::Fractional LinearProgrammingConstraint::GetVariableValueAtCpScale(
611 glop::ColIndex var) {
612 return scaler_.UnscaleVariableValue(var, simplex_.GetVariableValue(var));
613}
614
// LinearProgrammingConstraint::GetSolutionValue — cached LP value for a
// (positive) CP variable; dies if the variable has no LP mirror column.
// NOTE(review): the extraction dropped listing line 615 with the start of the
// signature — presumably
// `double LinearProgrammingConstraint::GetSolutionValue(`; confirm.
 616 IntegerVariable variable) const {
 617 return lp_solution_[gtl::FindOrDie(mirror_lp_variable_, variable).value()];
 618}
619
// LinearProgrammingConstraint::GetSolutionReducedCost — cached reduced cost
// for a (positive) CP variable; dies if it has no LP mirror column.
// NOTE(review): the extraction dropped listing line 620 with the start of the
// signature — presumably
// `double LinearProgrammingConstraint::GetSolutionReducedCost(`; confirm.
 621 IntegerVariable variable) const {
 622 return lp_reduced_cost_[gtl::FindOrDie(mirror_lp_variable_, variable)
 623 .value()];
 624}
625
626void LinearProgrammingConstraint::UpdateBoundsOfLpVariables() {
627 const int num_vars = integer_variables_.size();
628 for (int i = 0; i < num_vars; i++) {
629 const IntegerVariable cp_var = integer_variables_[i];
630 const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
631 const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
632 const double factor = scaler_.VariableScalingFactor(glop::ColIndex(i));
633 lp_data_.SetVariableBounds(glop::ColIndex(i), lb * factor, ub * factor);
634 }
635}
636
// Runs the simplex on the current lp_data_, updates solve statistics, and on
// success caches the (unscaled) solution into lp_solution_ /
// expanded_lp_solution_. Returns false only on an internal solver error.
637bool LinearProgrammingConstraint::SolveLp() {
 638 if (trail_->CurrentDecisionLevel() == 0) {
 639 lp_at_level_zero_is_final_ = false;
 640 }
 641
 642 const auto status = simplex_.Solve(lp_data_, time_limit_);
 643 total_num_simplex_iterations_ += simplex_.GetNumberOfIterations();
 644 if (!status.ok()) {
 645 VLOG(1) << "The LP solver encountered an error: " << status.error_message();
// Drop the simplex state: it may be corrupted after an error.
 646 simplex_.ClearStateForNextSolve();
 647 return false;
 648 }
 649 average_degeneracy_.AddData(CalculateDegeneracy());
 650 if (average_degeneracy_.CurrentAverage() >= 1000.0) {
 651 VLOG(2) << "High average degeneracy: "
 652 << average_degeneracy_.CurrentAverage();
 653 }
 654
// Per-status solve counters (vector indexed by the glop status enum).
 655 const int status_as_int = static_cast<int>(simplex_.GetProblemStatus());
 656 if (status_as_int >= num_solves_by_status_.size()) {
 657 num_solves_by_status_.resize(status_as_int + 1);
 658 }
 659 num_solves_++;
 660 num_solves_by_status_[status_as_int]++;
 661 VLOG(2) << "lvl:" << trail_->CurrentDecisionLevel() << " "
 662 << simplex_.GetProblemStatus()
 663 << " iter:" << simplex_.GetNumberOfIterations()
 664 << " obj:" << simplex_.GetObjectiveValue();
 665
// NOTE(review): the extraction dropped listing line 666, which opened the
// scope closed at "681 }" below — presumably a status check such as
// `if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {` that
// guards the solution caching. Confirm against the original source.
 667 lp_solution_is_set_ = true;
 668 lp_solution_level_ = trail_->CurrentDecisionLevel();
 669 const int num_vars = integer_variables_.size();
 670 for (int i = 0; i < num_vars; i++) {
 671 const glop::Fractional value =
 672 GetVariableValueAtCpScale(glop::ColIndex(i));
 673 lp_solution_[i] = value;
 674 expanded_lp_solution_[integer_variables_[i]] = value;
 675 expanded_lp_solution_[NegationOf(integer_variables_[i])] = -value;
 676 }
 677
// Remember the level-zero solution: SetLevel(0) reloads it on backtrack.
 678 if (lp_solution_level_ == 0) {
 679 level_zero_lp_solution_ = lp_solution_;
 680 }
 681 }
 682 return true;
 683}
684
// Builds a candidate cut as an integer linear combination of LP rows (given
// by `integer_multipliers`), substitutes implied-bound slacks and LP-row
// slacks, then tries two cut recipes (knapsack/cover and integer rounding).
// Returns true iff at least one cut was added to the constraint manager.
685bool LinearProgrammingConstraint::AddCutFromConstraints(
 686 const std::string& name,
 687 const std::vector<std::pair<RowIndex, IntegerValue>>& integer_multipliers) {
 688 // This is initialized to a valid linear constraint (by taking linear
 689 // combination of the LP rows) and will be transformed into a cut if
 690 // possible.
 691 //
 692 // TODO(user): For CG cuts, Ideally this linear combination should have only
 693 // one fractional variable (basis_col). But because of imprecision, we get a
 694 // bunch of fractional entry with small coefficient (relative to the one of
 695 // basis_col). We try to handle that in IntegerRoundingCut(), but it might be
 696 // better to add small multiple of the involved rows to get rid of them.
 697 IntegerValue cut_ub;
 698 if (!ComputeNewLinearConstraint(integer_multipliers, &tmp_scattered_vector_,
 699 &cut_ub)) {
 700 VLOG(1) << "Issue, overflow!";
 701 return false;
 702 }
 703
 704 // Important: because we use integer_multipliers below, we cannot just
 705 // divide by GCD or call PreventOverflow() here.
 706 //
 707 // TODO(user): the conversion col_index -> IntegerVariable is slow and could
 708 // in principle be removed. Easy for cuts, but not so much for
 709 // implied_bounds_processor_. Note that in theory this could allow us to
 710 // use Literal directly without the need to have an IntegerVariable for them.
 711 tmp_scattered_vector_.ConvertToLinearConstraint(integer_variables_, cut_ub,
 712 &cut_);
 713
 714 // Note that the base constraint we use are currently always tight.
 715 // It is not a requirement though.
 716 if (DEBUG_MODE) {
 717 const double norm = ToDouble(ComputeInfinityNorm(cut_));
 718 const double activity = ComputeActivity(cut_, expanded_lp_solution_);
 719 if (std::abs(activity - ToDouble(cut_.ub)) / norm > 1e-4) {
 720 VLOG(1) << "Cut not tight " << activity << " <= " << ToDouble(cut_.ub);
 721 return false;
 722 }
 723 }
 724 CHECK(constraint_manager_.DebugCheckConstraint(cut_));
 725
 726 // We will create "artificial" variables after this index that will be
 727 // substitued back into LP variables afterwards. Also not that we only use
 728 // positive variable indices for these new variables, so that algorithm that
 729 // take their negation will not mess up the indexing.
 730 const IntegerVariable first_new_var(expanded_lp_solution_.size());
 731 CHECK_EQ(first_new_var.value() % 2, 0);
 732
 733 LinearConstraint copy_in_debug;
 734 if (DEBUG_MODE) {
 735 copy_in_debug = cut_;
 736 }
 737
 738 // Unlike for the knapsack cuts, it might not be always beneficial to
 739 // process the implied bounds even though it seems to be better in average.
 740 //
 741 // TODO(user): Perform more experiments, in particular with which bound we use
 742 // and if we complement or not before the MIR rounding. Other solvers seems
 743 // to try different complementation strategies in a "potprocessing" and we
 744 // don't. Try this too.
 745 tmp_ib_slack_infos_.clear();
 746 implied_bounds_processor_.ProcessUpperBoundedConstraintWithSlackCreation(
 747 /*substitute_only_inner_variables=*/false, first_new_var,
 748 expanded_lp_solution_, &cut_, &tmp_ib_slack_infos_);
 749 DCHECK(implied_bounds_processor_.DebugSlack(first_new_var, copy_in_debug,
 750 cut_, tmp_ib_slack_infos_));
 751
 752 // Fills data for IntegerRoundingCut().
 753 //
 754 // Note(user): we use the current bound here, so the reasonement will only
 755 // produce locally valid cut if we call this at a non-root node. We could
 756 // use the level zero bounds if we wanted to generate a globally valid cut
 757 // at another level. For now this is only called at level zero anyway.
 758 tmp_lp_values_.clear();
 759 tmp_var_lbs_.clear();
 760 tmp_var_ubs_.clear();
 761 for (const IntegerVariable var : cut_.vars) {
// Artificial implied-bound slack: its data lives in tmp_ib_slack_infos_.
 762 if (var >= first_new_var) {
// NOTE(review): the extraction dropped listing line 763 here (between the
// `if` and the slack-info lookup) — likely a comment or a CHECK; confirm
// against the original source.
 764 const auto& info =
 765 tmp_ib_slack_infos_[(var.value() - first_new_var.value()) / 2];
 766 tmp_lp_values_.push_back(info.lp_value);
 767 tmp_var_lbs_.push_back(info.lb);
 768 tmp_var_ubs_.push_back(info.ub);
 769 } else {
 770 tmp_lp_values_.push_back(expanded_lp_solution_[var]);
 771 tmp_var_lbs_.push_back(integer_trail_->LevelZeroLowerBound(var));
 772 tmp_var_ubs_.push_back(integer_trail_->LevelZeroUpperBound(var));
 773 }
 774 }
 775
 776 // Add slack.
 777 // definition: integer_lp_[row] + slack_row == bound;
 778 const IntegerVariable first_slack(
 779 first_new_var + IntegerVariable(2 * tmp_ib_slack_infos_.size()));
 780 tmp_slack_rows_.clear();
 781 tmp_slack_bounds_.clear();
 782 for (const auto pair : integer_multipliers) {
 783 const RowIndex row = pair.first;
 784 const IntegerValue coeff = pair.second;
 785 const auto status = simplex_.GetConstraintStatus(row);
// NOTE(review): the extraction dropped listing line 786 here — `status` is
// otherwise unused in the visible code, so the missing line presumably tests
// it (e.g. skipping rows whose slack is basic/free); confirm against the
// original source.
 787
 788 tmp_lp_values_.push_back(0.0);
 789 cut_.vars.push_back(first_slack +
 790 2 * IntegerVariable(tmp_slack_rows_.size()));
 791 tmp_slack_rows_.push_back(row);
 792 cut_.coeffs.push_back(coeff);
 793
 794 const IntegerValue diff(
 795 CapSub(integer_lp_[row].ub.value(), integer_lp_[row].lb.value()));
 796 if (coeff > 0) {
 797 tmp_slack_bounds_.push_back(integer_lp_[row].ub);
 798 tmp_var_lbs_.push_back(IntegerValue(0));
 799 tmp_var_ubs_.push_back(diff);
 800 } else {
 801 tmp_slack_bounds_.push_back(integer_lp_[row].lb);
 802 tmp_var_lbs_.push_back(-diff);
 803 tmp_var_ubs_.push_back(IntegerValue(0));
 804 }
 805 }
 806
 807 bool at_least_one_added = false;
 808
 809 // Try cover approach to find cut.
 810 {
 811 if (cover_cut_helper_.TrySimpleKnapsack(cut_, tmp_lp_values_, tmp_var_lbs_,
 812 tmp_var_ubs_)) {
 813 at_least_one_added |= PostprocessAndAddCut(
 814 absl::StrCat(name, "_K"), cover_cut_helper_.Info(), first_new_var,
 815 first_slack, tmp_ib_slack_infos_, cover_cut_helper_.mutable_cut());
 816 }
 817 }
 818
 819 // Try integer rounding heuristic to find cut.
 820 {
 821 RoundingOptions options;
 822 options.max_scaling = parameters_.max_integer_rounding_scaling();
 823 integer_rounding_cut_helper_.ComputeCut(options, tmp_lp_values_,
 824 tmp_var_lbs_, tmp_var_ubs_,
 825 &implied_bounds_processor_, &cut_);
 826 at_least_one_added |= PostprocessAndAddCut(
 827 name,
 828 absl::StrCat("num_lifted_booleans=",
 829 integer_rounding_cut_helper_.NumLiftedBooleans()),
 830 first_new_var, first_slack, tmp_ib_slack_infos_, &cut_);
 831 }
 832 return at_least_one_added;
 833}
834
835bool LinearProgrammingConstraint::PostprocessAndAddCut(
836 const std::string& name, const std::string& info,
837 IntegerVariable first_new_var, IntegerVariable first_slack,
838 const std::vector<ImpliedBoundsProcessor::SlackInfo>& ib_slack_infos,
839 LinearConstraint* cut) {
840 // Compute the activity. Warning: the cut no longer have the same size so we
841 // cannot use tmp_lp_values_. Note that the substitution below shouldn't
842 // change the activity by definition.
843 double activity = 0.0;
844 for (int i = 0; i < cut->vars.size(); ++i) {
845 if (cut->vars[i] < first_new_var) {
846 activity +=
847 ToDouble(cut->coeffs[i]) * expanded_lp_solution_[cut->vars[i]];
848 }
849 }
850 const double kMinViolation = 1e-4;
851 const double violation = activity - ToDouble(cut->ub);
852 if (violation < kMinViolation) {
853 VLOG(3) << "Bad cut " << activity << " <= " << ToDouble(cut->ub);
854 return false;
855 }
856
857 // Substitute any slack left.
858 {
859 int num_slack = 0;
860 tmp_scattered_vector_.ClearAndResize(integer_variables_.size());
861 IntegerValue cut_ub = cut->ub;
862 bool overflow = false;
863 for (int i = 0; i < cut->vars.size(); ++i) {
864 const IntegerVariable var = cut->vars[i];
865
866 // Simple copy for non-slack variables.
867 if (var < first_new_var) {
868 const glop::ColIndex col =
869 gtl::FindOrDie(mirror_lp_variable_, PositiveVariable(var));
870 if (VariableIsPositive(var)) {
871 tmp_scattered_vector_.Add(col, cut->coeffs[i]);
872 } else {
873 tmp_scattered_vector_.Add(col, -cut->coeffs[i]);
874 }
875 continue;
876 }
877
878 // Replace slack from bound substitution.
879 if (var < first_slack) {
880 const IntegerValue multiplier = cut->coeffs[i];
881 const int index = (var.value() - first_new_var.value()) / 2;
882 CHECK_LT(index, ib_slack_infos.size());
883
884 tmp_terms_.clear();
885 for (const std::pair<IntegerVariable, IntegerValue>& term :
886 ib_slack_infos[index].terms) {
887 tmp_terms_.push_back(
888 {gtl::FindOrDie(mirror_lp_variable_,
889 PositiveVariable(term.first)),
890 VariableIsPositive(term.first) ? term.second : -term.second});
891 }
892 if (!tmp_scattered_vector_.AddLinearExpressionMultiple(multiplier,
893 tmp_terms_)) {
894 overflow = true;
895 break;
896 }
897 if (!AddProductTo(multiplier, -ib_slack_infos[index].offset, &cut_ub)) {
898 overflow = true;
899 break;
900 }
901 continue;
902 }
903
904 // Replace slack from LP constraints.
905 ++num_slack;
906 const int slack_index = (var.value() - first_slack.value()) / 2;
907 const glop::RowIndex row = tmp_slack_rows_[slack_index];
908 const IntegerValue multiplier = -cut->coeffs[i];
909 if (!tmp_scattered_vector_.AddLinearExpressionMultiple(
910 multiplier, integer_lp_[row].terms)) {
911 overflow = true;
912 break;
913 }
914
915 // Update rhs.
916 if (!AddProductTo(multiplier, tmp_slack_bounds_[slack_index], &cut_ub)) {
917 overflow = true;
918 break;
919 }
920 }
921
922 if (overflow) {
923 VLOG(1) << "Overflow in slack removal.";
924 return false;
925 }
926
927 VLOG(3) << " num_slack: " << num_slack;
928 tmp_scattered_vector_.ConvertToLinearConstraint(integer_variables_, cut_ub,
929 cut);
930 }
931
932 // Display some stats used for investigation of cut generation.
933 const std::string extra_info =
934 absl::StrCat(info, " num_ib_substitutions=", ib_slack_infos.size());
935
936 const double new_violation =
937 ComputeActivity(*cut, expanded_lp_solution_) - ToDouble(cut_.ub);
938 if (std::abs(violation - new_violation) >= 1e-4) {
939 VLOG(1) << "Violation discrepancy after slack removal. "
940 << " before = " << violation << " after = " << new_violation;
941 }
942
943 DivideByGCD(cut);
944 return constraint_manager_.AddCut(*cut, name, expanded_lp_solution_,
945 extra_info);
946}
947
948// TODO(user): This can be still too slow on some problems like
949// 30_70_45_05_100.mps.gz. Not this actual function, but the set of computation
950// it triggers. We should add heuristics to abort earlier if a cut is not
951// promising. Or only test a few positions and not all rows.
952void LinearProgrammingConstraint::AddCGCuts() {
953 const RowIndex num_rows = lp_data_.num_constraints();
954 for (RowIndex row(0); row < num_rows; ++row) {
955 ColIndex basis_col = simplex_.GetBasis(row);
956 const Fractional lp_value = GetVariableValueAtCpScale(basis_col);
957
958 // Only consider fractional basis element. We ignore element that are close
959 // to an integer to reduce the amount of positions we try.
960 //
961 // TODO(user): We could just look at the diff with std::floor() in the hope
962 // that when we are just under an integer, the exact computation below will
963 // also be just under it.
964 if (std::abs(lp_value - std::round(lp_value)) < 0.01) continue;
965
966 // If this variable is a slack, we ignore it. This is because the
967 // corresponding row is not tight under the given lp values.
968 if (basis_col >= integer_variables_.size()) continue;
969
970 if (time_limit_->LimitReached()) break;
971
972 // TODO(user): Avoid code duplication between the sparse/dense path.
973 double magnitude = 0.0;
974 tmp_lp_multipliers_.clear();
975 const glop::ScatteredRow& lambda = simplex_.GetUnitRowLeftInverse(row);
976 if (lambda.non_zeros.empty()) {
977 for (RowIndex row(0); row < num_rows; ++row) {
978 const double value = lambda.values[glop::RowToColIndex(row)];
979 if (std::abs(value) < kZeroTolerance) continue;
980
981 // There should be no BASIC status, but they could be imprecision
982 // in the GetUnitRowLeftInverse() code? not sure, so better be safe.
983 const auto status = simplex_.GetConstraintStatus(row);
985 VLOG(1) << "BASIC row not expected! " << value;
986 continue;
987 }
988
989 magnitude = std::max(magnitude, std::abs(value));
990 tmp_lp_multipliers_.push_back({row, value});
991 }
992 } else {
993 for (const ColIndex col : lambda.non_zeros) {
994 const RowIndex row = glop::ColToRowIndex(col);
995 const double value = lambda.values[col];
996 if (std::abs(value) < kZeroTolerance) continue;
997
998 const auto status = simplex_.GetConstraintStatus(row);
1000 VLOG(1) << "BASIC row not expected! " << value;
1001 continue;
1002 }
1003
1004 magnitude = std::max(magnitude, std::abs(value));
1005 tmp_lp_multipliers_.push_back({row, value});
1006 }
1007 }
1008 if (tmp_lp_multipliers_.empty()) continue;
1009
1010 Fractional scaling;
1011 for (int i = 0; i < 2; ++i) {
1012 if (i == 1) {
1013 // Try other sign.
1014 //
1015 // TODO(user): Maybe add an heuristic to know beforehand which sign to
1016 // use?
1017 for (std::pair<RowIndex, double>& p : tmp_lp_multipliers_) {
1018 p.second = -p.second;
1019 }
1020 }
1021
1022 // TODO(user): We use a lower value here otherwise we might run into
1023 // overflow while computing the cut. This should be fixable.
1024 tmp_integer_multipliers_ =
1025 ScaleLpMultiplier(/*take_objective_into_account=*/false,
1026 tmp_lp_multipliers_, &scaling, /*max_pow=*/52);
1027 AddCutFromConstraints("CG", tmp_integer_multipliers_);
1028 }
1029 }
1030}
1031
1032namespace {
1033
1034// For each element of a, adds a random one in b and append the pair to output.
1035void RandomPick(const std::vector<RowIndex>& a, const std::vector<RowIndex>& b,
1036 ModelRandomGenerator* random,
1037 std::vector<std::pair<RowIndex, RowIndex>>* output) {
1038 if (a.empty() || b.empty()) return;
1039 for (const RowIndex row : a) {
1040 const RowIndex other = b[absl::Uniform<int>(*random, 0, b.size())];
1041 if (other != row) {
1042 output->push_back({row, other});
1043 }
1044 }
1045}
1046
1047template <class ListOfTerms>
1048IntegerValue GetCoeff(ColIndex col, const ListOfTerms& terms) {
1049 for (const auto& term : terms) {
1050 if (term.first == col) return term.second;
1051 }
1052 return IntegerValue(0);
1053}
1054
1055} // namespace
1056
1057// Because we know the objective is integer, the constraint objective >= lb can
1058// sometime cut the current lp optimal, and it can make a big difference to add
1059// it. Or at least use it when constructing more advanced cuts. See
1060// 'multisetcover_batch_0_case_115_instance_0_small_subset_elements_3_sumreqs
1061// _1295_candidates_41.fzn'
1062//
1063// TODO(user): It might be better to just integrate this with the MIR code so
1064// that we not only consider MIR1 involving the objective but we also consider
1065// combining it with other constraints.
1066void LinearProgrammingConstraint::AddObjectiveCut() {
1067 if (integer_objective_.size() <= 1) return;
1068
1069 // We only try to add such cut if the LB objective is "far" from the current
1070 // objective lower bound. Note that this is in term of the "internal" integer
1071 // objective.
1072 const double obj_lp_value = simplex_.GetObjectiveValue();
1073 const IntegerValue obj_lower_bound =
1074 integer_trail_->LevelZeroLowerBound(objective_cp_);
1075 if (obj_lp_value + 1.0 >= ToDouble(obj_lower_bound)) return;
1076
1077 tmp_lp_values_.clear();
1078 tmp_var_lbs_.clear();
1079 tmp_var_ubs_.clear();
1080
1081 // We negate everything to have a <= base constraint.
1082 LinearConstraint objective_ct;
1083 objective_ct.lb = kMinIntegerValue;
1084 objective_ct.ub = integer_objective_offset_ -
1085 integer_trail_->LevelZeroLowerBound(objective_cp_);
1086 IntegerValue obj_coeff_magnitude(0);
1087 for (const auto& [col, coeff] : integer_objective_) {
1088 const IntegerVariable var = integer_variables_[col.value()];
1089 objective_ct.vars.push_back(var);
1090 tmp_lp_values_.push_back(expanded_lp_solution_[var]);
1091 tmp_var_lbs_.push_back(integer_trail_->LevelZeroLowerBound(var));
1092 tmp_var_ubs_.push_back(integer_trail_->LevelZeroUpperBound(var));
1093 objective_ct.coeffs.push_back(-coeff);
1094 obj_coeff_magnitude = std::max(obj_coeff_magnitude, IntTypeAbs(coeff));
1095 }
1096
1097 // If the magnitude is small enough, just try to add the full objective. Other
1098 // cuts will be derived in subsequent passes. Otherwise, try normal cut
1099 // heuristic that should result in a cut with reasonable coefficients.
1100 if (obj_coeff_magnitude < 1e9) {
1101 const bool added = constraint_manager_.AddCut(objective_ct, "Objective",
1102 expanded_lp_solution_);
1103 if (added) return;
1104 }
1105
1106 // Try knapsack.
1107 {
1108 cut_ = objective_ct;
1109 if (cover_cut_helper_.TrySimpleKnapsack(cut_, tmp_lp_values_, tmp_var_lbs_,
1110 tmp_var_ubs_)) {
1111 constraint_manager_.AddCut(cut_, "Objective_K", expanded_lp_solution_);
1112 }
1113 }
1114
1115 // Try MIR1.
1116 {
1117 cut_ = objective_ct;
1118 RoundingOptions options;
1119 options.max_scaling = parameters_.max_integer_rounding_scaling();
1120 integer_rounding_cut_helper_.ComputeCut(options, tmp_lp_values_,
1121 tmp_var_lbs_, tmp_var_ubs_,
1122 &implied_bounds_processor_, &cut_);
1123
1124 // Note that the cut will not be added if it is not good enough.
1125 constraint_manager_.AddCut(cut_, "Objective_MIR", expanded_lp_solution_);
1126 }
1127}
1128
1129void LinearProgrammingConstraint::AddMirCuts() {
1130 // Heuristic to generate MIR_n cuts by combining a small number of rows. This
1131 // works greedily and follow more or less the MIR cut description in the
1132 // literature. We have a current cut, and we add one more row to it while
1133 // eliminating a variable of the current cut whose LP value is far from its
1134 // bound.
1135 //
1136 // A notable difference is that we randomize the variable we eliminate and
1137 // the row we use to do so. We still have weights to indicate our preferred
1138 // choices. This allows to generate different cuts when called again and
1139 // again.
1140 //
1141 // TODO(user): We could combine n rows to make sure we eliminate n variables
1142 // far away from their bounds by solving exactly in integer small linear
1143 // system.
1145 integer_variables_.size(), IntegerValue(0));
1146 SparseBitset<ColIndex> non_zeros_(ColIndex(integer_variables_.size()));
1147
1148 // We compute all the rows that are tight, these will be used as the base row
1149 // for the MIR_n procedure below.
1150 const RowIndex num_rows = lp_data_.num_constraints();
1151 std::vector<std::pair<RowIndex, IntegerValue>> base_rows;
1152 absl::StrongVector<RowIndex, double> row_weights(num_rows.value(), 0.0);
1153 for (RowIndex row(0); row < num_rows; ++row) {
1154 const auto status = simplex_.GetConstraintStatus(row);
1155 if (status == glop::ConstraintStatus::BASIC) continue;
1156 if (status == glop::ConstraintStatus::FREE) continue;
1157
1160 base_rows.push_back({row, IntegerValue(1)});
1161 }
1164 base_rows.push_back({row, IntegerValue(-1)});
1165 }
1166
1167 // For now, we use the dual values for the row "weights".
1168 //
1169 // Note that we use the dual at LP scale so that it make more sense when we
1170 // compare different rows since the LP has been scaled.
1171 //
1172 // TODO(user): In Kati Wolter PhD "Implementation of Cutting Plane
1173 // Separators for Mixed Integer Programs" which describe SCIP's MIR cuts
1174 // implementation (or at least an early version of it), a more complex score
1175 // is used.
1176 //
1177 // Note(user): Because we only consider tight rows under the current lp
1178 // solution (i.e. non-basic rows), most should have a non-zero dual values.
1179 // But there is some degenerate problem where these rows have a really low
1180 // weight (or even zero), and having only weight of exactly zero in
1181 // std::discrete_distribution will result in a crash.
1182 row_weights[row] = std::max(1e-8, std::abs(simplex_.GetDualValue(row)));
1183 }
1184
1185 std::vector<double> weights;
1187 std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
1188 for (const std::pair<RowIndex, IntegerValue>& entry : base_rows) {
1189 if (time_limit_->LimitReached()) break;
1190
1191 // First try to generate a cut directly from this base row (MIR1).
1192 //
1193 // Note(user): We abort on success like it seems to be done in the
1194 // literature. Note that we don't succeed that often in generating an
1195 // efficient cut, so I am not sure aborting will make a big difference
1196 // speedwise. We might generate similar cuts though, but hopefully the cut
1197 // management can deal with that.
1198 integer_multipliers = {entry};
1199 if (AddCutFromConstraints("MIR_1", integer_multipliers)) {
1200 continue;
1201 }
1202
1203 // Cleanup.
1204 for (const ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
1205 dense_cut[col] = IntegerValue(0);
1206 }
1207 non_zeros_.SparseClearAll();
1208
1209 // Copy cut.
1210 const IntegerValue multiplier = entry.second;
1211 for (const std::pair<ColIndex, IntegerValue> term :
1212 integer_lp_[entry.first].terms) {
1213 const ColIndex col = term.first;
1214 const IntegerValue coeff = term.second;
1215 non_zeros_.Set(col);
1216 dense_cut[col] += coeff * multiplier;
1217 }
1218
1219 used_rows.assign(num_rows.value(), false);
1220 used_rows[entry.first] = true;
1221
1222 // We will aggregate at most kMaxAggregation more rows.
1223 //
1224 // TODO(user): optim + tune.
1225 const int kMaxAggregation = 5;
1226 for (int i = 0; i < kMaxAggregation; ++i) {
1227 // First pick a variable to eliminate. We currently pick a random one with
1228 // a weight that depend on how far it is from its closest bound.
1229 IntegerValue max_magnitude(0);
1230 weights.clear();
1231 std::vector<ColIndex> col_candidates;
1232 for (const ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
1233 if (dense_cut[col] == 0) continue;
1234
1235 max_magnitude = std::max(max_magnitude, IntTypeAbs(dense_cut[col]));
1236 const int col_degree =
1237 lp_data_.GetSparseColumn(col).num_entries().value();
1238 if (col_degree <= 1) continue;
1240 continue;
1241 }
1242
1243 const IntegerVariable var = integer_variables_[col.value()];
1244 const double lp_value = expanded_lp_solution_[var];
1245 const double lb = ToDouble(integer_trail_->LevelZeroLowerBound(var));
1246 const double ub = ToDouble(integer_trail_->LevelZeroUpperBound(var));
1247 const double bound_distance = std::min(ub - lp_value, lp_value - lb);
1248 if (bound_distance > 1e-2) {
1249 weights.push_back(bound_distance);
1250 col_candidates.push_back(col);
1251 }
1252 }
1253 if (col_candidates.empty()) break;
1254
1255 const ColIndex var_to_eliminate =
1256 col_candidates[std::discrete_distribution<>(weights.begin(),
1257 weights.end())(*random_)];
1258
1259 // What rows can we add to eliminate var_to_eliminate?
1260 std::vector<RowIndex> possible_rows;
1261 weights.clear();
1262 for (const auto entry : lp_data_.GetSparseColumn(var_to_eliminate)) {
1263 const RowIndex row = entry.row();
1264 const auto status = simplex_.GetConstraintStatus(row);
1265 if (status == glop::ConstraintStatus::BASIC) continue;
1266 if (status == glop::ConstraintStatus::FREE) continue;
1267
1268 // We disallow all the rows that contain a variable that we already
1269 // eliminated (or are about to). This mean that we choose rows that
1270 // form a "triangular" matrix on the position we choose to eliminate.
1271 if (used_rows[row]) continue;
1272 used_rows[row] = true;
1273
1274 // TODO(user): Instead of using FIXED_VALUE consider also both direction
1275 // when we almost have an equality? that is if the LP constraints bounds
1276 // are close from each others (<1e-6 ?). Initial experiments shows it
1277 // doesn't change much, so I kept this version for now. Note that it
1278 // might just be better to use the side that constrain the current lp
1279 // optimal solution (that we get from the status).
1280 bool add_row = false;
1283 if (entry.coefficient() > 0.0) {
1284 if (dense_cut[var_to_eliminate] < 0) add_row = true;
1285 } else {
1286 if (dense_cut[var_to_eliminate] > 0) add_row = true;
1287 }
1288 }
1291 if (entry.coefficient() > 0.0) {
1292 if (dense_cut[var_to_eliminate] > 0) add_row = true;
1293 } else {
1294 if (dense_cut[var_to_eliminate] < 0) add_row = true;
1295 }
1296 }
1297 if (add_row) {
1298 possible_rows.push_back(row);
1299 weights.push_back(row_weights[row]);
1300 }
1301 }
1302 if (possible_rows.empty()) break;
1303
1304 const RowIndex row_to_combine =
1305 possible_rows[std::discrete_distribution<>(weights.begin(),
1306 weights.end())(*random_)];
1307 const IntegerValue to_combine_coeff =
1308 GetCoeff(var_to_eliminate, integer_lp_[row_to_combine].terms);
1309 CHECK_NE(to_combine_coeff, 0);
1310
1311 IntegerValue mult1 = -to_combine_coeff;
1312 IntegerValue mult2 = dense_cut[var_to_eliminate];
1313 CHECK_NE(mult2, 0);
1314 if (mult1 < 0) {
1315 mult1 = -mult1;
1316 mult2 = -mult2;
1317 }
1318
1319 const IntegerValue gcd = IntegerValue(
1320 MathUtil::GCD64(std::abs(mult1.value()), std::abs(mult2.value())));
1321 CHECK_NE(gcd, 0);
1322 mult1 /= gcd;
1323 mult2 /= gcd;
1324
1325 // Overflow detection.
1326 //
1327 // TODO(user): do that in the possible_rows selection? only problem is
1328 // that we do not have the integer coefficient there...
1329 for (std::pair<RowIndex, IntegerValue>& entry : integer_multipliers) {
1330 max_magnitude = std::max(max_magnitude, IntTypeAbs(entry.second));
1331 }
1332 if (CapAdd(CapProd(max_magnitude.value(), std::abs(mult1.value())),
1333 CapProd(infinity_norms_[row_to_combine].value(),
1334 std::abs(mult2.value()))) ==
1336 break;
1337 }
1338
1339 for (std::pair<RowIndex, IntegerValue>& entry : integer_multipliers) {
1340 entry.second *= mult1;
1341 }
1342 integer_multipliers.push_back({row_to_combine, mult2});
1343
1344 // TODO(user): Not supper efficient to recombine the rows.
1345 if (AddCutFromConstraints(absl::StrCat("MIR_", i + 2),
1346 integer_multipliers)) {
1347 break;
1348 }
1349
1350 // Minor optim: the computation below is only needed if we do one more
1351 // iteration.
1352 if (i + 1 == kMaxAggregation) break;
1353
1354 for (ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
1355 dense_cut[col] *= mult1;
1356 }
1357 for (const std::pair<ColIndex, IntegerValue> term :
1358 integer_lp_[row_to_combine].terms) {
1359 const ColIndex col = term.first;
1360 const IntegerValue coeff = term.second;
1361 non_zeros_.Set(col);
1362 dense_cut[col] += coeff * mult2;
1363 }
1364 }
1365 }
1366}
1367
1368void LinearProgrammingConstraint::AddZeroHalfCuts() {
1369 if (time_limit_->LimitReached()) return;
1370
1371 tmp_lp_values_.clear();
1372 tmp_var_lbs_.clear();
1373 tmp_var_ubs_.clear();
1374 for (const IntegerVariable var : integer_variables_) {
1375 tmp_lp_values_.push_back(expanded_lp_solution_[var]);
1376 tmp_var_lbs_.push_back(integer_trail_->LevelZeroLowerBound(var));
1377 tmp_var_ubs_.push_back(integer_trail_->LevelZeroUpperBound(var));
1378 }
1379
1380 // TODO(user): See if it make sense to try to use implied bounds there.
1381 zero_half_cut_helper_.ProcessVariables(tmp_lp_values_, tmp_var_lbs_,
1382 tmp_var_ubs_);
1383 for (glop::RowIndex row(0); row < integer_lp_.size(); ++row) {
1384 // Even though we could use non-tight row, for now we prefer to use tight
1385 // ones.
1386 const auto status = simplex_.GetConstraintStatus(row);
1387 if (status == glop::ConstraintStatus::BASIC) continue;
1388 if (status == glop::ConstraintStatus::FREE) continue;
1389
1390 zero_half_cut_helper_.AddOneConstraint(
1391 row, integer_lp_[row].terms, integer_lp_[row].lb, integer_lp_[row].ub);
1392 }
1393 for (const std::vector<std::pair<RowIndex, IntegerValue>>& multipliers :
1394 zero_half_cut_helper_.InterestingCandidates(random_)) {
1395 if (time_limit_->LimitReached()) break;
1396
1397 // TODO(user): Make sure that if the resulting linear coefficients are not
1398 // too high, we do try a "divisor" of two and thus try a true zero-half cut
1399 // instead of just using our best MIR heuristic (which might still be better
1400 // though).
1401 AddCutFromConstraints("ZERO_HALF", multipliers);
1402 }
1403}
1404
1405void LinearProgrammingConstraint::UpdateSimplexIterationLimit(
1406 const int64_t min_iter, const int64_t max_iter) {
1407 if (parameters_.linearization_level() < 2) return;
1408 const int64_t num_degenerate_columns = CalculateDegeneracy();
1409 const int64_t num_cols = simplex_.GetProblemNumCols().value();
1410 if (num_cols <= 0) {
1411 return;
1412 }
1413 CHECK_GT(num_cols, 0);
1414 const int64_t decrease_factor = (10 * num_degenerate_columns) / num_cols;
1416 // We reached here probably because we predicted wrong. We use this as a
1417 // signal to increase the iterations or punish less for degeneracy compare
1418 // to the other part.
1419 if (is_degenerate_) {
1420 next_simplex_iter_ /= std::max(int64_t{1}, decrease_factor);
1421 } else {
1422 next_simplex_iter_ *= 2;
1423 }
1424 } else if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
1425 if (is_degenerate_) {
1426 next_simplex_iter_ /= std::max(int64_t{1}, 2 * decrease_factor);
1427 } else {
1428 // This is the most common case. We use the size of the problem to
1429 // determine the limit and ignore the previous limit.
1430 next_simplex_iter_ = num_cols / 40;
1431 }
1432 }
1433 next_simplex_iter_ =
1434 std::max(min_iter, std::min(max_iter, next_simplex_iter_));
1435}
1436
1438 UpdateBoundsOfLpVariables();
1439
1440 // TODO(user): It seems the time we loose by not stopping early might be worth
1441 // it because we end up with a better explanation at optimality.
1443 if (/* DISABLES CODE */ (false) && objective_is_defined_) {
1444 // We put a limit on the dual objective since there is no point increasing
1445 // it past our current objective upper-bound (we will already fail as soon
1446 // as we pass it). Note that this limit is properly transformed using the
1447 // objective scaling factor and offset stored in lp_data_.
1448 //
1449 // Note that we use a bigger epsilon here to be sure that if we abort
1450 // because of this, we will report a conflict.
1451 parameters.set_objective_upper_limit(
1452 static_cast<double>(integer_trail_->UpperBound(objective_cp_).value() +
1453 100.0 * kCpEpsilon));
1454 }
1455
1456 // Put an iteration limit on the work we do in the simplex for this call. Note
1457 // that because we are "incremental", even if we don't solve it this time we
1458 // will make progress towards a solve in the lower node of the tree search.
1459 if (trail_->CurrentDecisionLevel() == 0) {
1460 // TODO(user): Dynamically change the iteration limit for root node as
1461 // well.
1462 parameters.set_max_number_of_iterations(2000);
1463 } else {
1464 parameters.set_max_number_of_iterations(next_simplex_iter_);
1465 }
1466 if (parameters_.use_exact_lp_reason()) {
1467 parameters.set_change_status_to_imprecise(false);
1468 parameters.set_primal_feasibility_tolerance(1e-7);
1469 parameters.set_dual_feasibility_tolerance(1e-7);
1470 }
1471
1472 simplex_.SetParameters(parameters);
1474 if (!SolveLp()) return true;
1475
1476 // Add new constraints to the LP and resolve?
1477 const int max_cuts_rounds =
1478 parameters_.cut_level() <= 0
1479 ? 0
1480 : (trail_->CurrentDecisionLevel() == 0
1481 ? parameters_.max_cut_rounds_at_level_zero()
1482 : 1);
1483 int cuts_round = 0;
1484 while (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL &&
1485 cuts_round < max_cuts_rounds) {
1486 // We wait for the first batch of problem constraints to be added before we
1487 // begin to generate cuts. Note that we rely on num_solves_ since on some
1488 // problems there is no other constriants than the cuts.
1489 cuts_round++;
1490 if (num_solves_ > 1) {
1491 // This must be called first.
1492 implied_bounds_processor_.RecomputeCacheAndSeparateSomeImpliedBoundCuts(
1493 expanded_lp_solution_);
1494
1495 // The "generic" cuts are currently part of this class as they are using
1496 // data from the current LP.
1497 //
1498 // TODO(user): Refactor so that they are just normal cut generators?
1499 if (trail_->CurrentDecisionLevel() == 0) {
1500 if (parameters_.add_objective_cut()) AddObjectiveCut();
1501 if (parameters_.add_mir_cuts()) AddMirCuts();
1502 if (parameters_.add_cg_cuts()) AddCGCuts();
1503 if (parameters_.add_zero_half_cuts()) AddZeroHalfCuts();
1504 }
1505
1506 // Try to add cuts.
1507 if (!cut_generators_.empty() &&
1508 (trail_->CurrentDecisionLevel() == 0 ||
1509 !parameters_.only_add_cuts_at_level_zero())) {
1510 for (const CutGenerator& generator : cut_generators_) {
1511 if (!generator.generate_cuts(expanded_lp_solution_,
1512 &constraint_manager_)) {
1513 return false;
1514 }
1515 }
1516 }
1517
1518 implied_bounds_processor_.IbCutPool().TransferToManager(
1519 expanded_lp_solution_, &constraint_manager_);
1520 }
1521
1522 glop::BasisState state = simplex_.GetState();
1523 if (constraint_manager_.ChangeLp(expanded_lp_solution_, &state)) {
1524 simplex_.LoadStateForNextSolve(state);
1525 if (!CreateLpFromConstraintManager()) {
1526 return integer_trail_->ReportConflict({});
1527 }
1528 const double old_obj = simplex_.GetObjectiveValue();
1529 if (!SolveLp()) return true;
1531 VLOG(1) << "Relaxation improvement " << old_obj << " -> "
1532 << simplex_.GetObjectiveValue()
1533 << " diff: " << simplex_.GetObjectiveValue() - old_obj
1534 << " level: " << trail_->CurrentDecisionLevel();
1535 }
1536 } else {
1537 if (trail_->CurrentDecisionLevel() == 0) {
1538 lp_at_level_zero_is_final_ = true;
1539 }
1540 break;
1541 }
1542 }
1543
1544 // A dual-unbounded problem is infeasible. We use the dual ray reason.
1546 if (parameters_.use_exact_lp_reason()) {
1547 if (!FillExactDualRayReason()) return true;
1548 } else {
1549 FillReducedCostReasonIn(simplex_.GetDualRayRowCombination(),
1550 &integer_reason_);
1551 }
1552 return integer_trail_->ReportConflict(integer_reason_);
1553 }
1554
1555 // TODO(user): Update limits for DUAL_UNBOUNDED status as well.
1556 UpdateSimplexIterationLimit(/*min_iter=*/10, /*max_iter=*/1000);
1557
1558 // Optimality deductions if problem has an objective.
1559 if (objective_is_defined_ &&
1562 // TODO(user): Maybe do a bit less computation when we cannot propagate
1563 // anything.
1564 if (parameters_.use_exact_lp_reason()) {
1565 if (!ExactLpReasonning()) return false;
1566
1567 // Display when the inexact bound would have propagated more.
1568 if (VLOG_IS_ON(2)) {
1569 const double relaxed_optimal_objective = simplex_.GetObjectiveValue();
1570 const IntegerValue approximate_new_lb(static_cast<int64_t>(
1571 std::ceil(relaxed_optimal_objective - kCpEpsilon)));
1572 const IntegerValue propagated_lb =
1573 integer_trail_->LowerBound(objective_cp_);
1574 if (approximate_new_lb > propagated_lb) {
1575 VLOG(2) << "LP objective [ " << ToDouble(propagated_lb) << ", "
1576 << ToDouble(integer_trail_->UpperBound(objective_cp_))
1577 << " ] approx_lb += "
1578 << ToDouble(approximate_new_lb - propagated_lb) << " gap: "
1579 << integer_trail_->UpperBound(objective_cp_) - propagated_lb;
1580 }
1581 }
1582 } else {
1583 // Try to filter optimal objective value. Note that GetObjectiveValue()
1584 // already take care of the scaling so that it returns an objective in the
1585 // CP world.
1586 FillReducedCostReasonIn(simplex_.GetReducedCosts(), &integer_reason_);
1587 const double objective_cp_ub =
1588 ToDouble(integer_trail_->UpperBound(objective_cp_));
1589 const double relaxed_optimal_objective = simplex_.GetObjectiveValue();
1590 ReducedCostStrengtheningDeductions(objective_cp_ub -
1591 relaxed_optimal_objective);
1592 if (!deductions_.empty()) {
1593 deductions_reason_ = integer_reason_;
1594 deductions_reason_.push_back(
1595 integer_trail_->UpperBoundAsLiteral(objective_cp_));
1596 }
1597
1598 // Push new objective lb.
1599 const IntegerValue approximate_new_lb(static_cast<int64_t>(
1600 std::ceil(relaxed_optimal_objective - kCpEpsilon)));
1601 if (approximate_new_lb > integer_trail_->LowerBound(objective_cp_)) {
1602 const IntegerLiteral deduction =
1603 IntegerLiteral::GreaterOrEqual(objective_cp_, approximate_new_lb);
1604 if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
1605 return false;
1606 }
1607 }
1608
1609 // Push reduced cost strengthening bounds.
1610 if (!deductions_.empty()) {
1611 const int trail_index_with_same_reason = integer_trail_->Index();
1612 for (const IntegerLiteral deduction : deductions_) {
1613 if (!integer_trail_->Enqueue(deduction, {}, deductions_reason_,
1614 trail_index_with_same_reason)) {
1615 return false;
1616 }
1617 }
1618 }
1619 }
1620 }
1621
1622 // Copy more info about the current solution.
1624 CHECK(lp_solution_is_set_);
1625
1626 lp_objective_ = simplex_.GetObjectiveValue();
1627 lp_solution_is_integer_ = true;
1628 const int num_vars = integer_variables_.size();
1629 for (int i = 0; i < num_vars; i++) {
1630 lp_reduced_cost_[i] = scaler_.UnscaleReducedCost(
1631 glop::ColIndex(i), simplex_.GetReducedCost(glop::ColIndex(i)));
1632 if (std::abs(lp_solution_[i] - std::round(lp_solution_[i])) >
1633 kCpEpsilon) {
1634 lp_solution_is_integer_ = false;
1635 }
1636 }
1637
1638 if (compute_reduced_cost_averages_) {
1639 UpdateAverageReducedCosts();
1640 }
1641 }
1642
1643 if (parameters_.use_branching_in_lp() && objective_is_defined_ &&
1644 trail_->CurrentDecisionLevel() == 0 && !is_degenerate_ &&
1645 lp_solution_is_set_ && !lp_solution_is_integer_ &&
1646 parameters_.linearization_level() >= 2 &&
1647 compute_reduced_cost_averages_ &&
1649 count_since_last_branching_++;
1650 if (count_since_last_branching_ < branching_frequency_) {
1651 return true;
1652 }
1653 count_since_last_branching_ = 0;
1654 bool branching_successful = false;
1655
1656 // Strong branching on top max_num_branches variable.
1657 const int max_num_branches = 3;
1658 const int num_vars = integer_variables_.size();
1659 std::vector<std::pair<double, IntegerVariable>> branching_vars;
1660 for (int i = 0; i < num_vars; ++i) {
1661 const IntegerVariable var = integer_variables_[i];
1662 const IntegerVariable positive_var = PositiveVariable(var);
1663
1664 // Skip non fractional variables.
1665 const double current_value = GetSolutionValue(positive_var);
1666 if (std::abs(current_value - std::round(current_value)) <= kCpEpsilon) {
1667 continue;
1668 }
1669
1670 // Skip ignored variables.
1671 if (integer_trail_->IsCurrentlyIgnored(var)) continue;
1672
1673 // We can use any metric to select a variable to branch on. Reduced cost
1674 // average is one of the most promissing metric. It captures the history
1675 // of the objective bound improvement in LP due to changes in the given
1676 // variable bounds.
1677 //
1678 // NOTE: We also experimented using PseudoCosts and most recent reduced
1679 // cost as metrics but it doesn't give better results on benchmarks.
1680 const double cost_i = rc_scores_[i];
1681 std::pair<double, IntegerVariable> branching_var =
1682 std::make_pair(-cost_i, positive_var);
1683 auto iterator = std::lower_bound(branching_vars.begin(),
1684 branching_vars.end(), branching_var);
1685
1686 branching_vars.insert(iterator, branching_var);
1687 if (branching_vars.size() > max_num_branches) {
1688 branching_vars.resize(max_num_branches);
1689 }
1690 }
1691
1692 for (const std::pair<double, IntegerVariable>& branching_var :
1693 branching_vars) {
1694 const IntegerVariable positive_var = branching_var.second;
1695 VLOG(2) << "Branching on: " << positive_var;
1696 if (BranchOnVar(positive_var)) {
1697 VLOG(2) << "Branching successful.";
1698 branching_successful = true;
1699 } else {
1700 break;
1701 }
1702 }
1703 if (!branching_successful) {
1704 branching_frequency_ *= 2;
1705 }
1706 }
1707 return true;
1708}
1709
1710// Returns kMinIntegerValue in case of overflow.
1711//
1712// TODO(user): Because of PreventOverflow(), this should actually never happen.
1713IntegerValue LinearProgrammingConstraint::GetImpliedLowerBound(
1714 const LinearConstraint& terms) const {
1715 IntegerValue lower_bound(0);
1716 const int size = terms.vars.size();
1717 for (int i = 0; i < size; ++i) {
1718 const IntegerVariable var = terms.vars[i];
1719 const IntegerValue coeff = terms.coeffs[i];
1720 CHECK_NE(coeff, 0);
1721 const IntegerValue bound = coeff > 0 ? integer_trail_->LowerBound(var)
1722 : integer_trail_->UpperBound(var);
1724 }
1725 return lower_bound;
1726}
1727
1728bool LinearProgrammingConstraint::PossibleOverflow(
1729 const LinearConstraint& constraint) {
1730 IntegerValue lower_bound(0);
1731 const int size = constraint.vars.size();
1732 for (int i = 0; i < size; ++i) {
1733 const IntegerVariable var = constraint.vars[i];
1734 const IntegerValue coeff = constraint.coeffs[i];
1735 CHECK_NE(coeff, 0);
1736 const IntegerValue bound = coeff > 0
1737 ? integer_trail_->LevelZeroLowerBound(var)
1738 : integer_trail_->LevelZeroUpperBound(var);
1740 return true;
1741 }
1742 }
1743 const int64_t slack = CapAdd(lower_bound.value(), -constraint.ub.value());
1744 if (slack == std::numeric_limits<int64_t>::min() ||
1746 return true;
1747 }
1748 return false;
1749}
1750
1751namespace {
1752
1753absl::int128 FloorRatio128(absl::int128 x, IntegerValue positive_div) {
1754 absl::int128 div128(positive_div.value());
1755 absl::int128 result = x / div128;
1756 if (result * div128 > x) return result - 1;
1757 return result;
1758}
1759
1760} // namespace
1761
// Divides all coefficients (and adjusts the rhs accordingly, exactly, using
// int128 arithmetic) so that any partial sum of the constraint activity fits
// comfortably below 2^max_pow. This weakens the constraint slightly but keeps
// it valid.
void LinearProgrammingConstraint::PreventOverflow(LinearConstraint* constraint,
                                                  int max_pow) {
  // First, make all coefficient positive.
  MakeAllCoefficientsPositive(constraint);

  // Compute the min/max possible partial sum. Note that we need to use the
  // level zero bounds here since we might use this cut after backtrack.
  // Doubles are fine here: this only decides the divisor, correctness of the
  // rewrite below does not depend on these estimates being exact.
  double sum_min = std::min(0.0, ToDouble(-constraint->ub));
  double sum_max = std::max(0.0, ToDouble(-constraint->ub));
  const int size = constraint->vars.size();
  for (int i = 0; i < size; ++i) {
    const IntegerVariable var = constraint->vars[i];
    const double coeff = ToDouble(constraint->coeffs[i]);
    sum_min +=
        coeff *
        std::min(0.0, ToDouble(integer_trail_->LevelZeroLowerBound(var)));
    sum_max +=
        coeff *
        std::max(0.0, ToDouble(integer_trail_->LevelZeroUpperBound(var)));
  }
  const double max_value = std::max({sum_max, -sum_min, sum_max - sum_min});

  // Smallest divisor such that max_value / divisor <= 2^max_pow.
  const IntegerValue divisor(std::ceil(std::ldexp(max_value, -max_pow)));
  if (divisor <= 1) return;

  // To be correct, we need to shift all variable so that they are positive.
  //
  // Important: One might be tempted to think that using the current variable
  // bounds is okay here since we only use this to derive cut/constraint that
  // only needs to be locally valid. However, in some corner cases (like when
  // one term become zero), we might loose the fact that we used one of the
  // variable bound to derive the new constraint, so we will miss it in the
  // explanation !!
  //
  // TODO(user): This code is tricky and similar to the one to generate cuts.
  // Test and may reduce the duplication? note however that here we use int128
  // to deal with potential overflow.
  int new_size = 0;
  absl::int128 adjust = 0;
  for (int i = 0; i < size; ++i) {
    const IntegerValue old_coeff = constraint->coeffs[i];
    const IntegerValue new_coeff = FloorRatio(old_coeff, divisor);

    // Compute the rhs adjustment: the rounding error (remainder) multiplied
    // by the level-zero lower bound of the variable must be subtracted from
    // the rhs for the rounded constraint to stay valid.
    const absl::int128 remainder =
        absl::int128(old_coeff.value()) -
        absl::int128(new_coeff.value()) * absl::int128(divisor.value());
    adjust +=
        remainder *
        absl::int128(
            integer_trail_->LevelZeroLowerBound(constraint->vars[i]).value());

    // Drop terms whose coefficient rounded down to zero.
    if (new_coeff == 0) continue;
    constraint->vars[new_size] = constraint->vars[i];
    constraint->coeffs[new_size] = new_coeff;
    ++new_size;
  }
  constraint->vars.resize(new_size);
  constraint->coeffs.resize(new_size);

  // New rhs = floor((old_ub - adjust) / divisor), computed exactly in 128
  // bits before casting back to int64.
  constraint->ub = IntegerValue(static_cast<int64_t>(
      FloorRatio128(absl::int128(constraint->ub.value()) - adjust, divisor)));
}
1825
1826// TODO(user): combine this with RelaxLinearReason() to avoid the extra
1827// magnitude vector and the weird precondition of RelaxLinearReason().
1828void LinearProgrammingConstraint::SetImpliedLowerBoundReason(
1829 const LinearConstraint& terms, IntegerValue slack) {
1830 integer_reason_.clear();
1831 std::vector<IntegerValue> magnitudes;
1832 const int size = terms.vars.size();
1833 for (int i = 0; i < size; ++i) {
1834 const IntegerVariable var = terms.vars[i];
1835 const IntegerValue coeff = terms.coeffs[i];
1836 CHECK_NE(coeff, 0);
1837 if (coeff > 0) {
1838 magnitudes.push_back(coeff);
1839 integer_reason_.push_back(integer_trail_->LowerBoundAsLiteral(var));
1840 } else {
1841 magnitudes.push_back(-coeff);
1842 integer_reason_.push_back(integer_trail_->UpperBoundAsLiteral(var));
1843 }
1844 }
1845 CHECK_GE(slack, 0);
1846 if (slack > 0) {
1847 integer_trail_->RelaxLinearReason(slack, magnitudes, &integer_reason_);
1848 }
1849 integer_trail_->RemoveLevelZeroBounds(&integer_reason_);
1850}
1851
// Converts the (fractional) LP row multipliers into integer multipliers by
// scaling them with a power of two and rounding. The chosen scaling factor is
// returned via 'scaling' so callers can relate the integer combination back
// to the LP values. Returns an empty vector when no usable scaling exists.
std::vector<std::pair<RowIndex, IntegerValue>>
LinearProgrammingConstraint::ScaleLpMultiplier(
    bool take_objective_into_account,
    const std::vector<std::pair<RowIndex, double>>& lp_multipliers,
    Fractional* scaling, int max_pow) const {
  double max_sum = 0.0;
  tmp_cp_multipliers_.clear();
  for (const std::pair<RowIndex, double>& p : lp_multipliers) {
    const RowIndex row = p.first;
    const Fractional lp_multi = p.second;

    // We ignore small values since these are likely errors and will not
    // contribute much to the new lp constraint anyway.
    if (std::abs(lp_multi) < kZeroTolerance) continue;

    // Remove trivial bad cases.
    //
    // TODO(user): It might be better (when possible) to use the OPTIMAL row
    // status since in most situation we do want the constraint we add to be
    // tight under the current LP solution. Only for infeasible problem we might
    // not have access to the status.
    if (lp_multi > 0.0 && integer_lp_[row].ub >= kMaxIntegerValue) {
      continue;
    }
    if (lp_multi < 0.0 && integer_lp_[row].lb <= kMinIntegerValue) {
      continue;
    }

    // Undo the LP scaling to get multipliers in the CP model space, and track
    // an upper bound on the magnitude of the combined constraint.
    const Fractional cp_multi = scaler_.UnscaleDualValue(row, lp_multi);
    tmp_cp_multipliers_.push_back({row, cp_multi});
    max_sum += ToDouble(infinity_norms_[row]) * std::abs(cp_multi);
  }

  // This behaves exactly like if we had another "objective" constraint with
  // an lp_multi of 1.0 and a cp_multi of 1.0.
  if (take_objective_into_account) {
    max_sum += ToDouble(objective_infinity_norm_);
  }

  *scaling = 1.0;
  std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
  if (max_sum == 0.0) {
    // Empty linear combination.
    return integer_multipliers;
  }

  // We want max_sum * scaling to be <= 2 ^ max_pow and fit on an int64_t.
  // We use a power of 2 as this seems to work better.
  const double threshold = std::ldexp(1, max_pow) / max_sum;
  if (threshold < 1.0) {
    // TODO(user): we currently do not support scaling down, so we just abort
    // in this case.
    return integer_multipliers;
  }
  while (2 * *scaling <= threshold) *scaling *= 2;

  // Scale the multipliers by *scaling.
  //
  // TODO(user): Maybe use int128 to avoid overflow?
  for (const auto entry : tmp_cp_multipliers_) {
    const IntegerValue coeff(std::round(entry.second * (*scaling)));
    if (coeff != 0) integer_multipliers.push_back({entry.first, coeff});
  }
  return integer_multipliers;
}
1917
1918bool LinearProgrammingConstraint::ComputeNewLinearConstraint(
1919 const std::vector<std::pair<RowIndex, IntegerValue>>& integer_multipliers,
1920 ScatteredIntegerVector* scattered_vector, IntegerValue* upper_bound) const {
1921 // Initialize the new constraint.
1922 *upper_bound = 0;
1923 scattered_vector->ClearAndResize(integer_variables_.size());
1924
1925 // Compute the new constraint by taking the linear combination given by
1926 // integer_multipliers of the integer constraints in integer_lp_.
1927 for (const std::pair<RowIndex, IntegerValue> term : integer_multipliers) {
1928 const RowIndex row = term.first;
1929 const IntegerValue multiplier = term.second;
1930 CHECK_LT(row, integer_lp_.size());
1931
1932 // Update the constraint.
1933 if (!scattered_vector->AddLinearExpressionMultiple(
1934 multiplier, integer_lp_[row].terms)) {
1935 return false;
1936 }
1937
1938 // Update the upper bound.
1939 const IntegerValue bound =
1940 multiplier > 0 ? integer_lp_[row].ub : integer_lp_[row].lb;
1941 if (!AddProductTo(multiplier, bound, upper_bound)) return false;
1942 }
1943
1944 return true;
1945}
1946
// Heuristically increases or decreases each integer multiplier (within
// overflow-safe and sign-preserving limits) whenever doing so tightens the
// final constraint, i.e. reduces |upper_bound - ImpliedLB(scattered_vector)|.
//
// TODO(user): no need to update the multipliers.
void LinearProgrammingConstraint::AdjustNewLinearConstraint(
    std::vector<std::pair<glop::RowIndex, IntegerValue>>* integer_multipliers,
    ScatteredIntegerVector* scattered_vector, IntegerValue* upper_bound) const {
  const IntegerValue kMaxWantedCoeff(1e18);
  for (std::pair<RowIndex, IntegerValue>& term : *integer_multipliers) {
    const RowIndex row = term.first;
    const IntegerValue multiplier = term.second;
    if (multiplier == 0) continue;

    // We will only allow change of the form "multiplier += to_add" with to_add
    // in [-negative_limit, positive_limit].
    IntegerValue negative_limit = kMaxWantedCoeff;
    IntegerValue positive_limit = kMaxWantedCoeff;

    // Make sure we never change the sign of the multiplier, except if the
    // row is an equality in which case we don't care.
    if (integer_lp_[row].ub != integer_lp_[row].lb) {
      if (multiplier > 0) {
        negative_limit = std::min(negative_limit, multiplier);
      } else {
        positive_limit = std::min(positive_limit, -multiplier);
      }
    }

    // Make sure upper_bound + to_add * row_bound never overflow.
    const IntegerValue row_bound =
        multiplier > 0 ? integer_lp_[row].ub : integer_lp_[row].lb;
    if (row_bound != 0) {
      const IntegerValue limit1 = FloorRatio(
          std::max(IntegerValue(0), kMaxWantedCoeff - IntTypeAbs(*upper_bound)),
          IntTypeAbs(row_bound));
      const IntegerValue limit2 =
          FloorRatio(kMaxWantedCoeff, IntTypeAbs(row_bound));
      if ((*upper_bound > 0) == (row_bound > 0)) {  // Same sign.
        positive_limit = std::min(positive_limit, limit1);
        negative_limit = std::min(negative_limit, limit2);
      } else {
        negative_limit = std::min(negative_limit, limit1);
        positive_limit = std::min(positive_limit, limit2);
      }
    }

    // If we add the row to the scattered_vector, diff will indicate by how much
    // |upper_bound - ImpliedLB(scattered_vector)| will change. That correspond
    // to increasing the multiplier by 1.
    //
    // At this stage, we are not sure computing sum coeff * bound will not
    // overflow, so we use floating point numbers. It is fine to do so since
    // this is not directly involved in the actual exact constraint generation:
    // these variables are just used in an heuristic.
    double positive_diff = ToDouble(row_bound);
    double negative_diff = ToDouble(row_bound);

    // TODO(user): we could relax a bit some of the condition and allow a sign
    // change. It is just trickier to compute the diff when we allow such
    // changes.
    for (const auto entry : integer_lp_[row].terms) {
      const ColIndex col = entry.first;
      const IntegerValue coeff = entry.second;
      const IntegerValue abs_coef = IntTypeAbs(coeff);
      CHECK_NE(coeff, 0);

      const IntegerVariable var = integer_variables_[col.value()];
      const IntegerValue lb = integer_trail_->LowerBound(var);
      const IntegerValue ub = integer_trail_->UpperBound(var);

      // Moving a variable away from zero seems to improve the bound even
      // if it reduces the number of non-zero. Note that this is because of
      // this that positive_diff and negative_diff are not the same.
      const IntegerValue current = (*scattered_vector)[col];
      if (current == 0) {
        const IntegerValue overflow_limit(
            FloorRatio(kMaxWantedCoeff, abs_coef));
        positive_limit = std::min(positive_limit, overflow_limit);
        negative_limit = std::min(negative_limit, overflow_limit);
        if (coeff > 0) {
          positive_diff -= ToDouble(coeff) * ToDouble(lb);
          negative_diff -= ToDouble(coeff) * ToDouble(ub);
        } else {
          positive_diff -= ToDouble(coeff) * ToDouble(ub);
          negative_diff -= ToDouble(coeff) * ToDouble(lb);
        }
        continue;
      }

      // We don't want to change the sign of current (except if the variable is
      // fixed) or to have an overflow.
      //
      // Corner case:
      //  - IntTypeAbs(current) can be larger than kMaxWantedCoeff!
      //  - The code assumes that 2 * kMaxWantedCoeff do not overflow.
      const IntegerValue current_magnitude = IntTypeAbs(current);
      const IntegerValue other_direction_limit = FloorRatio(
          lb == ub
              ? kMaxWantedCoeff + std::min(current_magnitude,
                                           kMaxIntegerValue - kMaxWantedCoeff)
              : current_magnitude,
          abs_coef);
      const IntegerValue same_direction_limit(FloorRatio(
          std::max(IntegerValue(0), kMaxWantedCoeff - current_magnitude),
          abs_coef));
      if ((current > 0) == (coeff > 0)) {  // Same sign.
        negative_limit = std::min(negative_limit, other_direction_limit);
        positive_limit = std::min(positive_limit, same_direction_limit);
      } else {
        negative_limit = std::min(negative_limit, same_direction_limit);
        positive_limit = std::min(positive_limit, other_direction_limit);
      }

      // This is how diff change.
      const IntegerValue implied = current > 0 ? lb : ub;
      if (implied != 0) {
        positive_diff -= ToDouble(coeff) * ToDouble(implied);
        negative_diff -= ToDouble(coeff) * ToDouble(implied);
      }
    }

    // Only add a multiple of this row if it tighten the final constraint.
    // The positive_diff/negative_diff are supposed to be integer modulo the
    // double precision, so we only add a multiple if they seems far away from
    // zero.
    IntegerValue to_add(0);
    if (positive_diff <= -1.0 && positive_limit > 0) {
      to_add = positive_limit;
    }
    if (negative_diff >= 1.0 && negative_limit > 0) {
      // Pick this if it is better than the positive sign.
      if (to_add == 0 ||
          std::abs(ToDouble(negative_limit) * negative_diff) >
              std::abs(ToDouble(positive_limit) * positive_diff)) {
        to_add = -negative_limit;
      }
    }
    if (to_add != 0) {
      term.second += to_add;
      *upper_bound += to_add * row_bound;

      // TODO(user): we could avoid checking overflow here, but this is likely
      // not in the hot loop.
      CHECK(scattered_vector->AddLinearExpressionMultiple(
          to_add, integer_lp_[row].terms));
    }
  }
}
2092
2093// The "exact" computation go as follow:
2094//
2095// Given any INTEGER linear combination of the LP constraints, we can create a
2096// new integer constraint that is valid (its computation must not overflow
2097// though). Lets call this "linear_combination <= ub". We can then always add to
2098// it the inequality "objective_terms <= objective_var", so we get:
2099// ImpliedLB(objective_terms + linear_combination) - ub <= objective_var.
2100// where ImpliedLB() is computed from the variable current bounds.
2101//
2102// Now, if we use for the linear combination and approximation of the optimal
2103// negated dual LP values (by scaling them and rounding them to integer), we
2104// will get an EXACT objective lower bound that is more or less the same as the
2105// inexact bound given by the LP relaxation. This allows to derive exact reasons
2106// for any propagation done by this constraint.
// Derives an exact integer constraint from the (inexact) LP duals and uses it
// to propagate the objective lower bound and reduced-cost fixing. Returns
// false only on a detected conflict. See the block comment above for the
// underlying math.
bool LinearProgrammingConstraint::ExactLpReasonning() {
  // Clear old reason and deductions.
  integer_reason_.clear();
  deductions_.clear();
  deductions_reason_.clear();

  // The row multipliers will be the negation of the LP duals.
  //
  // TODO(user): Provide and use a sparse API in Glop to get the duals.
  const RowIndex num_rows = simplex_.GetProblemNumRows();
  tmp_lp_multipliers_.clear();
  for (RowIndex row(0); row < num_rows; ++row) {
    const double value = -simplex_.GetDualValue(row);
    // Drop tiny duals, they would not contribute anyway.
    if (std::abs(value) < kZeroTolerance) continue;
    tmp_lp_multipliers_.push_back({row, value});
  }

  Fractional scaling;
  tmp_integer_multipliers_ = ScaleLpMultiplier(
      /*take_objective_into_account=*/true, tmp_lp_multipliers_, &scaling);

  IntegerValue rc_ub;
  if (!ComputeNewLinearConstraint(tmp_integer_multipliers_,
                                  &tmp_scattered_vector_, &rc_ub)) {
    VLOG(1) << "Issue while computing the exact LP reason. Aborting.";
    return true;
  }

  // The "objective constraint" behave like if the unscaled cp multiplier was
  // 1.0, so we will multiply it by this number and add it to reduced_costs.
  const IntegerValue obj_scale(std::round(scaling));
  if (obj_scale == 0) {
    VLOG(1) << "Overflow during exact LP reasoning. scaling=" << scaling;
    return true;
  }
  CHECK(tmp_scattered_vector_.AddLinearExpressionMultiple(obj_scale,
                                                          integer_objective_));
  CHECK(AddProductTo(-obj_scale, integer_objective_offset_, &rc_ub));
  AdjustNewLinearConstraint(&tmp_integer_multipliers_, &tmp_scattered_vector_,
                            &rc_ub);

  // Create the IntegerSumLE that will allow to propagate the objective and more
  // generally do the reduced cost fixing.
  tmp_scattered_vector_.ConvertToLinearConstraint(integer_variables_, rc_ub,
                                                  &tmp_constraint_);
  // Append "- obj_scale * objective_var" to close the objective inequality.
  tmp_constraint_.vars.push_back(objective_cp_);
  tmp_constraint_.coeffs.push_back(-obj_scale);
  DivideByGCD(&tmp_constraint_);
  PreventOverflow(&tmp_constraint_);
  DCHECK(!PossibleOverflow(tmp_constraint_));
  DCHECK(constraint_manager_.DebugCheckConstraint(tmp_constraint_));

  // Corner case where prevent overflow removed all terms.
  if (tmp_constraint_.vars.empty()) {
    trail_->MutableConflict()->clear();
    return tmp_constraint_.ub >= 0;
  }

  IntegerSumLE* cp_constraint =
      new IntegerSumLE({}, tmp_constraint_.vars, tmp_constraint_.coeffs,
                       tmp_constraint_.ub, model_);
  if (trail_->CurrentDecisionLevel() == 0) {
    // Since we will never ask the reason for a constraint at level 0, we just
    // keep the last one.
    optimal_constraints_.clear();
  }
  optimal_constraints_.emplace_back(cp_constraint);
  rev_optimal_constraints_size_ = optimal_constraints_.size();
  if (!cp_constraint->PropagateAtLevelZero()) return false;
  return cp_constraint->Propagate();
}
2178
2179bool LinearProgrammingConstraint::FillExactDualRayReason() {
2180 Fractional scaling;
2181 const glop::DenseColumn ray = simplex_.GetDualRay();
2182 tmp_lp_multipliers_.clear();
2183 for (RowIndex row(0); row < ray.size(); ++row) {
2184 const double value = ray[row];
2185 if (std::abs(value) < kZeroTolerance) continue;
2186 tmp_lp_multipliers_.push_back({row, value});
2187 }
2188 tmp_integer_multipliers_ = ScaleLpMultiplier(
2189 /*take_objective_into_account=*/false, tmp_lp_multipliers_, &scaling);
2190
2191 IntegerValue new_constraint_ub;
2192 if (!ComputeNewLinearConstraint(tmp_integer_multipliers_,
2193 &tmp_scattered_vector_, &new_constraint_ub)) {
2194 VLOG(1) << "Isse while computing the exact dual ray reason. Aborting.";
2195 return false;
2196 }
2197
2198 AdjustNewLinearConstraint(&tmp_integer_multipliers_, &tmp_scattered_vector_,
2199 &new_constraint_ub);
2200
2201 tmp_scattered_vector_.ConvertToLinearConstraint(
2202 integer_variables_, new_constraint_ub, &tmp_constraint_);
2203 DivideByGCD(&tmp_constraint_);
2204 PreventOverflow(&tmp_constraint_);
2205 DCHECK(!PossibleOverflow(tmp_constraint_));
2206 DCHECK(constraint_manager_.DebugCheckConstraint(tmp_constraint_));
2207
2208 const IntegerValue implied_lb = GetImpliedLowerBound(tmp_constraint_);
2209 if (implied_lb <= tmp_constraint_.ub) {
2210 VLOG(1) << "LP exact dual ray not infeasible,"
2211 << " implied_lb: " << implied_lb.value() / scaling
2212 << " ub: " << tmp_constraint_.ub.value() / scaling;
2213 return false;
2214 }
2215 const IntegerValue slack = (implied_lb - tmp_constraint_.ub) - 1;
2216 SetImpliedLowerBoundReason(tmp_constraint_, slack);
2217 return true;
2218}
2219
2220int64_t LinearProgrammingConstraint::CalculateDegeneracy() {
2221 const glop::ColIndex num_vars = simplex_.GetProblemNumCols();
2222 int num_non_basic_with_zero_rc = 0;
2223 for (glop::ColIndex i(0); i < num_vars; ++i) {
2224 const double rc = simplex_.GetReducedCost(i);
2225 if (rc != 0.0) continue;
2227 continue;
2228 }
2229 num_non_basic_with_zero_rc++;
2230 }
2231 const int64_t num_cols = simplex_.GetProblemNumCols().value();
2232 is_degenerate_ = num_non_basic_with_zero_rc >= 0.3 * num_cols;
2233 return num_non_basic_with_zero_rc;
2234}
2235
2236void LinearProgrammingConstraint::ReducedCostStrengtheningDeductions(
2237 double cp_objective_delta) {
2238 deductions_.clear();
2239
2240 // TRICKY: while simplex_.GetObjectiveValue() use the objective scaling factor
2241 // stored in the lp_data_, all the other functions like GetReducedCost() or
2242 // GetVariableValue() do not.
2243 const double lp_objective_delta =
2244 cp_objective_delta / lp_data_.objective_scaling_factor();
2245 const int num_vars = integer_variables_.size();
2246 for (int i = 0; i < num_vars; i++) {
2247 const IntegerVariable cp_var = integer_variables_[i];
2248 const glop::ColIndex lp_var = glop::ColIndex(i);
2249 const double rc = simplex_.GetReducedCost(lp_var);
2250 const double value = simplex_.GetVariableValue(lp_var);
2251
2252 if (rc == 0.0) continue;
2253 const double lp_other_bound = value + lp_objective_delta / rc;
2254 const double cp_other_bound =
2255 scaler_.UnscaleVariableValue(lp_var, lp_other_bound);
2256
2257 if (rc > kLpEpsilon) {
2258 const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
2259 const double new_ub = std::floor(cp_other_bound + kCpEpsilon);
2260 if (new_ub < ub) {
2261 // TODO(user): Because rc > kLpEpsilon, the lower_bound of cp_var
2262 // will be part of the reason returned by FillReducedCostsReason(), but
2263 // we actually do not need it here. Same below.
2264 const IntegerValue new_ub_int(static_cast<IntegerValue>(new_ub));
2265 deductions_.push_back(IntegerLiteral::LowerOrEqual(cp_var, new_ub_int));
2266 }
2267 } else if (rc < -kLpEpsilon) {
2268 const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
2269 const double new_lb = std::ceil(cp_other_bound - kCpEpsilon);
2270 if (new_lb > lb) {
2271 const IntegerValue new_lb_int(static_cast<IntegerValue>(new_lb));
2272 deductions_.push_back(
2273 IntegerLiteral::GreaterOrEqual(cp_var, new_lb_int));
2274 }
2275 }
2276 }
2277}
2278
2279namespace {
2280
2281// Add a cut of the form Sum_{outgoing arcs from S} lp >= rhs_lower_bound.
2282//
2283// Note that we used to also add the same cut for the incoming arcs, but because
2284// of flow conservation on these problems, the outgoing flow is always the same
2285// as the incoming flow, so adding this extra cut doesn't seem relevant.
2286void AddOutgoingCut(
2287 int num_nodes, int subset_size, const std::vector<bool>& in_subset,
2288 const std::vector<int>& tails, const std::vector<int>& heads,
2289 const std::vector<Literal>& literals,
2290 const std::vector<double>& literal_lp_values, int64_t rhs_lower_bound,
2292 LinearConstraintManager* manager, Model* model) {
2293 // A node is said to be optional if it can be excluded from the subcircuit,
2294 // in which case there is a self-loop on that node.
2295 // If there are optional nodes, use extended formula:
2296 // sum(cut) >= 1 - optional_loop_in - optional_loop_out
2297 // where optional_loop_in's node is in subset, optional_loop_out's is out.
2298 // TODO(user): Favor optional loops fixed to zero at root.
2299 int num_optional_nodes_in = 0;
2300 int num_optional_nodes_out = 0;
2301 int optional_loop_in = -1;
2302 int optional_loop_out = -1;
2303 for (int i = 0; i < tails.size(); ++i) {
2304 if (tails[i] != heads[i]) continue;
2305 if (in_subset[tails[i]]) {
2306 num_optional_nodes_in++;
2307 if (optional_loop_in == -1 ||
2308 literal_lp_values[i] < literal_lp_values[optional_loop_in]) {
2309 optional_loop_in = i;
2310 }
2311 } else {
2312 num_optional_nodes_out++;
2313 if (optional_loop_out == -1 ||
2314 literal_lp_values[i] < literal_lp_values[optional_loop_out]) {
2315 optional_loop_out = i;
2316 }
2317 }
2318 }
2319
2320 // TODO(user): The lower bound for CVRP is computed assuming all nodes must be
2321 // served, if it is > 1 we lower it to one in the presence of optional nodes.
2322 if (num_optional_nodes_in + num_optional_nodes_out > 0) {
2323 CHECK_GE(rhs_lower_bound, 1);
2324 rhs_lower_bound = 1;
2325 }
2326
2327 LinearConstraintBuilder outgoing(model, IntegerValue(rhs_lower_bound),
2329 double sum_outgoing = 0.0;
2330
2331 // Add outgoing arcs, compute outgoing flow.
2332 for (int i = 0; i < tails.size(); ++i) {
2333 if (in_subset[tails[i]] && !in_subset[heads[i]]) {
2334 sum_outgoing += literal_lp_values[i];
2335 CHECK(outgoing.AddLiteralTerm(literals[i], IntegerValue(1)));
2336 }
2337 }
2338
2339 // Support optional nodes if any.
2340 if (num_optional_nodes_in + num_optional_nodes_out > 0) {
2341 // When all optionals of one side are excluded in lp solution, no cut.
2342 if (num_optional_nodes_in == subset_size &&
2343 (optional_loop_in == -1 ||
2344 literal_lp_values[optional_loop_in] > 1.0 - 1e-6)) {
2345 return;
2346 }
2347 if (num_optional_nodes_out == num_nodes - subset_size &&
2348 (optional_loop_out == -1 ||
2349 literal_lp_values[optional_loop_out] > 1.0 - 1e-6)) {
2350 return;
2351 }
2352
2353 // There is no mandatory node in subset, add optional_loop_in.
2354 if (num_optional_nodes_in == subset_size) {
2355 CHECK(
2356 outgoing.AddLiteralTerm(literals[optional_loop_in], IntegerValue(1)));
2357 sum_outgoing += literal_lp_values[optional_loop_in];
2358 }
2359
2360 // There is no mandatory node out of subset, add optional_loop_out.
2361 if (num_optional_nodes_out == num_nodes - subset_size) {
2362 CHECK(outgoing.AddLiteralTerm(literals[optional_loop_out],
2363 IntegerValue(1)));
2364 sum_outgoing += literal_lp_values[optional_loop_out];
2365 }
2366 }
2367
2368 if (sum_outgoing < rhs_lower_bound - 1e-6) {
2369 manager->AddCut(outgoing.Build(), "Circuit", lp_values);
2370 }
2371}
2372
2373} // namespace
2374
2375// We roughly follow the algorithm described in section 6 of "The Traveling
2376// Salesman Problem, A computational Study", David L. Applegate, Robert E.
2377// Bixby, Vasek Chvatal, William J. Cook.
2378//
2379// Note that this is mainly a "symmetric" case algo, but it does still work for
2380// the asymmetric case.
2382 int num_nodes, const std::vector<int>& tails, const std::vector<int>& heads,
2383 const std::vector<Literal>& literals,
2385 absl::Span<const int64_t> demands, int64_t capacity,
2386 LinearConstraintManager* manager, Model* model) {
2387 if (num_nodes <= 2) return;
2388
2389 // We will collect only the arcs with a positive lp_values to speed up some
2390 // computation below.
2391 struct Arc {
2392 int tail;
2393 int head;
2394 double lp_value;
2395 };
2396 std::vector<Arc> relevant_arcs;
2397
2398 // Sort the arcs by non-increasing lp_values.
2399 std::vector<double> literal_lp_values(literals.size());
2400 std::vector<std::pair<double, int>> arc_by_decreasing_lp_values;
2401 auto* encoder = model->GetOrCreate<IntegerEncoder>();
2402 for (int i = 0; i < literals.size(); ++i) {
2403 double lp_value;
2404 const IntegerVariable direct_view = encoder->GetLiteralView(literals[i]);
2405 if (direct_view != kNoIntegerVariable) {
2406 lp_value = lp_values[direct_view];
2407 } else {
2408 lp_value =
2409 1.0 - lp_values[encoder->GetLiteralView(literals[i].Negated())];
2410 }
2411 literal_lp_values[i] = lp_value;
2412
2413 if (lp_value < 1e-6) continue;
2414 relevant_arcs.push_back({tails[i], heads[i], lp_value});
2415 arc_by_decreasing_lp_values.push_back({lp_value, i});
2416 }
2417 std::sort(arc_by_decreasing_lp_values.begin(),
2418 arc_by_decreasing_lp_values.end(),
2419 std::greater<std::pair<double, int>>());
2420
2421 // We will do a union-find by adding one by one the arc of the lp solution
2422 // in the order above. Every intermediate set during this construction will
2423 // be a candidate for a cut.
2424 //
2425 // In parallel to the union-find, to efficiently reconstruct these sets (at
2426 // most num_nodes), we construct a "decomposition forest" of the different
2427 // connected components. Note that we don't exploit any asymmetric nature of
2428 // the graph here. This is exactly the algo 6.3 in the book above.
2429 int num_components = num_nodes;
2430 std::vector<int> parent(num_nodes);
2431 std::vector<int> root(num_nodes);
2432 for (int i = 0; i < num_nodes; ++i) {
2433 parent[i] = i;
2434 root[i] = i;
2435 }
2436 auto get_root_and_compress_path = [&root](int node) {
2437 int r = node;
2438 while (root[r] != r) r = root[r];
2439 while (root[node] != r) {
2440 const int next = root[node];
2441 root[node] = r;
2442 node = next;
2443 }
2444 return r;
2445 };
2446 for (const auto pair : arc_by_decreasing_lp_values) {
2447 if (num_components == 2) break;
2448 const int tail = get_root_and_compress_path(tails[pair.second]);
2449 const int head = get_root_and_compress_path(heads[pair.second]);
2450 if (tail != head) {
2451 // Update the decomposition forest, note that the number of nodes is
2452 // growing.
2453 const int new_node = parent.size();
2454 parent.push_back(new_node);
2455 parent[head] = new_node;
2456 parent[tail] = new_node;
2457 --num_components;
2458
2459 // It is important that the union-find representative is the same node.
2460 root.push_back(new_node);
2461 root[head] = new_node;
2462 root[tail] = new_node;
2463 }
2464 }
2465
2466 // For each node in the decomposition forest, try to add a cut for the set
2467 // formed by the nodes and its children. To do that efficiently, we first
2468 // order the nodes so that for each node in a tree, the set of children forms
2469 // a consecutive span in the pre_order vector. This vector just lists the
2470 // nodes in the "pre-order" graph traversal order. The Spans will point inside
2471 // the pre_order vector, it is why we initialize it once and for all.
2472 int new_size = 0;
2473 std::vector<int> pre_order(num_nodes);
2474 std::vector<absl::Span<const int>> subsets;
2475 {
2476 std::vector<absl::InlinedVector<int, 2>> graph(parent.size());
2477 for (int i = 0; i < parent.size(); ++i) {
2478 if (parent[i] != i) graph[parent[i]].push_back(i);
2479 }
2480 std::vector<int> queue;
2481 std::vector<bool> seen(graph.size(), false);
2482 std::vector<int> start_index(parent.size());
2483 for (int i = num_nodes; i < parent.size(); ++i) {
2484 // Note that because of the way we constructed 'parent', the graph is a
2485 // binary tree. This is not required for the correctness of the algorithm
2486 // here though.
2487 CHECK(graph[i].empty() || graph[i].size() == 2);
2488 if (parent[i] != i) continue;
2489
2490 // Explore the subtree rooted at node i.
2491 CHECK(!seen[i]);
2492 queue.push_back(i);
2493 while (!queue.empty()) {
2494 const int node = queue.back();
2495 if (seen[node]) {
2496 queue.pop_back();
2497 // All the children of node are in the span [start, end) of the
2498 // pre_order vector.
2499 const int start = start_index[node];
2500 if (new_size - start > 1) {
2501 subsets.emplace_back(&pre_order[start], new_size - start);
2502 }
2503 continue;
2504 }
2505 seen[node] = true;
2506 start_index[node] = new_size;
2507 if (node < num_nodes) pre_order[new_size++] = node;
2508 for (const int child : graph[node]) {
2509 if (!seen[child]) queue.push_back(child);
2510 }
2511 }
2512 }
2513 }
2514
2515 // Compute the total demands in order to know the minimum incoming/outgoing
2516 // flow.
2517 int64_t total_demands = 0;
2518 if (!demands.empty()) {
2519 for (const int64_t demand : demands) total_demands += demand;
2520 }
2521
2522 // Process each subsets and add any violated cut.
2523 CHECK_EQ(pre_order.size(), num_nodes);
2524 std::vector<bool> in_subset(num_nodes, false);
2525 for (const absl::Span<const int> subset : subsets) {
2526 CHECK_GT(subset.size(), 1);
2527 CHECK_LT(subset.size(), num_nodes);
2528
2529 // These fields will be left untouched if demands.empty().
2530 bool contain_depot = false;
2531 int64_t subset_demand = 0;
2532
2533 // Initialize "in_subset" and the subset demands.
2534 for (const int n : subset) {
2535 in_subset[n] = true;
2536 if (!demands.empty()) {
2537 if (n == 0) contain_depot = true;
2538 subset_demand += demands[n];
2539 }
2540 }
2541
2542 // Compute a lower bound on the outgoing flow.
2543 //
2544 // TODO(user): This lower bound assume all nodes in subset must be served,
2545 // which is not the case. For TSP we do the correct thing in
2546 // AddOutgoingCut() but not for CVRP... Fix!!
2547 //
2548 // TODO(user): It could be very interesting to see if this "min outgoing
2549 // flow" cannot be automatically infered from the constraint in the
2550 // precedence graph. This might work if we assume that any kind of path
2551 // cumul constraint is encoded with constraints:
2552 // [edge => value_head >= value_tail + edge_weight].
2553 // We could take the minimum incoming edge weight per node in the set, and
2554 // use the cumul variable domain to infer some capacity.
2555 int64_t min_outgoing_flow = 1;
2556 if (!demands.empty()) {
2557 min_outgoing_flow =
2558 contain_depot
2559 ? (total_demands - subset_demand + capacity - 1) / capacity
2560 : (subset_demand + capacity - 1) / capacity;
2561 }
2562
2563 // We still need to serve nodes with a demand of zero, and in the corner
2564 // case where all node in subset have a zero demand, the formula above
2565 // result in a min_outgoing_flow of zero.
2566 min_outgoing_flow = std::max(min_outgoing_flow, int64_t{1});
2567
2568 // Compute the current outgoing flow out of the subset.
2569 //
2570 // This can take a significant portion of the running time, it is why it is
2571 // faster to do it only on arcs with non-zero lp values which should be in
2572 // linear number rather than the total number of arc which can be quadratic.
2573 //
2574 // TODO(user): For the symmetric case there is an even faster algo. See if
2575 // it can be generalized to the asymmetric one if become needed.
2576 // Reference is algo 6.4 of the "The Traveling Salesman Problem" book
2577 // mentionned above.
2578 double outgoing_flow = 0.0;
2579 for (const auto arc : relevant_arcs) {
2580 if (in_subset[arc.tail] && !in_subset[arc.head]) {
2581 outgoing_flow += arc.lp_value;
2582 }
2583 }
2584
2585 // Add a cut if the current outgoing flow is not enough.
2586 if (outgoing_flow < min_outgoing_flow - 1e-6) {
2587 AddOutgoingCut(num_nodes, subset.size(), in_subset, tails, heads,
2588 literals, literal_lp_values,
2589 /*rhs_lower_bound=*/min_outgoing_flow, lp_values, manager,
2590 model);
2591 }
2592
2593 // Sparse clean up.
2594 for (const int n : subset) in_subset[n] = false;
2595 }
2596}
2597
2598namespace {
2599
2600// Returns for each literal its integer view, or the view of its negation.
2601std::vector<IntegerVariable> GetAssociatedVariables(
2602 const std::vector<Literal>& literals, Model* model) {
2603 auto* encoder = model->GetOrCreate<IntegerEncoder>();
2604 std::vector<IntegerVariable> result;
2605 for (const Literal l : literals) {
2606 const IntegerVariable direct_view = encoder->GetLiteralView(l);
2607 if (direct_view != kNoIntegerVariable) {
2608 result.push_back(direct_view);
2609 } else {
2610 result.push_back(encoder->GetLiteralView(l.Negated()));
2611 DCHECK_NE(result.back(), kNoIntegerVariable);
2612 }
2613 }
2614 return result;
2615}
2616
2617} // namespace
2618
2619// We use a basic algorithm to detect components that are not connected to the
2620// rest of the graph in the LP solution, and add cuts to force some arcs to
2621// enter and leave this component from outside.
2623 int num_nodes, const std::vector<int>& tails, const std::vector<int>& heads,
2624 const std::vector<Literal>& literals, Model* model) {
2625 CutGenerator result;
2626 result.vars = GetAssociatedVariables(literals, model);
2627 result.generate_cuts =
2628 [num_nodes, tails, heads, literals, model](
2630 LinearConstraintManager* manager) {
2632 num_nodes, tails, heads, literals, lp_values,
2633 /*demands=*/{}, /*capacity=*/0, manager, model);
2634 return true;
2635 };
2636 return result;
2637}
2638
2640 const std::vector<int>& tails,
2641 const std::vector<int>& heads,
2642 const std::vector<Literal>& literals,
2643 const std::vector<int64_t>& demands,
2644 int64_t capacity, Model* model) {
2645 CutGenerator result;
2646 result.vars = GetAssociatedVariables(literals, model);
2647 result.generate_cuts =
2648 [num_nodes, tails, heads, demands, capacity, literals, model](
2650 LinearConstraintManager* manager) {
2651 SeparateSubtourInequalities(num_nodes, tails, heads, literals,
2652 lp_values, demands, capacity, manager,
2653 model);
2654 return true;
2655 };
2656 return result;
2657}
2658
2659std::function<IntegerLiteral()>
2661 // Gather all 0-1 variables that appear in this LP.
2662 std::vector<IntegerVariable> variables;
2663 for (IntegerVariable var : integer_variables_) {
2664 if (integer_trail_->LowerBound(var) == 0 &&
2665 integer_trail_->UpperBound(var) == 1) {
2666 variables.push_back(var);
2667 }
2668 }
2669 VLOG(1) << "HeuristicLPMostInfeasibleBinary has " << variables.size()
2670 << " variables.";
2671
2672 return [this, variables]() {
2673 const double kEpsilon = 1e-6;
2674 // Find most fractional value.
2675 IntegerVariable fractional_var = kNoIntegerVariable;
2676 double fractional_distance_best = -1.0;
2677 for (const IntegerVariable var : variables) {
2678 // Skip ignored and fixed variables.
2679 if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2680 const IntegerValue lb = integer_trail_->LowerBound(var);
2681 const IntegerValue ub = integer_trail_->UpperBound(var);
2682 if (lb == ub) continue;
2683
2684 // Check variable's support is fractional.
2685 const double lp_value = this->GetSolutionValue(var);
2686 const double fractional_distance =
2687 std::min(std::ceil(lp_value - kEpsilon) - lp_value,
2688 lp_value - std::floor(lp_value + kEpsilon));
2689 if (fractional_distance < kEpsilon) continue;
2690
2691 // Keep variable if it is farther from integrality than the previous.
2692 if (fractional_distance > fractional_distance_best) {
2693 fractional_var = var;
2694 fractional_distance_best = fractional_distance;
2695 }
2696 }
2697
2698 if (fractional_var != kNoIntegerVariable) {
2699 IntegerLiteral::GreaterOrEqual(fractional_var, IntegerValue(1));
2700 }
2701 return IntegerLiteral();
2702 };
2703}
2704
2705std::function<IntegerLiteral()>
2707 // Gather all 0-1 variables that appear in this LP.
2708 std::vector<IntegerVariable> variables;
2709 for (IntegerVariable var : integer_variables_) {
2710 if (integer_trail_->LowerBound(var) == 0 &&
2711 integer_trail_->UpperBound(var) == 1) {
2712 variables.push_back(var);
2713 }
2714 }
2715 VLOG(1) << "HeuristicLpReducedCostBinary has " << variables.size()
2716 << " variables.";
2717
2718 // Store average of reduced cost from 1 to 0. The best heuristic only sets
2719 // variables to one and cares about cost to zero, even though classic
2720 // pseudocost will use max_var min(cost_to_one[var], cost_to_zero[var]).
2721 const int num_vars = variables.size();
2722 std::vector<double> cost_to_zero(num_vars, 0.0);
2723 std::vector<int> num_cost_to_zero(num_vars);
2724 int num_calls = 0;
2725
2726 return [=]() mutable {
2727 const double kEpsilon = 1e-6;
2728
2729 // Every 10000 calls, decay pseudocosts.
2730 num_calls++;
2731 if (num_calls == 10000) {
2732 for (int i = 0; i < num_vars; i++) {
2733 cost_to_zero[i] /= 2;
2734 num_cost_to_zero[i] /= 2;
2735 }
2736 num_calls = 0;
2737 }
2738
2739 // Accumulate pseudo-costs of all unassigned variables.
2740 for (int i = 0; i < num_vars; i++) {
2741 const IntegerVariable var = variables[i];
2742 // Skip ignored and fixed variables.
2743 if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2744 const IntegerValue lb = integer_trail_->LowerBound(var);
2745 const IntegerValue ub = integer_trail_->UpperBound(var);
2746 if (lb == ub) continue;
2747
2748 const double rc = this->GetSolutionReducedCost(var);
2749 // Skip reduced costs that are nonzero because of numerical issues.
2750 if (std::abs(rc) < kEpsilon) continue;
2751
2752 const double value = std::round(this->GetSolutionValue(var));
2753 if (value == 1.0 && rc < 0.0) {
2754 cost_to_zero[i] -= rc;
2755 num_cost_to_zero[i]++;
2756 }
2757 }
2758
2759 // Select noninstantiated variable with highest pseudo-cost.
2760 int selected_index = -1;
2761 double best_cost = 0.0;
2762 for (int i = 0; i < num_vars; i++) {
2763 const IntegerVariable var = variables[i];
2764 // Skip ignored and fixed variables.
2765 if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2766 if (integer_trail_->IsFixed(var)) continue;
2767
2768 if (num_cost_to_zero[i] > 0 &&
2769 best_cost < cost_to_zero[i] / num_cost_to_zero[i]) {
2770 best_cost = cost_to_zero[i] / num_cost_to_zero[i];
2771 selected_index = i;
2772 }
2773 }
2774
2775 if (selected_index >= 0) {
2776 return IntegerLiteral::GreaterOrEqual(variables[selected_index],
2777 IntegerValue(1));
2778 }
2779 return IntegerLiteral();
2780 };
2781}
2782
2783void LinearProgrammingConstraint::UpdateAverageReducedCosts() {
2784 const int num_vars = integer_variables_.size();
2785 if (sum_cost_down_.size() < num_vars) {
2786 sum_cost_down_.resize(num_vars, 0.0);
2787 num_cost_down_.resize(num_vars, 0);
2788 sum_cost_up_.resize(num_vars, 0.0);
2789 num_cost_up_.resize(num_vars, 0);
2790 rc_scores_.resize(num_vars, 0.0);
2791 }
2792
2793 // Decay averages.
2794 num_calls_since_reduced_cost_averages_reset_++;
2795 if (num_calls_since_reduced_cost_averages_reset_ == 10000) {
2796 for (int i = 0; i < num_vars; i++) {
2797 sum_cost_up_[i] /= 2;
2798 num_cost_up_[i] /= 2;
2799 sum_cost_down_[i] /= 2;
2800 num_cost_down_[i] /= 2;
2801 }
2802 num_calls_since_reduced_cost_averages_reset_ = 0;
2803 }
2804
2805 // Accumulate reduced costs of all unassigned variables.
2806 for (int i = 0; i < num_vars; i++) {
2807 const IntegerVariable var = integer_variables_[i];
2808
2809 // Skip ignored and fixed variables.
2810 if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2811 if (integer_trail_->IsFixed(var)) continue;
2812
2813 // Skip reduced costs that are zero or close.
2814 const double rc = lp_reduced_cost_[i];
2815 if (std::abs(rc) < kCpEpsilon) continue;
2816
2817 if (rc < 0.0) {
2818 sum_cost_down_[i] -= rc;
2819 num_cost_down_[i]++;
2820 } else {
2821 sum_cost_up_[i] += rc;
2822 num_cost_up_[i]++;
2823 }
2824 }
2825
2826 // Tricky, we artificially reset the rc_rev_int_repository_ to level zero
2827 // so that the rev_rc_start_ is zero.
2828 rc_rev_int_repository_.SetLevel(0);
2829 rc_rev_int_repository_.SetLevel(trail_->CurrentDecisionLevel());
2830 rev_rc_start_ = 0;
2831
2832 // Cache the new score (higher is better) using the average reduced costs
2833 // as a signal.
2834 positions_by_decreasing_rc_score_.clear();
2835 for (int i = 0; i < num_vars; i++) {
2836 // If only one direction exist, we takes its value divided by 2, so that
2837 // such variable should have a smaller cost than the min of the two side
2838 // except if one direction have a really high reduced costs.
2839 const double a_up =
2840 num_cost_up_[i] > 0 ? sum_cost_up_[i] / num_cost_up_[i] : 0.0;
2841 const double a_down =
2842 num_cost_down_[i] > 0 ? sum_cost_down_[i] / num_cost_down_[i] : 0.0;
2843 if (num_cost_down_[i] > 0 && num_cost_up_[i] > 0) {
2844 rc_scores_[i] = std::min(a_up, a_down);
2845 } else {
2846 rc_scores_[i] = 0.5 * (a_down + a_up);
2847 }
2848
2849 // We ignore scores of zero (i.e. no data) and will follow the default
2850 // search heuristic if all variables are like this.
2851 if (rc_scores_[i] > 0.0) {
2852 positions_by_decreasing_rc_score_.push_back({-rc_scores_[i], i});
2853 }
2854 }
2855 std::sort(positions_by_decreasing_rc_score_.begin(),
2856 positions_by_decreasing_rc_score_.end());
2857}
2858
2859// TODO(user): Remove duplication with HeuristicLpReducedCostBinary().
2860std::function<IntegerLiteral()>
2862 return [this]() { return this->LPReducedCostAverageDecision(); };
2863}
2864
2865IntegerLiteral LinearProgrammingConstraint::LPReducedCostAverageDecision() {
2866 // Select noninstantiated variable with highest positive average reduced cost.
2867 int selected_index = -1;
2868 const int size = positions_by_decreasing_rc_score_.size();
2869 rc_rev_int_repository_.SaveState(&rev_rc_start_);
2870 for (int i = rev_rc_start_; i < size; ++i) {
2871 const int index = positions_by_decreasing_rc_score_[i].second;
2872 const IntegerVariable var = integer_variables_[index];
2873 if (integer_trail_->IsCurrentlyIgnored(var)) continue;
2874 if (integer_trail_->IsFixed(var)) continue;
2875 selected_index = index;
2876 rev_rc_start_ = i;
2877 break;
2878 }
2879
2880 if (selected_index == -1) return IntegerLiteral();
2881 const IntegerVariable var = integer_variables_[selected_index];
2882
2883 // If ceil(value) is current upper bound, try var == upper bound first.
2884 // Guarding with >= prevents numerical problems.
2885 // With 0/1 variables, this will tend to try setting to 1 first,
2886 // which produces more shallow trees.
2887 const IntegerValue ub = integer_trail_->UpperBound(var);
2888 const IntegerValue value_ceil(
2889 std::ceil(this->GetSolutionValue(var) - kCpEpsilon));
2890 if (value_ceil >= ub) {
2892 }
2893
2894 // If floor(value) is current lower bound, try var == lower bound first.
2895 // Guarding with <= prevents numerical problems.
2896 const IntegerValue lb = integer_trail_->LowerBound(var);
2897 const IntegerValue value_floor(
2898 std::floor(this->GetSolutionValue(var) + kCpEpsilon));
2899 if (value_floor <= lb) {
2901 }
2902
2903 // Here lb < value_floor <= value_ceil < ub.
2904 // Try the most promising split between var <= floor or var >= ceil.
2905 const double a_up =
2906 num_cost_up_[selected_index] > 0
2907 ? sum_cost_up_[selected_index] / num_cost_up_[selected_index]
2908 : 0.0;
2909 const double a_down =
2910 num_cost_down_[selected_index] > 0
2911 ? sum_cost_down_[selected_index] / num_cost_down_[selected_index]
2912 : 0.0;
2913 if (a_down < a_up) {
2914 return IntegerLiteral::LowerOrEqual(var, value_floor);
2915 } else {
2916 return IntegerLiteral::GreaterOrEqual(var, value_ceil);
2917 }
2918}
2919
2921 std::string result = "LP statistics:\n";
2922 absl::StrAppend(&result, " final dimension: ", DimensionString(), "\n");
2923 absl::StrAppend(&result, " total number of simplex iterations: ",
2924 total_num_simplex_iterations_, "\n");
2925 absl::StrAppend(&result, " num solves: \n");
2926 for (int i = 0; i < num_solves_by_status_.size(); ++i) {
2927 if (num_solves_by_status_[i] == 0) continue;
2928 absl::StrAppend(&result, " - #",
2930 num_solves_by_status_[i], "\n");
2931 }
2932 absl::StrAppend(&result, constraint_manager_.Statistics());
2933 return result;
2934}
2935
2936} // namespace sat
2937} // namespace operations_research
int64_t max
Definition: alldiff_cst.cc:140
int64_t min
Definition: alldiff_cst.cc:139
#define CHECK(condition)
Definition: base/logging.h:495
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:891
#define CHECK_LT(val1, val2)
Definition: base/logging.h:705
#define CHECK_EQ(val1, val2)
Definition: base/logging.h:702
#define CHECK_GE(val1, val2)
Definition: base/logging.h:706
#define CHECK_GT(val1, val2)
Definition: base/logging.h:707
#define CHECK_NE(val1, val2)
Definition: base/logging.h:703
#define DCHECK_GT(val1, val2)
Definition: base/logging.h:895
#define DCHECK(condition)
Definition: base/logging.h:889
#define VLOG(verboselevel)
Definition: base/logging.h:983
void assign(size_type n, const value_type &val)
void resize(size_type new_size)
size_type size() const
void push_back(const value_type &x)
static int64_t GCD64(int64_t x, int64_t y)
Definition: mathutil.h:107
void SetLevel(int level) final
Definition: rev.h:134
void SaveState(T *object)
Definition: rev.h:61
A simple class to enforce both an elapsed time limit and a deterministic time limit in the same threa...
Definition: time_limit.h:106
bool LimitReached()
Returns true when the external limit is true, or the deterministic time is over the deterministic lim...
Definition: time_limit.h:534
static constexpr CostScalingAlgorithm MEAN_COST_SCALING
void SetVariableBounds(ColIndex col, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:249
void SetObjectiveOffset(Fractional objective_offset)
Definition: lp_data.cc:331
void SetCoefficient(RowIndex row, ColIndex col, Fractional value)
Definition: lp_data.cc:317
void SetConstraintBounds(RowIndex row, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:309
void SetObjectiveCoefficient(ColIndex col, Fractional value)
Definition: lp_data.cc:326
std::string GetDimensionString() const
Definition: lp_data.cc:425
Fractional objective_scaling_factor() const
Definition: lp_data.h:261
const SparseColumn & GetSparseColumn(ColIndex col) const
Definition: lp_data.cc:409
Fractional VariableScalingFactor(ColIndex col) const
Fractional UnscaleVariableValue(ColIndex col, Fractional value) const
Fractional UnscaleReducedCost(ColIndex col, Fractional value) const
Fractional UnscaleDualValue(RowIndex row, Fractional value) const
const GlopParameters & GetParameters() const
const DenseRow & GetDualRayRowCombination() const
Fractional GetVariableValue(ColIndex col) const
void SetIntegralityScale(ColIndex col, Fractional scale)
VariableStatus GetVariableStatus(ColIndex col) const
Fractional GetReducedCost(ColIndex col) const
const DenseColumn & GetDualRay() const
ABSL_MUST_USE_RESULT Status Solve(const LinearProgram &lp, TimeLimit *time_limit)
const ScatteredRow & GetUnitRowLeftInverse(RowIndex row)
Fractional GetDualValue(RowIndex row) const
ConstraintStatus GetConstraintStatus(RowIndex row) const
void LoadStateForNextSolve(const BasisState &state)
ColIndex GetBasis(RowIndex row) const
void SetParameters(const GlopParameters &parameters)
LinearConstraint * mutable_cut()
Definition: cuts.h:254
bool TrySimpleKnapsack(const LinearConstraint base_ct, const std::vector< double > &lp_values, const std::vector< IntegerValue > &lower_bounds, const std::vector< IntegerValue > &upper_bounds)
Definition: cuts.cc:1175
void WatchIntegerVariable(IntegerVariable i, int id, int watch_index=-1)
Definition: integer.h:1583
void WatchUpperBound(IntegerVariable var, int id, int watch_index=-1)
Definition: integer.h:1577
void SetPropagatorPriority(int id, int priority)
Definition: integer.cc:2018
int Register(PropagatorInterface *propagator)
Definition: integer.cc:1995
void AddLpVariable(IntegerVariable var)
Definition: cuts.h:113
void ProcessUpperBoundedConstraintWithSlackCreation(bool substitute_only_inner_variables, IntegerVariable first_slack, const absl::StrongVector< IntegerVariable, double > &lp_values, LinearConstraint *cut, std::vector< SlackInfo > *slack_infos)
Definition: cuts.cc:1598
bool DebugSlack(IntegerVariable first_slack, const LinearConstraint &initial_cut, const LinearConstraint &cut, const std::vector< SlackInfo > &info)
Definition: cuts.cc:1731
void RecomputeCacheAndSeparateSomeImpliedBoundCuts(const absl::StrongVector< IntegerVariable, double > &lp_values)
Definition: cuts.cc:1588
const IntegerVariable GetLiteralView(Literal lit) const
Definition: integer.h:493
void ComputeCut(RoundingOptions options, const std::vector< double > &lp_values, const std::vector< IntegerValue > &lower_bounds, const std::vector< IntegerValue > &upper_bounds, ImpliedBoundsProcessor *ib_processor, LinearConstraint *cut)
Definition: cuts.cc:721
ABSL_MUST_USE_RESULT bool Enqueue(IntegerLiteral i_lit, absl::Span< const Literal > literal_reason, absl::Span< const IntegerLiteral > integer_reason)
Definition: integer.cc:1027
bool IsCurrentlyIgnored(IntegerVariable i) const
Definition: integer.h:698
bool IsFixed(IntegerVariable i) const
Definition: integer.h:1443
IntegerLiteral LowerBoundAsLiteral(IntegerVariable i) const
Definition: integer.h:1467
bool ReportConflict(absl::Span< const Literal > literal_reason, absl::Span< const IntegerLiteral > integer_reason)
Definition: integer.h:917
IntegerValue UpperBound(IntegerVariable i) const
Definition: integer.h:1439
IntegerValue LevelZeroUpperBound(IntegerVariable var) const
Definition: integer.h:1524
IntegerValue LevelZeroLowerBound(IntegerVariable var) const
Definition: integer.h:1519
void RelaxLinearReason(IntegerValue slack, absl::Span< const IntegerValue > coeffs, std::vector< IntegerLiteral > *reason) const
Definition: integer.cc:805
IntegerValue LowerBound(IntegerVariable i) const
Definition: integer.h:1435
IntegerLiteral UpperBoundAsLiteral(IntegerVariable i) const
Definition: integer.h:1472
bool IsFixedAtLevelZero(IntegerVariable var) const
Definition: integer.h:1529
void RemoveLevelZeroBounds(std::vector< IntegerLiteral > *reason) const
Definition: integer.cc:939
void RegisterReversibleClass(ReversibleInterface *rev)
Definition: integer.h:940
bool ChangeLp(const absl::StrongVector< IntegerVariable, double > &lp_solution, glop::BasisState *solution_state)
void SetObjectiveCoefficient(IntegerVariable var, IntegerValue coeff)
ConstraintIndex Add(LinearConstraint ct, bool *added=nullptr)
const std::vector< ConstraintIndex > & LpConstraints() const
bool AddCut(LinearConstraint ct, std::string type_name, const absl::StrongVector< IntegerVariable, double > &lp_solution, std::string extra_info="")
const absl::StrongVector< ConstraintIndex, ConstraintInfo > & AllConstraints() const
std::function< IntegerLiteral()> HeuristicLpReducedCostBinary(Model *model)
bool IncrementalPropagate(const std::vector< int > &watch_indices) override
std::function< IntegerLiteral()> HeuristicLpMostInfeasibleBinary(Model *model)
void SetObjectiveCoefficient(IntegerVariable ivar, IntegerValue coeff)
Class that owns everything related to a particular optimization model.
Definition: sat/model.h:38
static constexpr SearchBranching LP_SEARCH
void ConvertToLinearConstraint(const std::vector< IntegerVariable > &integer_variables, IntegerValue upper_bound, LinearConstraint *result)
bool Add(glop::ColIndex col, IntegerValue value)
bool AddLinearExpressionMultiple(IntegerValue multiplier, const std::vector< std::pair< glop::ColIndex, IntegerValue > > &terms)
std::vector< std::pair< glop::ColIndex, IntegerValue > > GetTerms()
void TransferToManager(const absl::StrongVector< IntegerVariable, double > &lp_solution, LinearConstraintManager *manager)
std::vector< Literal > * MutableConflict()
Definition: sat_base.h:363
void ProcessVariables(const std::vector< double > &lp_values, const std::vector< IntegerValue > &lower_bounds, const std::vector< IntegerValue > &upper_bounds)
void AddOneConstraint(glop::RowIndex, const std::vector< std::pair< glop::ColIndex, IntegerValue > > &terms, IntegerValue lb, IntegerValue ub)
std::vector< std::vector< std::pair< glop::RowIndex, IntegerValue > > > InterestingCandidates(ModelRandomGenerator *random)
int64_t b
int64_t a
Block * next
SatParameters parameters
const std::string name
const Constraint * ct
int64_t value
IntVar * var
Definition: expr_array.cc:1874
absl::Status status
Definition: g_gurobi.cc:35
double upper_bound
double lower_bound
GRBmodel * model
const bool DEBUG_MODE
Definition: macros.h:24
ColIndex col
Definition: markowitz.cc:183
RowIndex row
Definition: markowitz.cc:182
const Collection::value_type::second_type & FindOrDie(const Collection &collection, const typename Collection::value_type::first_type &key)
Definition: map_util.h:206
StrictITIVector< ColIndex, Fractional > DenseRow
Definition: lp_types.h:303
std::string GetProblemStatusString(ProblemStatus problem_status)
Definition: lp_types.cc:19
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:49
RowIndex ColToRowIndex(ColIndex col)
Definition: lp_types.h:52
const double kEpsilon
Definition: lp_types.h:87
StrictITIVector< RowIndex, Fractional > DenseColumn
Definition: lp_types.h:332
IntegerValue FloorRatio(IntegerValue dividend, IntegerValue positive_divisor)
Definition: integer.h:92
CutGenerator CreateCVRPCutGenerator(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, const std::vector< int64_t > &demands, int64_t capacity, Model *model)
bool AddProductTo(IntegerValue a, IntegerValue b, IntegerValue *result)
Definition: integer.h:115
constexpr IntegerValue kMaxIntegerValue(std::numeric_limits< IntegerValue::ValueType >::max() - 1)
IntType IntTypeAbs(IntType t)
Definition: integer.h:79
constexpr IntegerValue kMinIntegerValue(-kMaxIntegerValue)
const IntegerVariable kNoIntegerVariable(-1)
void MakeAllCoefficientsPositive(LinearConstraint *constraint)
IntegerVariable PositiveVariable(IntegerVariable i)
Definition: integer.h:143
std::vector< IntegerVariable > NegationOf(const std::vector< IntegerVariable > &vars)
Definition: integer.cc:30
IntegerValue ComputeInfinityNorm(const LinearConstraint &constraint)
void SeparateSubtourInequalities(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, const absl::StrongVector< IntegerVariable, double > &lp_values, absl::Span< const int64_t > demands, int64_t capacity, LinearConstraintManager *manager, Model *model)
bool VariableIsPositive(IntegerVariable i)
Definition: integer.h:139
void DivideByGCD(LinearConstraint *constraint)
CutGenerator CreateStronglyConnectedGraphCutGenerator(int num_nodes, const std::vector< int > &tails, const std::vector< int > &heads, const std::vector< Literal > &literals, Model *model)
double ComputeActivity(const LinearConstraint &constraint, const absl::StrongVector< IntegerVariable, double > &values)
double ToDouble(IntegerValue value)
Definition: integer.h:71
Collection of objects used to extend the Constraint Solver library.
int64_t CapAdd(int64_t x, int64_t y)
int64_t CapSub(int64_t x, int64_t y)
std::pair< int64_t, int64_t > Arc
Definition: search.cc:3434
int64_t CapProd(int64_t x, int64_t y)
int index
Definition: pack.cc:509
int64_t demand
Definition: resource.cc:125
int64_t bound
int64_t capacity
int64_t tail
int64_t head
int64_t start
std::vector< IntegerVariable > vars
Definition: cuts.h:43
std::function< bool(const absl::StrongVector< IntegerVariable, double > &lp_values, LinearConstraintManager *manager)> generate_cuts
Definition: cuts.h:47
static IntegerLiteral LowerOrEqual(IntegerVariable i, IntegerValue bound)
Definition: integer.h:1383
static IntegerLiteral GreaterOrEqual(IntegerVariable i, IntegerValue bound)
Definition: integer.h:1377
const double coeff
#define VLOG_IS_ON(verboselevel)
Definition: vlog_is_on.h:44