// Copyright 2010-2018 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/sat/linear_programming_constraint.h"

#include <algorithm>
#include <cmath>
#include <limits>

#include "absl/container/flat_hash_map.h"
#include "ortools/base/commandlineflags.h"
#include "ortools/base/int_type_indexed_vector.h"
#include "ortools/base/integral_types.h"
#include "ortools/base/logging.h"
#include "ortools/base/map_util.h"
#include "ortools/glop/parameters.pb.h"
#include "ortools/glop/preprocessor.h"
#include "ortools/glop/status.h"
#include "ortools/graph/strongly_connected_components.h"
#include "ortools/util/saturated_arithmetic.h"

namespace operations_research {
namespace sat {

using glop::ColIndex;
using glop::Fractional;
using glop::RowIndex;

const double LinearProgrammingConstraint::kCpEpsilon = 1e-4;
const double LinearProgrammingConstraint::kLpEpsilon = 1e-6;

// TODO(user): make SatParameters singleton too, otherwise changing them after
// a constraint was added will have no effect on this class.
LinearProgrammingConstraint::LinearProgrammingConstraint(Model* model)
    : sat_parameters_(*(model->GetOrCreate<SatParameters>())),
      time_limit_(model->GetOrCreate<TimeLimit>()),
      integer_trail_(model->GetOrCreate<IntegerTrail>()),
      trail_(model->GetOrCreate<Trail>()),
      model_heuristics_(model->GetOrCreate<SearchHeuristicsVector>()),
      integer_encoder_(model->GetOrCreate<IntegerEncoder>()),
      dispatcher_(model->GetOrCreate<LinearProgrammingDispatcher>()) {
  // Tweak the default parameters to make the solve incremental.
  glop::GlopParameters parameters;
  parameters.set_use_dual_simplex(true);
  simplex_.SetParameters(parameters);
}

void LinearProgrammingConstraint::AddLinearConstraint(
    const LinearConstraint& ct) {
  DCHECK(!lp_constraint_is_registered_);
  constraint_manager_.Add(ct);

  // We still create the mirror variable right away though.
  //
  // TODO(user): clean this up? Note that it is important that the variables
  // in lp_data_ never change though, so that we can restart from the current
  // lp solution and be incremental (even if the constraints changed).
  for (const IntegerVariable var : ct.vars) {
    GetOrCreateMirrorVariable(PositiveVariable(var));
  }
}

glop::ColIndex LinearProgrammingConstraint::GetOrCreateMirrorVariable(
    IntegerVariable positive_variable) {
  DCHECK(VariableIsPositive(positive_variable));
  if (!gtl::ContainsKey(mirror_lp_variable_, positive_variable)) {
    const glop::ColIndex col(integer_variables_.size());
    mirror_lp_variable_[positive_variable] = col;
    integer_variables_.push_back(positive_variable);
    lp_solution_.push_back(std::numeric_limits<double>::infinity());
    lp_reduced_cost_.push_back(0.0);
    (*dispatcher_)[positive_variable] = this;

    const int index = std::max(positive_variable.value(),
                               NegationOf(positive_variable).value());
    if (index >= expanded_lp_solution_.size()) {
      expanded_lp_solution_.resize(index + 1, 0.0);
    }
    return col;
  }
  return mirror_lp_variable_[positive_variable];
}

void LinearProgrammingConstraint::SetObjectiveCoefficient(IntegerVariable ivar,
                                                          IntegerValue coeff) {
  CHECK(!lp_constraint_is_registered_);
  objective_is_defined_ = true;
  const IntegerVariable pos_var =
      VariableIsPositive(ivar) ? ivar : NegationOf(ivar);
  if (ivar != pos_var) coeff = -coeff;
  const glop::ColIndex col = GetOrCreateMirrorVariable(pos_var);
  integer_objective_.push_back({col, coeff});
}

// TODO(user): As the search progresses, some variables might get fixed.
// Exploit this to reduce the number of variables in the LP and in the
// ConstraintManager? We might also detect during the search that two
// variables are equivalent.
void LinearProgrammingConstraint::CreateLpFromConstraintManager() {
  // Fill integer_lp_.
  integer_lp_.clear();
  const auto& all_constraints = constraint_manager_.AllConstraints();
  for (const auto index : constraint_manager_.LpConstraints()) {
    const LinearConstraint& ct = all_constraints[index];
    integer_lp_.push_back(LinearConstraintInternal());
    LinearConstraintInternal& new_ct = integer_lp_.back();
    new_ct.lb = ct.lb;
    new_ct.ub = ct.ub;
    const int size = ct.vars.size();
    for (int i = 0; i < size; ++i) {
      // We only use positive variables inside this class.
      IntegerVariable var = ct.vars[i];
      IntegerValue coeff = ct.coeffs[i];
      if (!VariableIsPositive(var)) {
        var = NegationOf(var);
        coeff = -coeff;
      }
      new_ct.terms.push_back({GetOrCreateMirrorVariable(var), coeff});
    }

    // Important to keep lp_data_ "clean".
    std::sort(new_ct.terms.begin(), new_ct.terms.end());
  }

  // Copy the integer_lp_ into lp_data_.
  lp_data_.Clear();
  for (int i = 0; i < integer_variables_.size(); ++i) {
    CHECK_EQ(glop::ColIndex(i), lp_data_.CreateNewVariable());
  }
  for (const auto entry : integer_objective_) {
    lp_data_.SetObjectiveCoefficient(entry.first, ToDouble(entry.second));
  }
  for (const LinearConstraintInternal& ct : integer_lp_) {
    const ConstraintIndex row = lp_data_.CreateNewConstraint();
    lp_data_.SetConstraintBounds(row, ToDouble(ct.lb), ToDouble(ct.ub));
    for (const auto& term : ct.terms) {
      lp_data_.SetCoefficient(row, term.first, ToDouble(term.second));
    }
  }

  // Scale lp_data_.
  scaler_.Clear();
  Scale(&lp_data_, &scaler_, glop::GlopParameters::DEFAULT);
  lp_data_.ScaleObjective();

  // ScaleBounds() looks at both the constraints and variable bounds, so we
  // initialize the LP variable bounds before scaling them.
  //
  // TODO(user): As part of the scaling, we may also want to shift the initial
  // variable bounds so that each variable contains the value zero in its
  // domain. Maybe just once and for all at the beginning.
  bound_scaling_factor_ = 1.0;
  UpdateBoundsOfLpVariables();
  bound_scaling_factor_ = lp_data_.ScaleBounds();

  lp_data_.NotifyThatColumnsAreClean();
  lp_data_.AddSlackVariablesWhereNecessary(false);
  VLOG(1) << "LP relaxation: " << lp_data_.GetDimensionString() << ". "
          << constraint_manager_.AllConstraints().size()
          << " managed constraints.";
}

void LinearProgrammingConstraint::RegisterWith(Model* model) {
  DCHECK(!lp_constraint_is_registered_);
  lp_constraint_is_registered_ = true;
  model->GetOrCreate<LinearProgrammingConstraintCollection>()->push_back(this);

  // Note that this is not really needed but should lead to better cache
  // locality.
  std::sort(integer_objective_.begin(), integer_objective_.end());

  // Set the LP to its initial content.
  if (!sat_parameters_.add_lp_constraints_lazily()) {
    constraint_manager_.AddAllConstraintsToLp();
  }
  CreateLpFromConstraintManager();

  GenericLiteralWatcher* watcher = model->GetOrCreate<GenericLiteralWatcher>();
  const int watcher_id = watcher->Register(this);
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    watcher->WatchIntegerVariable(integer_variables_[i], watcher_id, i);
  }
  if (objective_is_defined_) {
    watcher->WatchUpperBound(objective_cp_, watcher_id);
  }
  watcher->SetPropagatorPriority(watcher_id, 2);

  if (integer_variables_.size() >= 20) {  // Do not use on small subparts.
    auto* container = model->GetOrCreate<SearchHeuristicsVector>();
    container->push_back(HeuristicLPPseudoCostBinary(model));
    container->push_back(HeuristicLPMostInfeasibleBinary(model));
  }

  // Registering it with the trail makes sure this class is always in sync
  // when it is used in the decision heuristics.
  integer_trail_->RegisterReversibleClass(this);
}

void LinearProgrammingConstraint::SetLevel(int level) {
  if (lp_solution_is_set_ && level < lp_solution_level_) {
    lp_solution_is_set_ = false;
  }

  // Special case for level zero, we "reload" any previously known optimal
  // solution from that level.
  //
  // TODO(user): Keep all optimal solutions in the current branch?
  if (level == 0 && !level_zero_lp_solution_.empty()) {
    lp_solution_is_set_ = true;
    lp_solution_ = level_zero_lp_solution_;
    lp_solution_level_ = 0;
    for (int i = 0; i < lp_solution_.size(); i++) {
      expanded_lp_solution_[integer_variables_[i]] = lp_solution_[i];
      expanded_lp_solution_[NegationOf(integer_variables_[i])] =
          -lp_solution_[i];
    }
  }
}

void LinearProgrammingConstraint::AddCutGenerator(CutGenerator generator) {
  for (const IntegerVariable var : generator.vars) {
    GetOrCreateMirrorVariable(VariableIsPositive(var) ? var : NegationOf(var));
  }
  cut_generators_.push_back(std::move(generator));
}

bool LinearProgrammingConstraint::IncrementalPropagate(
    const std::vector<int>& watch_indices) {
  if (!lp_solution_is_set_) return Propagate();

  // Check whether the change breaks the current LP solution. If it does, call
  // Propagate() on the current LP.
  for (const int index : watch_indices) {
    const double lb =
        ToDouble(integer_trail_->LowerBound(integer_variables_[index]));
    const double ub =
        ToDouble(integer_trail_->UpperBound(integer_variables_[index]));
    const double value = lp_solution_[index];
    if (value < lb - kCpEpsilon || value > ub + kCpEpsilon) return Propagate();
  }

  // TODO(user): The saved lp solution is still valid given the current
  // variable bounds, so the LP optimal didn't change. However we might still
  // want to add new cuts or new lazy constraints?
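  //
  // (Note: a made-up illustration of the early-exit check above. If
  // lp_solution_[index] is 2.3 and the watched variable's bounds tighten to
  // [0, 2], the stored value is outside by more than kCpEpsilon and we
  // re-solve via Propagate(); a tightening to [0, 3] keeps 2.3 feasible and
  // we simply keep the current solution.)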
  return true;
}

glop::Fractional LinearProgrammingConstraint::CpToLpScalingFactor(
    glop::ColIndex col) const {
  return scaler_.col_scale(col) / bound_scaling_factor_;
}

glop::Fractional LinearProgrammingConstraint::LpToCpScalingFactor(
    glop::ColIndex col) const {
  return bound_scaling_factor_ / scaler_.col_scale(col);
}

glop::Fractional LinearProgrammingConstraint::GetVariableValueAtCpScale(
    glop::ColIndex var) {
  return simplex_.GetVariableValue(var) * LpToCpScalingFactor(var);
}

double LinearProgrammingConstraint::GetSolutionValue(
    IntegerVariable variable) const {
  return lp_solution_[gtl::FindOrDie(mirror_lp_variable_, variable).value()];
}

double LinearProgrammingConstraint::GetSolutionReducedCost(
    IntegerVariable variable) const {
  return lp_reduced_cost_[gtl::FindOrDie(mirror_lp_variable_, variable)
                              .value()];
}

void LinearProgrammingConstraint::UpdateBoundsOfLpVariables() {
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    const IntegerVariable cp_var = integer_variables_[i];
    const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
    const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
    const double factor = CpToLpScalingFactor(glop::ColIndex(i));
    lp_data_.SetVariableBounds(glop::ColIndex(i), lb * factor, ub * factor);
  }
}

bool LinearProgrammingConstraint::SolveLp() {
  const auto status = simplex_.Solve(lp_data_, time_limit_);
  if (!status.ok()) {
    LOG(WARNING) << "The LP solver encountered an error: "
                 << status.error_message();
    simplex_.ClearStateForNextSolve();
    return false;
  }
  if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
    lp_solution_is_set_ = true;
    lp_solution_level_ = trail_->CurrentDecisionLevel();
    const int num_vars = integer_variables_.size();
    for (int i = 0; i < num_vars; i++) {
      const glop::Fractional value =
          GetVariableValueAtCpScale(glop::ColIndex(i));
      lp_solution_[i] = value;
      expanded_lp_solution_[integer_variables_[i]] = value;
      expanded_lp_solution_[NegationOf(integer_variables_[i])] = -value;
    }
    if (lp_solution_level_ == 0) {
      level_zero_lp_solution_ = lp_solution_;
    }
  }
  return true;
}

bool LinearProgrammingConstraint::Propagate() {
  UpdateBoundsOfLpVariables();

  // TODO(user): It seems the time we lose by not stopping early might be
  // worth it because we end up with a better explanation at optimality.
  glop::GlopParameters parameters = simplex_.GetParameters();
  if (/* DISABLES CODE */ (false) && objective_is_defined_) {
    // We put a limit on the dual objective since there is no point increasing
    // it past our current objective upper-bound (we will already fail as soon
    // as we pass it). Note that this limit is properly transformed using the
    // objective scaling factor and offset stored in lp_data_.
    //
    // Note that we use a bigger epsilon here to be sure that if we abort
    // because of this, we will report a conflict.
    parameters.set_objective_upper_limit(static_cast<double>(
        integer_trail_->UpperBound(objective_cp_).value() +
        100.0 * kCpEpsilon));
  }

  // Put an iteration limit on the work we do in the simplex for this call.
  // Note that because we are "incremental", even if we don't solve it this
  // time we will make progress towards a solve in the lower nodes of the
  // tree search.
  //
  // TODO(user): Put more at the root, and less afterwards?
  parameters.set_max_number_of_iterations(500);
  if (sat_parameters_.use_exact_lp_reason()) {
    parameters.set_change_status_to_imprecise(false);
    parameters.set_primal_feasibility_tolerance(1e-7);
    parameters.set_dual_feasibility_tolerance(1e-7);
  }

  simplex_.SetParameters(parameters);
  simplex_.NotifyThatMatrixIsUnchangedForNextSolve();
  if (!SolveLp()) return true;

  // Add new constraints to the LP and resolve?
  //
  // TODO(user): We might want to do that more than once. Currently we rely on
  // this being called again on the next IncrementalPropagate() call, but that
  // might not always happen at level zero.
  if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
    // First add any new lazy constraints or cuts that were previously
    // generated and are now cutting the current solution.
    if (constraint_manager_.ChangeLp(expanded_lp_solution_)) {
      CreateLpFromConstraintManager();
      if (!SolveLp()) return true;
    } else {
      // Try to add cuts.
      if (!cut_generators_.empty() &&
          num_cuts_ < sat_parameters_.max_num_cuts() &&
          (trail_->CurrentDecisionLevel() == 0 ||
           !sat_parameters_.only_add_cuts_at_level_zero())) {
        int num_new_cuts = 0;
        for (const CutGenerator& generator : cut_generators_) {
          // TODO(user): Change the api so cuts can directly be added to the
          // manager and we don't need this intermediate vector.
          std::vector<LinearConstraint> cuts =
              generator.generate_cuts(expanded_lp_solution_);

          // Add the cuts to the manager.
          for (const LinearConstraint& cut : cuts) {
            ++num_new_cuts;
            constraint_manager_.Add(cut);
          }
        }
        if (num_new_cuts > 0) {
          num_cuts_ += num_new_cuts;
          VLOG(1) << "#cuts " << num_cuts_;
          if (constraint_manager_.ChangeLp(expanded_lp_solution_)) {
            CreateLpFromConstraintManager();
            if (!SolveLp()) return true;
          }
        }
      }
    }
  }

  // A dual-unbounded problem is infeasible. We use the dual ray reason.
  if (simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_UNBOUNDED) {
    if (sat_parameters_.use_exact_lp_reason()) {
      if (!FillExactDualRayReason()) return true;
    } else {
      FillDualRayReason();
    }
    return integer_trail_->ReportConflict(integer_reason_);
  }

  // Optimality deductions if problem has an objective.
  if (objective_is_defined_ &&
      (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL ||
       simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_FEASIBLE)) {
    // Try to filter optimal objective value. Note that GetObjectiveValue()
    // already takes care of the scaling so that it returns an objective in
    // the CP world.
    const double relaxed_optimal_objective = simplex_.GetObjectiveValue();
    const IntegerValue approximate_new_lb(static_cast<int64>(
        std::ceil(relaxed_optimal_objective - kCpEpsilon)));

    // TODO(user): Maybe do a bit less computation when we cannot propagate
    // anything.
    const IntegerValue old_lb = integer_trail_->LowerBound(objective_cp_);
    IntegerValue new_lb;
    if (sat_parameters_.use_exact_lp_reason()) {
      new_lb = ExactLpReasonning();

      // A difference of 1 happens relatively often, so we just display when
      // there is more. Note that when we are over the objective upper bound,
      // we relax new_lb for a better reason, so we ignore this case.
      if (new_lb <= kMinIntegerValue) {
        VLOG(2) << "Overflow during exact LP reasoning.";
      } else if (new_lb <= integer_trail_->UpperBound(objective_cp_) &&
                 std::abs((approximate_new_lb - new_lb).value()) > 1) {
        VLOG(2) << "LP objective lower bound approx = " << approximate_new_lb;
        VLOG(2) << "                          exact = " << new_lb;
      }
    } else {
      FillReducedCostsReason();
      new_lb = approximate_new_lb;
      const double objective_cp_ub =
          ToDouble(integer_trail_->UpperBound(objective_cp_));
      ReducedCostStrengtheningDeductions(objective_cp_ub -
                                         relaxed_optimal_objective);
      if (!deductions_.empty()) {
        deductions_reason_ = integer_reason_;
        deductions_reason_.push_back(
            integer_trail_->UpperBoundAsLiteral(objective_cp_));
      }
    }

    // Push new objective lb.
    if (old_lb < new_lb) {
      const IntegerLiteral deduction =
          IntegerLiteral::GreaterOrEqual(objective_cp_, new_lb);
      if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
        return false;
      }
    }

    // Push reduced cost strengthening bounds.
    if (!deductions_.empty()) {
      const int trail_index_with_same_reason = integer_trail_->Index();
      for (const IntegerLiteral deduction : deductions_) {
        if (!integer_trail_->Enqueue(deduction, {}, deductions_reason_,
                                     trail_index_with_same_reason)) {
          return false;
        }
      }
    }
  }

  // Copy more info about the current solution.
  if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
    CHECK(lp_solution_is_set_);
    lp_objective_ = simplex_.GetObjectiveValue();
    lp_solution_is_integer_ = true;
    const int num_vars = integer_variables_.size();
    const double objective_scale = lp_data_.objective_scaling_factor();
    for (int i = 0; i < num_vars; i++) {
      // The reduced cost needs to be divided by LpToCpScalingFactor().
      lp_reduced_cost_[i] = simplex_.GetReducedCost(glop::ColIndex(i)) *
                            CpToLpScalingFactor(glop::ColIndex(i)) *
                            objective_scale;
      if (std::abs(lp_solution_[i] - std::round(lp_solution_[i])) >
          kCpEpsilon) {
        lp_solution_is_integer_ = false;
      }
    }

    if (compute_reduced_cost_averages_) {
      // Decay averages.
      num_calls_since_reduced_cost_averages_reset_++;
      if (num_calls_since_reduced_cost_averages_reset_ == 10000) {
        for (int i = 0; i < num_vars; i++) {
          sum_cost_up_[i] /= 2;
          num_cost_up_[i] /= 2;
          sum_cost_down_[i] /= 2;
          num_cost_down_[i] /= 2;
        }
        num_calls_since_reduced_cost_averages_reset_ = 0;
      }

      // Accumulate pseudo-costs of all unassigned variables.
      for (int i = 0; i < num_vars; i++) {
        const IntegerVariable var = this->integer_variables_[i];

        // Skip ignored and fixed variables.
        if (integer_trail_->IsCurrentlyIgnored(var)) continue;
        const IntegerValue lb = integer_trail_->LowerBound(var);
        const IntegerValue ub = integer_trail_->UpperBound(var);
        if (lb == ub) continue;

        // Skip reduced costs that are zero or close.
        const double rc = this->GetSolutionReducedCost(var);
        if (std::abs(rc) < kCpEpsilon) continue;

        if (rc < 0.0) {
          sum_cost_down_[i] -= rc;
          num_cost_down_[i]++;
        } else {
          sum_cost_up_[i] += rc;
          num_cost_up_[i]++;
        }
      }
    }
  }
  return true;
}

namespace {

std::vector<std::pair<ColIndex, IntegerValue>> GetSparseRepresentation(
    const gtl::ITIVector<ColIndex, IntegerValue>& dense_vector) {
  std::vector<std::pair<ColIndex, IntegerValue>> result;
  for (ColIndex col(0); col < dense_vector.size(); ++col) {
    if (dense_vector[col] != 0) {
      result.push_back({col, dense_vector[col]});
    }
  }
  return result;
}

// Returns false in case of overflow.
bool AddLinearExpressionMultiple(
    IntegerValue multiplier,
    const std::vector<std::pair<ColIndex, IntegerValue>>& terms,
    gtl::ITIVector<ColIndex, IntegerValue>* dense_vector) {
  for (const std::pair<ColIndex, IntegerValue> term : terms) {
    const int64 prod = CapProd(multiplier.value(), term.second.value());
    if (prod == kint64min || prod == kint64max) return false;
    const int64 result = CapAdd((*dense_vector)[term.first].value(), prod);
    if (result == kint64min || result == kint64max) return false;
    (*dense_vector)[term.first] = IntegerValue(result);
  }
  return true;
}

}  // namespace

// Returns kMinIntegerValue in case of overflow.
//
// TODO(user): To avoid overflow, we could relax the constraint Sum term <= ub
// with Sum floor(term/divisor) <= floor(ub/divisor). It will be less precise,
// but we should be able to avoid overflow.
IntegerValue LinearProgrammingConstraint::GetImpliedLowerBound(
    const LinearExpression& terms) const {
  IntegerValue lower_bound(0);
  for (const auto term : terms) {
    const IntegerVariable var = integer_variables_[term.first.value()];
    const IntegerValue coeff = term.second;
    CHECK_NE(coeff, 0);
    const IntegerValue bound = coeff > 0 ? integer_trail_->LowerBound(var)
                                         : integer_trail_->UpperBound(var);
    const int64 prod = CapProd(bound.value(), coeff.value());
    if (prod == kint64min || prod == kint64max) return kMinIntegerValue;
    const int64 new_lb = CapAdd(lower_bound.value(), prod);
    if (new_lb == kint64min || new_lb == kint64max) return kMinIntegerValue;
    lower_bound = IntegerValue(new_lb);
  }
  return lower_bound;
}

// TODO(user): combine this with RelaxLinearReason() to avoid the extra
// magnitude vector and the weird precondition of RelaxLinearReason().
void LinearProgrammingConstraint::SetImpliedLowerBoundReason(
    const LinearExpression& terms, IntegerValue slack) {
  integer_reason_.clear();
  std::vector<IntegerValue> magnitudes;
  for (const auto term : terms) {
    const IntegerVariable var = integer_variables_[term.first.value()];
    const IntegerValue coeff = term.second;
    CHECK_NE(coeff, 0);
    if (coeff > 0) {
      magnitudes.push_back(coeff);
      integer_reason_.push_back(integer_trail_->LowerBoundAsLiteral(var));
    } else {
      magnitudes.push_back(-coeff);
      integer_reason_.push_back(integer_trail_->UpperBoundAsLiteral(var));
    }
  }
  CHECK_GE(slack, 0);
  if (slack > 0) {
    integer_trail_->RelaxLinearReason(slack, magnitudes, &integer_reason_);
  }
  integer_trail_->RemoveLevelZeroBounds(&integer_reason_);
}

// TODO(user): Provide a sparse interface.
bool LinearProgrammingConstraint::ComputeNewLinearConstraint(
    bool take_objective_into_account,
    const glop::DenseColumn& dense_lp_multipliers, Fractional* scaling,
    gtl::ITIVector<ColIndex, IntegerValue>* dense_terms,
    IntegerValue* upper_bound) const {
  // Process the dense_lp_multipliers and compute their infinity norm.
  std::vector<std::pair<RowIndex, Fractional>> lp_multipliers;
  Fractional lp_multipliers_norm = take_objective_into_account ? 1.0 : 0.0;
  for (RowIndex row(0); row < dense_lp_multipliers.size(); ++row) {
    const Fractional lp_multi = dense_lp_multipliers[row];
    if (lp_multi == 0.0) continue;

    // Remove trivial bad cases.
    if (lp_multi > 0.0 && integer_lp_[row.value()].ub >= kMaxIntegerValue) {
      continue;
    }
    if (lp_multi < 0.0 && integer_lp_[row.value()].lb <= kMinIntegerValue) {
      continue;
    }

    lp_multipliers_norm = std::max(lp_multipliers_norm, std::abs(lp_multi));
    lp_multipliers.push_back({row, lp_multi});
  }

  // This scaling will be responsible for keeping the wanted number of
  // precision digits when we round a floating point value to integer. We
  // will want around 6 digits for the larger lp_multi and less for the
  // smaller ones.
  *scaling = 1.0;

  // Scale the lp_multipliers to the CP world (still Fractional though).
  std::vector<std::pair<RowIndex, Fractional>> cp_multipliers;
  Fractional min_cp_multi = glop::kInfinity;
  const Fractional global_scaling =
      bound_scaling_factor_ / lp_data_.objective_scaling_factor();
  for (const auto entry : lp_multipliers) {
    const RowIndex row = entry.first;
    const Fractional lp_multi = entry.second;

    // The LP guarantees about 6 digits of precision, so we ignore anything
    // smaller than lp_multipliers_norm * 1e-6.
    const Fractional magnitude_diff = lp_multipliers_norm / std::abs(lp_multi);
    if (magnitude_diff > 1e6) continue;

    // Scale back into the cp world.
    const Fractional cp_multi =
        lp_multi / scaler_.row_scale(row) / global_scaling;

    // We want std::round(cp_multi * scaling) to have the same number of
    // digits of relative precision as lp_multi.
    const Fractional wanted_scaling =
        (1e6 / magnitude_diff) / std::abs(cp_multi);
    *scaling = std::max(*scaling, wanted_scaling);
    min_cp_multi = std::min(std::abs(cp_multi), min_cp_multi);
    cp_multipliers.push_back({row, cp_multi});
  }

  // This behaves exactly as if we had another "objective" constraint with an
  // lp_multi of 1.0 and a cp_multi of 1.0.
  if (take_objective_into_account) {
    *scaling = std::max(*scaling, 1e6 / lp_multipliers_norm);
  }

  // Scale the multipliers by *scaling.
  //
  // TODO(user): Maybe use int128 to avoid overflow?
  // TODO(user): Divide the duals by their gcd to limit overflow?
  // TODO(user): To avoid overflow, we could lower scaling at the cost of
  // losing precision.
  std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
  for (const auto entry : cp_multipliers) {
    const IntegerValue coeff(std::round(entry.second * (*scaling)));
    if (coeff != 0) integer_multipliers.push_back({entry.first, coeff});
  }

  // Initialize the new constraint.
  *upper_bound = 0;
  dense_terms->assign(integer_variables_.size(), IntegerValue(0));

  // Compute the new constraint by taking the linear combination given by
  // integer_multipliers of the integer constraints in integer_lp_.
  const ColIndex num_cols(integer_variables_.size());
  for (const std::pair<RowIndex, IntegerValue> term : integer_multipliers) {
    const RowIndex row = term.first;
    const IntegerValue multiplier = term.second;
    CHECK_LT(row, integer_lp_.size());

    // Update the constraint.
    if (!AddLinearExpressionMultiple(
            multiplier, integer_lp_[row.value()].terms, dense_terms)) {
      return false;
    }

    // Update the upper bound.
    const int64 bound = multiplier > 0 ? integer_lp_[row.value()].ub.value()
                                       : integer_lp_[row.value()].lb.value();
    const int64 prod = CapProd(multiplier.value(), bound);
    if (prod == kint64min || prod == kint64max) return false;
    const int64 result = CapAdd((*upper_bound).value(), prod);
    if (result == kint64min || result == kint64max) return false;
    (*upper_bound) = IntegerValue(result);
  }
  return true;
}

// The "exact" computation goes as follows:
//
// Given any INTEGER linear combination of the LP constraints, we can create a
// new integer constraint that is valid (its computation must not overflow
// though). Let's call this new_constraint.
// We can then always write, for the objective linear expression:
//   objective = (objective - new_constraint) + new_constraint
// And we can compute a lower bound as follows:
//   objective >= ImpliedLB(objective - new_constraint) + new_constraint_lb
// where ImpliedLB() is computed from the variables' current bounds.
//
// Now, if we use for the linear combination an approximation of the optimal
// dual LP values (by scaling them and rounding them to integers), we will get
// an EXACT objective lower bound that is more or less the same as the inexact
// bound given by the LP relaxation. This allows us to derive exact reasons
// for any propagation done by this constraint.
IntegerValue LinearProgrammingConstraint::ExactLpReasonning() {
  // Clear old reason and deductions.
  integer_reason_.clear();
  deductions_.clear();
  deductions_reason_.clear();

  // The row multipliers will be the negation of the LP duals.
  //
  // TODO(user): Provide and use a sparse API in Glop to get the duals.
  const RowIndex num_rows = simplex_.GetProblemNumRows();
  glop::DenseColumn lp_multipliers(num_rows);
  for (RowIndex row(0); row < num_rows; ++row) {
    lp_multipliers[row] = -simplex_.GetDualValue(row);
  }

  Fractional scaling;
  gtl::ITIVector<ColIndex, IntegerValue> reduced_costs;
  IntegerValue negative_lb;
  if (!ComputeNewLinearConstraint(/*take_objective_into_account=*/true,
                                  lp_multipliers, &scaling, &reduced_costs,
                                  &negative_lb)) {
    return kMinIntegerValue;  // Overflow.
  }

  // The "objective constraint" behaves as if the unscaled cp multiplier was
  // 1.0, so we will multiply it by this number and add it to reduced_costs.
  const IntegerValue obj_scale(std::round(scaling));
  if (!AddLinearExpressionMultiple(obj_scale, integer_objective_,
                                   &reduced_costs)) {
    return kMinIntegerValue;  // Overflow.
  }

  // TODO(user): We could correct small imprecisions by heuristically
  // computing for each row the best multiple to improve the
  // scaled_objective_lb below while keeping the reduced_costs of the same
  // sign. This should improve the objective lower bound.

  // Compute the objective lower bound, and the reason for it.
  const LinearExpression new_constraint =
      GetSparseRepresentation(reduced_costs);
  const IntegerValue lb = GetImpliedLowerBound(new_constraint);
  if (lb == kMinIntegerValue) return kMinIntegerValue;  // Overflow.
  const IntegerValue scaled_objective_lb(
      CapAdd(lb.value(), -negative_lb.value()));
  if (scaled_objective_lb == kint64min || scaled_objective_lb == kint64max) {
    return kMinIntegerValue;
  }
  const IntegerValue objective_ub = integer_trail_->UpperBound(objective_cp_);
  const IntegerValue scaled_objective_ub(
      CapProd(objective_ub.value(), obj_scale.value()));
  if (scaled_objective_ub == kint64min || scaled_objective_ub == kint64max) {
    return kMinIntegerValue;  // Overflow.
  }

  IntegerValue exact_objective_lb(CeilRatio(scaled_objective_lb, obj_scale));
  if (exact_objective_lb > objective_ub) {
    // We will have a conflict, so we can relax more!
    exact_objective_lb = objective_ub + 1;
  } else {
    // Reduced cost strengthening.
    //
    // Remark: This is nothing else than basic bound propagation of the
    // new_constraint with the given feasibility_slack between its implied
    // lower bound and its upper bound.
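    //
    // (A small, made-up numeric illustration of the propagation below: if
    // new_constraint contains a term 3 * x with x currently in [2, 9] and
    // feasibility_slack is 7, then x can increase by at most
    // FloorRatio(7, 3) = 2 above its lower bound before the scaled objective
    // lower bound passes the objective upper bound, so we can deduce x <= 4.)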
    IntegerValue explanation_slack = kMaxIntegerValue;
    const IntegerValue feasibility_slack = IntegerValue(
        CapSub(scaled_objective_ub.value(), scaled_objective_lb.value()));
    CHECK_GE(feasibility_slack, 0);
    if (feasibility_slack != kint64max) {
      for (const auto& term : new_constraint) {
        const IntegerVariable var = integer_variables_[term.first.value()];
        const IntegerValue coeff = term.second;
        CHECK_NE(coeff, 0);

        // Any change by more than this will make scaled_objective_lb go past
        // the objective upper bound.
        const IntegerValue allowed_change(FloorRatio(
            feasibility_slack, IntegerValue(std::abs(coeff.value()))));
        CHECK_GE(allowed_change, 0);
        if (coeff > 0) {
          const IntegerValue new_ub =
              integer_trail_->LowerBound(var) + allowed_change;
          if (new_ub < integer_trail_->UpperBound(var)) {
            explanation_slack =
                std::min(explanation_slack,
                         (allowed_change + 1) * coeff - feasibility_slack - 1);
            deductions_.push_back(IntegerLiteral::LowerOrEqual(var, new_ub));
          }
        } else {  // coeff < 0
          const IntegerValue new_lb =
              integer_trail_->UpperBound(var) - allowed_change;
          if (new_lb > integer_trail_->LowerBound(var)) {
            explanation_slack = std::min(
                explanation_slack,
                (allowed_change + 1) * -coeff - feasibility_slack - 1);
            deductions_.push_back(IntegerLiteral::GreaterOrEqual(var, new_lb));
          }
        }
      }
    }

    if (!deductions_.empty()) {
      // TODO(user): Instead of taking explanation_slack as the min of the
      // slacks of all deductions, we could use a different reason for each
      // push instead. Experiment! Maybe there is some tradeoff depending on
      // the number of pushes.
      //
      // TODO(user): The individual reasons are even smaller because we can
      // ignore the term corresponding to the variable we push.
      //
      // TODO(user): The proper fix might be to add a lazy reason code that
      // can reconstruct the relaxed reason on demand from a base one. That
      // way we get better reasons without more work at propagation time.
      // Also, this code should be shared with the one in IntegerSumLE since
      // they are the same, and it will facilitate unit-testing.
      SetImpliedLowerBoundReason(new_constraint, explanation_slack);
      deductions_reason_ = integer_reason_;
      deductions_reason_.push_back(
          integer_trail_->UpperBoundAsLiteral(objective_cp_));
    }
  }

  // Relax the lower bound reason.
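  //
  // (A hedged, made-up numeric illustration of the relaxation below: with
  // obj_scale = 100 and scaled_objective_lb = 712, exact_objective_lb is
  // CeilRatio(712, 100) = 8; any scaled lower bound down to
  // min_value = (8 - 1) * 100 + 1 = 701 yields the same ceiling, so the
  // reason can be relaxed by slack = 712 - 701 = 11.)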
  const IntegerValue min_value = (exact_objective_lb - 1) * obj_scale + 1;
  const IntegerValue slack = scaled_objective_lb - min_value;
  SetImpliedLowerBoundReason(new_constraint, slack);
  return exact_objective_lb;
}

bool LinearProgrammingConstraint::FillExactDualRayReason() {
  Fractional scaling;
  gtl::ITIVector<ColIndex, IntegerValue> dense_new_constraint;
  IntegerValue new_constraint_ub;
  if (!ComputeNewLinearConstraint(/*take_objective_into_account=*/false,
                                  simplex_.GetDualRay(), &scaling,
                                  &dense_new_constraint, &new_constraint_ub)) {
    return false;
  }
  const LinearExpression new_constraint =
      GetSparseRepresentation(dense_new_constraint);
  const IntegerValue implied_lb = GetImpliedLowerBound(new_constraint);
  if (implied_lb <= new_constraint_ub) {
    VLOG(1) << "LP exact dual ray not infeasible by "
            << (new_constraint_ub - implied_lb).value() / scaling;
    return false;
  }
  const IntegerValue slack = (implied_lb - new_constraint_ub) - 1;
  SetImpliedLowerBoundReason(new_constraint, slack);
  return true;
}

void LinearProgrammingConstraint::FillReducedCostsReason() {
  integer_reason_.clear();
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    const double rc = simplex_.GetReducedCost(glop::ColIndex(i));
    if (rc > kLpEpsilon) {
      integer_reason_.push_back(
          integer_trail_->LowerBoundAsLiteral(integer_variables_[i]));
    } else if (rc < -kLpEpsilon) {
      integer_reason_.push_back(
          integer_trail_->UpperBoundAsLiteral(integer_variables_[i]));
    }
  }
  integer_trail_->RemoveLevelZeroBounds(&integer_reason_);
}

void LinearProgrammingConstraint::FillDualRayReason() {
  integer_reason_.clear();
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    // TODO(user): Like for FillReducedCostsReason(), the bounds could be
    // extended here. Actually, the "dual ray cost updates" is the reduced
    // cost of an optimal solution if we were optimizing one direction of one
    // basic variable. The simplex_ interface would need to be slightly
    // extended to retrieve the basis column in question and the variable
    // values though.
    const double rc = simplex_.GetDualRayRowCombination()[glop::ColIndex(i)];
    if (rc > kLpEpsilon) {
      integer_reason_.push_back(
          integer_trail_->LowerBoundAsLiteral(integer_variables_[i]));
    } else if (rc < -kLpEpsilon) {
      integer_reason_.push_back(
          integer_trail_->UpperBoundAsLiteral(integer_variables_[i]));
    }
  }
  integer_trail_->RemoveLevelZeroBounds(&integer_reason_);
}

void LinearProgrammingConstraint::ReducedCostStrengtheningDeductions(
    double cp_objective_delta) {
  deductions_.clear();

  // TRICKY: while simplex_.GetObjectiveValue() uses the objective scaling
  // factor stored in lp_data_, all the other functions like GetReducedCost()
  // or GetVariableValue() do not.
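  //
  // (A hedged, made-up example of the conversion below: with an objective
  // scaling factor of 2.0 and cp_objective_delta = 6.0, the remaining slack
  // in LP units is 3.0; the reduced costs and variable values returned by
  // simplex_ are already in those LP units, so the derived bound is only
  // converted back to the CP scale at the end via LpToCpScalingFactor().)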
  const double lp_objective_delta =
      cp_objective_delta / lp_data_.objective_scaling_factor();
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    const IntegerVariable cp_var = integer_variables_[i];
    const glop::ColIndex lp_var = glop::ColIndex(i);
    const double rc = simplex_.GetReducedCost(lp_var);
    const double value = simplex_.GetVariableValue(lp_var);

    if (rc == 0.0) continue;
    const double lp_other_bound = value + lp_objective_delta / rc;
    const double cp_other_bound = lp_other_bound * LpToCpScalingFactor(lp_var);

    if (rc > kLpEpsilon) {
      const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
      const double new_ub = std::floor(cp_other_bound + kCpEpsilon);
      if (new_ub < ub) {
        // TODO(user): Because rc > kLpEpsilon, the lower_bound of cp_var will
        // be part of the reason returned by FillReducedCostsReason(), but we
        // actually do not need it here. Same below.
        const IntegerValue new_ub_int(static_cast<int64>(new_ub));
        deductions_.push_back(IntegerLiteral::LowerOrEqual(cp_var, new_ub_int));
      }
    } else if (rc < -kLpEpsilon) {
      const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
      const double new_lb = std::ceil(cp_other_bound - kCpEpsilon);
      if (new_lb > lb) {
        const IntegerValue new_lb_int(static_cast<int64>(new_lb));
        deductions_.push_back(
            IntegerLiteral::GreaterOrEqual(cp_var, new_lb_int));
      }
    }
  }
}

namespace {

// TODO(user): we could use a sparser algorithm, even if this does not seem
// to matter for now.
void AddIncomingAndOutgoingCutsIfNeeded(
    int num_nodes, const std::vector<int>& s, const std::vector<int>& tails,
    const std::vector<int>& heads, const std::vector<IntegerVariable>& vars,
    const std::vector<double>& var_lp_values, int64 rhs_lower_bound,
    std::vector<LinearConstraint>* cuts) {
  LinearConstraint incoming;
  LinearConstraint outgoing;
  double sum_incoming = 0.0;
  double sum_outgoing = 0.0;
  incoming.lb = outgoing.lb = IntegerValue(rhs_lower_bound);
  incoming.ub = outgoing.ub = kMaxIntegerValue;
  const std::set<int> subset(s.begin(), s.end());

  // Add incoming/outgoing cut arcs, compute flow through cuts.
  for (int i = 0; i < tails.size(); ++i) {
    const bool out = gtl::ContainsKey(subset, tails[i]);
    const bool in = gtl::ContainsKey(subset, heads[i]);
    if (out && in) continue;
    if (out) {
      sum_outgoing += var_lp_values[i];
      outgoing.vars.push_back(vars[i]);
      outgoing.coeffs.push_back(IntegerValue(1));
    }
    if (in) {
      sum_incoming += var_lp_values[i];
      incoming.vars.push_back(vars[i]);
      incoming.coeffs.push_back(IntegerValue(1));
    }
  }

  // A node is said to be optional if it can be excluded from the subcircuit,
  // in which case there is a self-loop on that node.
  // If there are optional nodes, use the extended formula:
  //   sum(cut) >= 1 - optional_loop_in - optional_loop_out
  // where optional_loop_in's node is in subset, optional_loop_out's is out.
  // TODO(user): Favor optional loops fixed to zero at root.
  int num_optional_nodes_in = 0;
  int num_optional_nodes_out = 0;
  int optional_loop_in = -1;
  int optional_loop_out = -1;
  for (int i = 0; i < tails.size(); ++i) {
    if (tails[i] != heads[i]) continue;
    if (gtl::ContainsKey(subset, tails[i])) {
      num_optional_nodes_in++;
      if (optional_loop_in == -1 ||
          var_lp_values[i] < var_lp_values[optional_loop_in]) {
        optional_loop_in = i;
      }
    } else {
      num_optional_nodes_out++;
      if (optional_loop_out == -1 ||
          var_lp_values[i] < var_lp_values[optional_loop_out]) {
        optional_loop_out = i;
      }
    }
  }
  if (num_optional_nodes_in + num_optional_nodes_out > 0) {
    CHECK_EQ(rhs_lower_bound, 1);
    // When all optionals of one side are excluded in the lp solution, no cut.
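    // (Illustrative example, not from the original code: if every node of
    // the subset is optional and even its cheapest self-loop already has an
    // LP value close to 1.0, the whole subset is essentially excluded from
    // the circuit, so the extended cut would be trivially satisfied and we
    // skip it below.)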
    if (num_optional_nodes_in == subset.size() &&
        (optional_loop_in == -1 ||
         var_lp_values[optional_loop_in] > 1.0 - 1e-6)) {
      return;
    }
    if (num_optional_nodes_out == num_nodes - subset.size() &&
        (optional_loop_out == -1 ||
         var_lp_values[optional_loop_out] > 1.0 - 1e-6)) {
      return;
    }

    // There is no mandatory node in subset, add optional_loop_in.
    if (num_optional_nodes_in == subset.size()) {
      incoming.vars.push_back(vars[optional_loop_in]);
      incoming.coeffs.push_back(IntegerValue(1));
      sum_incoming += var_lp_values[optional_loop_in];

      outgoing.vars.push_back(vars[optional_loop_in]);
      outgoing.coeffs.push_back(IntegerValue(1));
      sum_outgoing += var_lp_values[optional_loop_in];
    }

    // There is no mandatory node out of subset, add optional_loop_out.
    if (num_optional_nodes_out == num_nodes - subset.size()) {
      incoming.vars.push_back(vars[optional_loop_out]);
      incoming.coeffs.push_back(IntegerValue(1));
      sum_incoming += var_lp_values[optional_loop_out];

      outgoing.vars.push_back(vars[optional_loop_out]);
      outgoing.coeffs.push_back(IntegerValue(1));
      sum_outgoing += var_lp_values[optional_loop_out];
    }
  }

  if (sum_incoming < rhs_lower_bound - 1e-6) {
    cuts->push_back(std::move(incoming));
  }
  if (sum_outgoing < rhs_lower_bound - 1e-6) {
    cuts->push_back(std::move(outgoing));
  }
}

}  // namespace

// We use a basic algorithm to detect components that are not connected to the
// rest of the graph in the LP solution, and add cuts to force some arcs to
// enter and leave this component from outside.
CutGenerator CreateStronglyConnectedGraphCutGenerator(
    int num_nodes, const std::vector<int>& tails,
    const std::vector<int>& heads, const std::vector<IntegerVariable>& vars) {
  CutGenerator result;
  result.vars = vars;
  result.generate_cuts =
      [num_nodes, tails, heads,
       vars](const gtl::ITIVector<IntegerVariable, double>& lp_values) {
        int num_arcs_in_lp_solution = 0;
        std::vector<double> var_lp_values;
        std::vector<std::vector<int>> graph(num_nodes);
        for (int i = 0; i < vars.size(); ++i) {
          var_lp_values.push_back(lp_values[vars[i]]);

          // TODO(user): a more advanced algorithm consists of adding the
          // arcs in the decreasing order of their lp_values, and for each
          // strongly connected component S along the way, trying to add the
          // corresponding cuts. We can stop as soon as there are only two
          // components left, after adding the corresponding cut.
          if (lp_values[vars[i]] > 1e-6) {
            ++num_arcs_in_lp_solution;
            graph[tails[i]].push_back(heads[i]);
          }
        }
        std::vector<LinearConstraint> cuts;
        std::vector<std::vector<int>> components;
        FindStronglyConnectedComponents(num_nodes, graph, &components);
        if (components.size() == 1) return cuts;
        VLOG(1) << "num_arcs_in_lp_solution:" << num_arcs_in_lp_solution
                << " sccs:" << components.size();
        for (const std::vector<int>& component : components) {
          if (component.size() == 1) continue;
          AddIncomingAndOutgoingCutsIfNeeded(num_nodes, component, tails,
                                             heads, vars, var_lp_values,
                                             /*rhs_lower_bound=*/1, &cuts);

          // In this case, the cuts for each component are the same.
          if (components.size() == 2) break;
        }
        return cuts;
      };
  return result;
}

CutGenerator CreateCVRPCutGenerator(int num_nodes,
                                    const std::vector<int>& tails,
                                    const std::vector<int>& heads,
                                    const std::vector<IntegerVariable>& vars,
                                    const std::vector<int64>& demands,
                                    int64 capacity) {
  CHECK_GT(capacity, 0);
  int64 total_demands = 0;
  for (const int64 demand : demands) total_demands += demand;

  CutGenerator result;
  result.vars = vars;
  result.generate_cuts =
      [num_nodes, tails, heads, total_demands, demands, capacity,
       vars](const gtl::ITIVector<IntegerVariable, double>& lp_values) {
        int num_arcs_in_lp_solution = 0;
        std::vector<double> var_lp_values;
        std::vector<std::vector<int>> graph(num_nodes);
        for (int i = 0; i < vars.size(); ++i) {
          var_lp_values.push_back(lp_values[vars[i]]);
          if (lp_values[vars[i]] > 1e-6) {
            ++num_arcs_in_lp_solution;
            graph[tails[i]].push_back(heads[i]);
          }
        }
        std::vector<LinearConstraint> cuts;
        std::vector<std::vector<int>> components;
        FindStronglyConnectedComponents(num_nodes, graph, &components);
        if (components.size() == 1) return cuts;
        VLOG(1) << "num_arcs_in_lp_solution:" << num_arcs_in_lp_solution
                << " sccs:" << components.size();
        for (const std::vector<int>& component : components) {
          if (component.size() == 1) continue;
          bool contain_depot = false;
          int64 component_demand = 0;
          for (const int node : component) {
            if (node == 0) contain_depot = true;
            component_demand += demands[node];
          }
          const int min_num_vehicles =
              contain_depot
                  ? (total_demands - component_demand + capacity - 1) /
                        capacity
                  : (component_demand + capacity - 1) / capacity;
          CHECK_GE(min_num_vehicles, 1);
          AddIncomingAndOutgoingCutsIfNeeded(
              num_nodes, component, tails, heads, vars, var_lp_values,
              /*rhs_lower_bound=*/min_num_vehicles, &cuts);

          // In this case, the cuts for each component are the same.
          if (components.size() == 2) break;
        }
        return cuts;
      };
  return result;
}

std::function<LiteralIndex()>
LinearProgrammingConstraint::HeuristicLPMostInfeasibleBinary(Model* model) {
  IntegerTrail* integer_trail = integer_trail_;
  IntegerEncoder* integer_encoder = model->GetOrCreate<IntegerEncoder>();
  // Gather all 0-1 variables that appear in some LP.
  std::vector<IntegerVariable> variables;
  for (IntegerVariable var : integer_variables_) {
    if (integer_trail_->LowerBound(var) == 0 &&
        integer_trail_->UpperBound(var) == 1) {
      variables.push_back(var);
    }
  }
  VLOG(1) << "HeuristicLPMostInfeasibleBinary has " << variables.size()
          << " variables.";

  return [this, variables, integer_trail, integer_encoder]() {
    const double kEpsilon = 1e-6;
    // Find the most fractional value.
    IntegerVariable fractional_var = kNoIntegerVariable;
    double fractional_distance_best = -1.0;
    for (const IntegerVariable var : variables) {
      // Skip ignored and fixed variables.
      if (integer_trail_->IsCurrentlyIgnored(var)) continue;
      const IntegerValue lb = integer_trail_->LowerBound(var);
      const IntegerValue ub = integer_trail_->UpperBound(var);
      if (lb == ub) continue;

      // Check that the variable's support is fractional.
      const double lp_value = this->GetSolutionValue(var);
      const double fractional_distance =
          std::min(std::ceil(lp_value - kEpsilon) - lp_value,
                   lp_value - std::floor(lp_value + kEpsilon));
      if (fractional_distance < kEpsilon) continue;

      // Keep the variable if it is farther from integrality than the
      // previous one.
      if (fractional_distance > fractional_distance_best) {
        fractional_var = var;
        fractional_distance_best = fractional_distance;
      }
    }

    if (fractional_var != kNoIntegerVariable) {
      return integer_encoder
          ->GetOrCreateAssociatedLiteral(
              IntegerLiteral::GreaterOrEqual(fractional_var, IntegerValue(1)))
          .Index();
    }
    return kNoLiteralIndex;
  };
}

std::function<LiteralIndex()>
LinearProgrammingConstraint::HeuristicLPPseudoCostBinary(Model* model) {
  // Gather all 0-1 variables that appear in this LP.
  std::vector<IntegerVariable> variables;
  for (IntegerVariable var : integer_variables_) {
    if (integer_trail_->LowerBound(var) == 0 &&
        integer_trail_->UpperBound(var) == 1) {
      variables.push_back(var);
    }
  }
  VLOG(1) << "HeuristicLPPseudoCostBinary has " << variables.size()
          << " variables.";

  // Store the average reduced cost from 1 to 0. The best heuristic only sets
  // variables to one and cares about the cost to zero, even though the
  // classic pseudocost will use max_var min(cost_to_one[var],
  // cost_to_zero[var]).
  const int num_vars = variables.size();
  std::vector<double> cost_to_zero(num_vars, 0.0);
  std::vector<int> num_cost_to_zero(num_vars);
  int num_calls = 0;

  IntegerEncoder* integer_encoder = model->GetOrCreate<IntegerEncoder>();
  return [=]() mutable {
    const double kEpsilon = 1e-6;

    // Every 10000 calls, decay pseudocosts.
    num_calls++;
    if (num_calls == 10000) {
      for (int i = 0; i < num_vars; i++) {
        cost_to_zero[i] /= 2;
        num_cost_to_zero[i] /= 2;
      }
      num_calls = 0;
    }

    // Accumulate pseudo-costs of all unassigned variables.
    for (int i = 0; i < num_vars; i++) {
      const IntegerVariable var = variables[i];
      // Skip ignored and fixed variables.
      if (integer_trail_->IsCurrentlyIgnored(var)) continue;
      const IntegerValue lb = integer_trail_->LowerBound(var);
      const IntegerValue ub = integer_trail_->UpperBound(var);
      if (lb == ub) continue;

      const double rc = this->GetSolutionReducedCost(var);
      // Skip reduced costs that are nonzero only because of numerical issues.
      if (std::abs(rc) < kEpsilon) continue;

      const double value = std::round(this->GetSolutionValue(var));
      if (value == 1.0 && rc < 0.0) {
        cost_to_zero[i] -= rc;
        num_cost_to_zero[i]++;
      }
    }

    // Select the noninstantiated variable with the highest pseudo-cost.
    int selected_index = -1;
    double best_cost = 0.0;
    for (int i = 0; i < num_vars; i++) {
      const IntegerVariable var = variables[i];
      // Skip ignored and fixed variables.
      if (integer_trail_->IsCurrentlyIgnored(var)) continue;
      const IntegerValue lb = integer_trail_->LowerBound(var);
      const IntegerValue ub = integer_trail_->UpperBound(var);
      if (lb == ub) continue;

      if (num_cost_to_zero[i] > 0 &&
          best_cost < cost_to_zero[i] / num_cost_to_zero[i]) {
        best_cost = cost_to_zero[i] / num_cost_to_zero[i];
        selected_index = i;
      }
    }

    if (selected_index >= 0) {
      const Literal decision = integer_encoder->GetOrCreateAssociatedLiteral(
          IntegerLiteral::GreaterOrEqual(variables[selected_index],
                                         IntegerValue(1)));
      return decision.Index();
    }

    return kNoLiteralIndex;
  };
}

std::function<LiteralIndex()>
LinearProgrammingConstraint::LPReducedCostAverageBranching() {
  if (!compute_reduced_cost_averages_) {
    compute_reduced_cost_averages_ = true;
    const int num_vars = integer_variables_.size();
    VLOG(1) << " LPReducedCostAverageBranching has #variables: " << num_vars;
    sum_cost_down_.resize(num_vars, 0.0);
    num_cost_down_.resize(num_vars, 0);
    sum_cost_up_.resize(num_vars, 0.0);
    num_cost_up_.resize(num_vars, 0);
  }

  return [this]() { return this->LPReducedCostAverageDecision(); };
}

LiteralIndex LinearProgrammingConstraint::LPReducedCostAverageDecision() {
  const int num_vars = integer_variables_.size();
  // Select the noninstantiated variable with the highest pseudo-cost.
  int selected_index = -1;
  double best_cost = 0.0;
  for (int i = 0; i < num_vars; i++) {
    const IntegerVariable var = this->integer_variables_[i];
    // Skip ignored and fixed variables.
    if (integer_trail_->IsCurrentlyIgnored(var)) continue;
    const IntegerValue lb = integer_trail_->LowerBound(var);
    const IntegerValue ub = integer_trail_->UpperBound(var);
    if (lb == ub) continue;

    // If only one direction exists, we take its value divided by 2, so that
    // such a variable should have a smaller cost than the min of the two
    // sides, except if one direction has a really high reduced cost.
    double cost_i = 0.0;
    if (num_cost_down_[i] > 0 && num_cost_up_[i] > 0) {
      cost_i = std::min(sum_cost_down_[i] / num_cost_down_[i],
                        sum_cost_up_[i] / num_cost_up_[i]);
    } else {
      const double divisor = num_cost_down_[i] + num_cost_up_[i];
      if (divisor != 0) {
        cost_i = 0.5 * (sum_cost_down_[i] + sum_cost_up_[i]) / divisor;
      }
    }

    if (selected_index == -1 || cost_i > best_cost) {
      best_cost = cost_i;
      selected_index = i;
    }
  }

  if (selected_index == -1) return kNoLiteralIndex;
  const IntegerVariable var = this->integer_variables_[selected_index];

  // If ceil(value) is the current upper bound, try var == upper bound first.
  // Guarding with >= prevents numerical problems.
  // With 0/1 variables, this will tend to try setting to 1 first,
  // which produces more shallow trees.
  const IntegerValue ub = integer_trail_->UpperBound(var);
  const IntegerValue value_ceil(
      std::ceil(this->GetSolutionValue(var) - kCpEpsilon));
  if (value_ceil >= ub) {
    return integer_encoder_
        ->GetOrCreateAssociatedLiteral(IntegerLiteral::GreaterOrEqual(var, ub))
        .Index();
  }

  // If floor(value) is the current lower bound, try var == lower bound first.
  // Guarding with <= prevents numerical problems.
  const IntegerValue lb = integer_trail_->LowerBound(var);
  const IntegerValue value_floor(
      std::floor(this->GetSolutionValue(var) + kCpEpsilon));
  if (value_floor <= lb) {
    return integer_encoder_
        ->GetOrCreateAssociatedLiteral(IntegerLiteral::LowerOrEqual(var, lb))
        .Index();
  }

  // Here lb < value_floor <= value_ceil < ub.
  // Try the most promising split between var <= floor or var >= ceil.
  if (sum_cost_down_[selected_index] / num_cost_down_[selected_index] <
      sum_cost_up_[selected_index] / num_cost_up_[selected_index]) {
    return integer_encoder_
        ->GetOrCreateAssociatedLiteral(
            IntegerLiteral::LowerOrEqual(var, value_floor))
        .Index();
  } else {
    return integer_encoder_
        ->GetOrCreateAssociatedLiteral(
            IntegerLiteral::GreaterOrEqual(var, value_ceil))
        .Index();
  }
}

}  // namespace sat
}  // namespace operations_research