diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 96f99ad131..92f81dde08 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -3420,7 +3420,8 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { if (!model_proto.assumptions().empty() && (params.num_workers() > 1 || model_proto.has_objective() || - model_proto.has_floating_point_objective())) { + model_proto.has_floating_point_objective() || + params.enumerate_all_solutions())) { SOLVER_LOG( logger, "Warning: solving with assumptions was requested in a non-fully " diff --git a/ortools/sat/lb_tree_search.cc b/ortools/sat/lb_tree_search.cc index 181dc043c6..70f9431630 100644 --- a/ortools/sat/lb_tree_search.cc +++ b/ortools/sat/lb_tree_search.cc @@ -565,17 +565,12 @@ SatSolver::Status LbTreeSearch::Search( objective_var_, integer_trail_->LowerBound(objective_var_))); std::vector decisions = ExtractDecisions(base_level, reason); - // Because we will re-enqueue these decision, we change the order so that - // if one is implied by others, it will be skipped. - std::reverse(decisions.begin(), decisions.end()); - // Bump activities. sat_decision_->BumpVariableActivities(reason); sat_decision_->BumpVariableActivities(decisions); sat_decision_->UpdateVariableActivityIncrement(); // Create one node per new decisions. - const int old_size = current_branch_.size(); CHECK_EQ(current_branch_.size(), base_level); for (const Literal d : decisions) { AppendNewNodeToCurrentBranch(d); @@ -593,10 +588,18 @@ SatSolver::Status LbTreeSearch::Search( } // Reset the solver to a correct state since we have a subset of the - // current propagation. + // current propagation. We backtrack as little as possible. // - // TODO(user): no need to backtrack if the prefix is correct? - sat_solver_->Backtrack(base_level); + // The decision level is the number of decision taken. + // Decision()[level] is the decision at that level. 
+ int backtrack_level = base_level; + CHECK_LE(current_branch_.size(), sat_solver_->CurrentDecisionLevel()); + while (backtrack_level < current_branch_.size() && + sat_solver_->Decisions()[backtrack_level].literal == + nodes_[current_branch_[backtrack_level]].literal) { + ++backtrack_level; + } + sat_solver_->Backtrack(backtrack_level); // Update bounds with reduced costs info. // @@ -604,7 +607,7 @@ SatSolver::Status LbTreeSearch::Search( // backtracked over? // // TODO(user): We could do all at once rather than in O(#decision * #size). - for (int i = old_size; i < current_branch_.size(); ++i) { + for (int i = backtrack_level; i < current_branch_.size(); ++i) { ExploitReducedCosts(current_branch_[i]); } } diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index 8ed06e85bc..bd74a818b5 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -1746,8 +1746,8 @@ IntegerValue LinearProgrammingConstraint::GetImpliedLowerBound( return lower_bound; } -bool LinearProgrammingConstraint::PossibleOverflow( - const LinearConstraint& constraint) { +bool PossibleOverflow(const IntegerTrail& integer_trail, + const LinearConstraint& constraint) { IntegerValue lower_bound(0); const int size = constraint.vars.size(); for (int i = 0; i < size; ++i) { @@ -1755,18 +1755,15 @@ bool LinearProgrammingConstraint::PossibleOverflow( const IntegerValue coeff = constraint.coeffs[i]; CHECK_NE(coeff, 0); const IntegerValue bound = coeff > 0 - ? integer_trail_->LevelZeroLowerBound(var) - : integer_trail_->LevelZeroUpperBound(var); + ? 
integer_trail.LevelZeroLowerBound(var) + : integer_trail.LevelZeroUpperBound(var); if (!AddProductTo(bound, coeff, &lower_bound)) { return true; } } - const int64_t slack = CapAdd(lower_bound.value(), -constraint.ub.value()); - if (slack == std::numeric_limits::min() || - slack == std::numeric_limits::max()) { - return true; - } - return false; + const int64_t slack = CapSub(constraint.ub.value(), lower_bound.value()); + return slack == std::numeric_limits::min() || + slack == std::numeric_limits::max(); } namespace { @@ -1778,43 +1775,17 @@ absl::int128 FloorRatio128(absl::int128 x, IntegerValue positive_div) { return result; } -} // namespace - -void LinearProgrammingConstraint::PreventOverflow(LinearConstraint* constraint, - int max_pow) { - // First, make all coefficient positive. - MakeAllCoefficientsPositive(constraint); - - // Compute the max possible value that will appear when propagating this - // constraint. This relies on the propagator computing the implied lower - // bound, computing the slack (rhs_ub - implied_lb) and comparing the - // max_delta of each variable to the slack. - // - // Note(user): it is important to be as precise as possible here. It is why - // we try to be tight in our evaluation of possible overflow. 
- double sum_min_neg = 0.0; - double sum_min_pos = 0.0; - double max_delta = 0.0; - const int size = constraint->vars.size(); - for (int i = 0; i < size; ++i) { - const IntegerVariable var = constraint->vars[i]; - const double coeff = ToDouble(constraint->coeffs[i]); - const IntegerValue lb = integer_trail_->LevelZeroLowerBound(var); - const IntegerValue ub = integer_trail_->LevelZeroUpperBound(var); - if (lb > 0) { - sum_min_pos += coeff * ToDouble(lb); - } else { - sum_min_neg += coeff * ToDouble(lb); - } - max_delta = std::max(max_delta, ToDouble(ub - lb) * coeff); - } - const double max_value = - std::max({-sum_min_neg, sum_min_pos, max_delta, - ToDouble(constraint->ub) - (sum_min_pos + sum_min_neg)}); - - const IntegerValue divisor(std::ceil(std::ldexp(max_value, -max_pow))); - if (divisor <= 1) return; +absl::int128 CeilRatio128(absl::int128 x, absl::int128 div128) { + absl::int128 result = x / div128; + if (result * div128 < x) return result + 1; + return result; +} +// TODO(user): This code is tricky and similar to the one to generate cuts. +// Maybe reduce the duplication? note however that here we use int128 to deal +// with potential overflow. +void DivideConstraint(const IntegerTrail& integer_trail, IntegerValue divisor, + LinearConstraint* constraint) { // To be correct, we need to shift all variable so that they are positive. // // Important: One might be tempted to think that using the current variable @@ -1823,12 +1794,9 @@ void LinearProgrammingConstraint::PreventOverflow(LinearConstraint* constraint, // one term become zero), we might loose the fact that we used one of the // variable bound to derive the new constraint, so we will miss it in the // explanation !! - // - // TODO(user): This code is tricky and similar to the one to generate cuts. - // Test and may reduce the duplication? note however that here we use int128 - // to deal with potential overflow. 
int new_size = 0; absl::int128 adjust = 0; + const int size = constraint->vars.size(); for (int i = 0; i < size; ++i) { const IntegerValue old_coeff = constraint->coeffs[i]; const IntegerValue new_coeff = FloorRatio(old_coeff, divisor); @@ -1840,7 +1808,7 @@ void LinearProgrammingConstraint::PreventOverflow(LinearConstraint* constraint, adjust += remainder * absl::int128( - integer_trail_->LevelZeroLowerBound(constraint->vars[i]).value()); + integer_trail.LevelZeroLowerBound(constraint->vars[i]).value()); if (new_coeff == 0) continue; constraint->vars[new_size] = constraint->vars[i]; @@ -1850,10 +1818,100 @@ void LinearProgrammingConstraint::PreventOverflow(LinearConstraint* constraint, constraint->vars.resize(new_size); constraint->coeffs.resize(new_size); + // TODO(user): I am not 100% sure this cannot overflow. If it does it means + // our reduced constraint is trivial though, and we can cap it. constraint->ub = IntegerValue(static_cast( FloorRatio128(absl::int128(constraint->ub.value()) - adjust, divisor))); } +} // namespace + +// The goal here is to prevent overflow in the IntegerSumLE propagation code. +// We want to be as tight as possible. We do it in two steps, which is not ideal +// but easier. +void PreventOverflow(const IntegerTrail& integer_trail, + LinearConstraint* constraint) { + // We use kint64max - 1 so that PossibleOverflow() can distinguish overflow + // for a sum exactly equal to kint64max. + const absl::int128 threshold(std::numeric_limits::max() - 1); + + // First, make all coefficient positive. + MakeAllCoefficientsPositive(constraint); + + // First step is to make sure coeff * (ub - lb) and coeff * lb will not + // overflow. Note that we already know (ub - lb) cannot overflow. 
+ { + absl::int128 max_delta = 0; + const int size = constraint->vars.size(); + for (int i = 0; i < size; ++i) { + const IntegerVariable var = constraint->vars[i]; + const IntegerValue lb = integer_trail.LevelZeroLowerBound(var); + const IntegerValue ub = integer_trail.LevelZeroUpperBound(var); + const absl::int128 coeff(constraint->coeffs[i].value()); + const absl::int128 diff( + std::max({IntTypeAbs(lb), IntTypeAbs(ub), ub - lb}).value()); + max_delta = std::max(max_delta, coeff * diff); + } + if (max_delta > threshold) { + const IntegerValue divisor( + static_cast(CeilRatio128(max_delta, threshold))); + DivideConstraint(integer_trail, divisor, constraint); + } + } + + // Second step is to make sure computing the lower bound will not overflow + // whatever the order and at whatever level. And also that computing the slack + // will not overflow. + // + // Note that because each term fit on an int64_t per first step, we will not + // have int128 overflow. + // + // TODO(user): We could change the propag to detect a conflict without + // computing the full activity, and thus avoid some overflow. Like precompute + // a base lb and then compute the activity from there? Or we could have + // a custom code here, actually we only propagate this with the current lb + // instead of the LevelZeroUpperBound(). Or we could just propagate using + // int128 arithmetic. 
+ { + absl::int128 sum_min_neg = 0; + absl::int128 sum_min_pos = 0; + absl::int128 sum_max_neg = 0; + absl::int128 sum_max_pos = 0; + const int size = constraint->vars.size(); + for (int i = 0; i < size; ++i) { + const IntegerVariable var = constraint->vars[i]; + const absl::int128 coeff(constraint->coeffs[i].value()); + const absl::int128 lb(integer_trail.LevelZeroLowerBound(var).value()); + if (lb > 0) { + sum_min_pos += coeff * lb; + } else { + sum_min_neg += coeff * lb; + } + const absl::int128 ub(integer_trail.LevelZeroUpperBound(var).value()); + if (ub > 0) { + sum_max_pos += coeff * ub; + } else { + sum_max_neg += coeff * ub; + } + } + const absl::int128 min_slack = + static_cast(constraint->ub.value()) - + (sum_min_pos + sum_min_neg); + const absl::int128 max_slack = + static_cast(constraint->ub.value()) - + (sum_max_pos + sum_max_neg); + const absl::int128 max_value = + std::max({-sum_min_neg, sum_min_pos, sum_min_pos + sum_min_neg, + -sum_max_neg, sum_max_pos, sum_max_pos + sum_max_neg, + min_slack, -min_slack, max_slack, -max_slack}); + if (max_value > threshold) { + const IntegerValue divisor( + static_cast(CeilRatio128(max_value, threshold))); + DivideConstraint(integer_trail, divisor, constraint); + } + } +} + // TODO(user): combine this with RelaxLinearReason() to avoid the extra // magnitude vector and the weird precondition of RelaxLinearReason(). void LinearProgrammingConstraint::SetImpliedLowerBoundReason( @@ -2183,8 +2241,8 @@ bool LinearProgrammingConstraint::ExactLpReasonning() { tmp_constraint_.vars.push_back(objective_cp_); tmp_constraint_.coeffs.push_back(-obj_scale); DivideByGCD(&tmp_constraint_); - PreventOverflow(&tmp_constraint_); - DCHECK(!PossibleOverflow(tmp_constraint_)); + PreventOverflow(*integer_trail_, &tmp_constraint_); + DCHECK(!PossibleOverflow(*integer_trail_, tmp_constraint_)); DCHECK(constraint_manager_.DebugCheckConstraint(tmp_constraint_)); // Corner case where prevent overflow removed all terms. 
@@ -2232,8 +2290,8 @@ bool LinearProgrammingConstraint::FillExactDualRayReason() { tmp_scattered_vector_.ConvertToLinearConstraint( integer_variables_, new_constraint_ub, &tmp_constraint_); DivideByGCD(&tmp_constraint_); - PreventOverflow(&tmp_constraint_); - DCHECK(!PossibleOverflow(tmp_constraint_)); + PreventOverflow(*integer_trail_, &tmp_constraint_); + DCHECK(!PossibleOverflow(*integer_trail_, tmp_constraint_)); DCHECK(constraint_manager_.DebugCheckConstraint(tmp_constraint_)); const IntegerValue implied_lb = GetImpliedLowerBound(tmp_constraint_); diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index d68aa1094b..2ebbebffaa 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -347,18 +347,6 @@ class LinearProgrammingConstraint : public PropagatorInterface, // current variable bound. Return kMinIntegerValue in case of overflow. IntegerValue GetImpliedLowerBound(const LinearConstraint& terms) const; - // Tests for possible overflow in the propagation of the given linear - // constraint. - bool PossibleOverflow(const LinearConstraint& constraint); - - // Reduce the coefficient of the constraint so that we cannot have overflow - // in the propagation of the given linear constraint. Note that we may loose - // some strength by doing so. - // - // We make sure that any partial sum involving any variable value in their - // domain do not exceed 2 ^ max_pow. - void PreventOverflow(LinearConstraint* constraint, int max_pow = 62); - // Fills integer_reason_ with the reason for the implied lower bound of the // given linear expression. We relax the reason if we have some slack. void SetImpliedLowerBoundReason(const LinearConstraint& terms, @@ -570,6 +558,17 @@ class LinearProgrammingDispatcher class LinearProgrammingConstraintCollection : public std::vector {}; +// Tests for possible overflow in the propagation of the given linear +// constraint. 
+bool PossibleOverflow(const IntegerTrail& integer_trail, + const LinearConstraint& constraint); + +// Reduce the coefficient of the constraint so that we cannot have overflow +// in the propagation of the given linear constraint. Note that we may lose +// some strength by doing so. +void PreventOverflow(const IntegerTrail& integer_trail, + LinearConstraint* constraint); + // TODO(user): Move the cut "graph" based cut generator out of this class, there // is no reason to keep them here. diff --git a/ortools/sat/presolve_util.cc b/ortools/sat/presolve_util.cc index 535ec0f882..bd44356071 100644 --- a/ortools/sat/presolve_util.cc +++ b/ortools/sat/presolve_util.cc @@ -58,6 +58,14 @@ void DomainDeductions::AddDeduction(int literal_ref, int var, Domain domain) { } } +Domain DomainDeductions::ImpliedDomain(int literal_ref, int var) const { + CHECK_GE(var, 0); + const Index index = IndexFromLiteral(literal_ref); + const auto it = deductions_.find({index, var}); + if (it == deductions_.end()) return Domain::AllValues(); + return it->second; +} + std::vector> DomainDeductions::ProcessClause( absl::Span clause) { std::vector> result; diff --git a/ortools/sat/presolve_util.h b/ortools/sat/presolve_util.h index 7142fefba6..dddb7e760f 100644 --- a/ortools/sat/presolve_util.h +++ b/ortools/sat/presolve_util.h @@ -55,6 +55,10 @@ class DomainDeductions { // of the current variable domain. void AddDeduction(int literal_ref, int var, Domain domain); + // Returns the domain of var when literal_ref is true. + // If there is no information, returns Domain::AllValues(). + Domain ImpliedDomain(int literal_ref, int var) const; + // Returns list of (var, domain) that were deduced because: // 1/ We have a domain deduction for var and all literal from the clause // 2/ So we can take the union of all the deduced domains.
@@ -76,7 +80,7 @@ class DomainDeductions { private: DEFINE_STRONG_INDEX_TYPE(Index); - Index IndexFromLiteral(int ref) { + Index IndexFromLiteral(int ref) const { return Index(ref >= 0 ? 2 * ref : -2 * ref - 1); } diff --git a/ortools/sat/var_domination.cc b/ortools/sat/var_domination.cc index 87fcc0f1cc..076d31bc88 100644 --- a/ortools/sat/var_domination.cc +++ b/ortools/sat/var_domination.cc @@ -563,20 +563,24 @@ void DualBoundStrengthening::CannotIncrease(absl::Span refs, } } -void DualBoundStrengthening::CannotMove(absl::Span refs) { +void DualBoundStrengthening::CannotMove(absl::Span refs, + int ct_index) { for (const int ref : refs) { const IntegerVariable var = RefToIntegerVariable(ref); can_freely_decrease_until_[var] = kMaxIntegerValue; can_freely_decrease_until_[NegationOf(var)] = kMaxIntegerValue; num_locks_[var]++; num_locks_[NegationOf(var)]++; + locking_ct_index_[var] = ct_index; + locking_ct_index_[NegationOf(var)] = ct_index; } } template void DualBoundStrengthening::ProcessLinearConstraint( bool is_objective, const PresolveContext& context, - const LinearProto& linear, int64_t min_activity, int64_t max_activity) { + const LinearProto& linear, int64_t min_activity, int64_t max_activity, + int ct_index) { const int64_t lb_limit = linear.domain(linear.domain_size() - 2); const int64_t ub_limit = linear.domain(1); const int num_terms = linear.vars_size(); @@ -596,6 +600,7 @@ void DualBoundStrengthening::ProcessLinearConstraint( // lb side. if (min_activity < lb_limit) { num_locks_[var]++; + locking_ct_index_[var] = ct_index; if (min_activity + term_diff < lb_limit) { can_freely_decrease_until_[var] = kMaxIntegerValue; } else { @@ -619,6 +624,7 @@ void DualBoundStrengthening::ProcessLinearConstraint( // ub side. 
if (max_activity > ub_limit) { num_locks_[NegationOf(var)]++; + locking_ct_index_[NegationOf(var)] = ct_index; if (max_activity - term_diff > ub_limit) { can_freely_decrease_until_[NegationOf(var)] = kMaxIntegerValue; } else { @@ -698,34 +704,104 @@ bool DualBoundStrengthening::Strengthen(PresolveContext* context) { } } - // If (a => b) is the only constraint blocking a literal a in the up - // direction, then we can set a == b ! + // If there is only one blocking constraint, we can simplify the problem in + // a few situations. // - // TODO(user): We can deal with more general situation. For instance an at - // most one that is the only blocking constraint can become an exactly one. + // TODO(user): Cover all the cases. std::vector processed(num_vars, false); - for (int positive_ref = 0; positive_ref < num_vars; ++positive_ref) { + for (IntegerVariable var(0); var < num_locks_.size(); ++var) { + const int ref = VarDomination::IntegerVariableToRef(var); + const int positive_ref = PositiveRef(ref); if (processed[positive_ref]) continue; if (context->IsFixed(positive_ref)) continue; - const IntegerVariable var = RefToIntegerVariable(positive_ref); - int ct_index = -1; - if (num_locks_[var] == 1 && locking_ct_index_[var] != -1) { - ct_index = locking_ct_index_[var]; - } else if (num_locks_[NegationOf(var)] == 1 && - locking_ct_index_[NegationOf(var)] != -1) { - ct_index = locking_ct_index_[NegationOf(var)]; - } else { + if (num_locks_[var] != 1) continue; + if (locking_ct_index_[var] == -1) { + context->UpdateRuleStats( + "TODO dual: only one unspecified blocking constraint?"); continue; } + + const int ct_index = locking_ct_index_[var]; const ConstraintProto& ct = context->working_model->constraints(ct_index); if (ct.constraint_case() == ConstraintProto::kAtMostOne) { context->UpdateRuleStats("TODO dual: tighten at most one"); continue; } - if (ct.constraint_case() != ConstraintProto::kBoolAnd) continue; + if (ct.constraint_case() != ConstraintProto::kBoolAnd) { + // 
If we have an enforcement literal then we can always add the + // implication "not enforced => var at its lower bound". + // + // TODO(user): We can also deal with more than one enforcement. + if (ct.enforcement_literal().size() == 1 && + PositiveRef(ct.enforcement_literal(0)) != positive_ref) { + const int enf = ct.enforcement_literal(0); + const int64_t bound = RefIsPositive(ref) ? context->MinOf(positive_ref) + : context->MaxOf(positive_ref); + const Domain implied = + context->DomainOf(positive_ref) + .IntersectionWith( + context->deductions.ImpliedDomain(enf, positive_ref)); + if (implied.IsFixed()) { + // Corner case. + if (implied.FixedValue() == bound) { + context->UpdateRuleStats("dual: fix variable"); + if (!context->IntersectDomainWith(positive_ref, implied)) { + return false; + } + continue; + } + + // Note(user): If we have enforced => var fixed, we could actually + // just have removed var from the constraint if it was implied by + // another constraint. If not, because of the new affine relation we + // could remove it right away. + processed[PositiveRef(enf)] = true; + processed[positive_ref] = true; + context->UpdateRuleStats("dual: affine relation"); + if (RefIsPositive(enf)) { + // positive_ref = enf * implied + (1 - enf) * bound. + if (!context->StoreAffineRelation( + positive_ref, enf, implied.FixedValue() - bound, bound)) { + return false; + } + } else { + // positive_ref = (1 - enf) * implied + enf * bound. + if (!context->StoreAffineRelation(positive_ref, PositiveRef(enf), + bound - implied.FixedValue(), + implied.FixedValue())) { + return false; + } + } + continue; + } + + if (context->CanBeUsedAsLiteral(positive_ref)) { + // If we have a literal, we always add the implication. + // This seems like a good thing to do. 
+ processed[PositiveRef(enf)] = true; + processed[positive_ref] = true; + context->UpdateRuleStats("dual: add implication"); + context->AddImplication(NegatedRef(enf), NegatedRef(ref)); + context->UpdateNewConstraintsVariableUsage(); + continue; + } + + // We can add an implication not_enforced => var to its bound ? + context->UpdateRuleStats("TODO dual: add implied bound"); + } else if (!ct.enforcement_literal().empty()) { + context->UpdateRuleStats( + "TODO dual: only one blocking enforced constraint?"); + } else { + context->UpdateRuleStats("TODO dual: only one blocking constraint?"); + } + continue; + } if (ct.enforcement_literal().size() != 1) continue; + // If (a => b) is the only constraint blocking a literal a in the up + // direction, then we can set a == b ! + // // Recover a => b where a is having an unique up_lock (i.e this constraint). // Note that if many implications are encoded in the same bool_and, we have // to be careful that a is appearing in just one of them. @@ -833,7 +909,8 @@ void DetectDominanceRelations( switch (ct.constraint_case()) { case ConstraintProto::kBoolOr: if (phase == 0) { - dual_bound_strengthening->CannotDecrease(ct.bool_or().literals()); + dual_bound_strengthening->CannotDecrease(ct.bool_or().literals(), + c); } var_domination->ActivityShouldNotDecrease(ct.enforcement_literal(), ct.bool_or().literals(), @@ -875,7 +952,8 @@ void DetectDominanceRelations( break; case ConstraintProto::kExactlyOne: if (phase == 0) { - dual_bound_strengthening->CannotMove(ct.exactly_one().literals()); + dual_bound_strengthening->CannotMove(ct.exactly_one().literals(), + c); } var_domination->ActivityShouldNotChange(ct.exactly_one().literals(), /*coeffs=*/{}); @@ -885,7 +963,7 @@ void DetectDominanceRelations( &max_activity); if (phase == 0) { dual_bound_strengthening->ProcessLinearConstraint( - false, context, ct.linear(), min_activity, max_activity); + false, context, ct.linear(), min_activity, max_activity, c); } const bool domain_is_simple = 
ct.linear().domain().size() == 2; const bool free_to_increase = @@ -916,7 +994,8 @@ void DetectDominanceRelations( // We cannot infer anything if we don't know the constraint. // TODO(user): Handle enforcement better here. if (phase == 0) { - dual_bound_strengthening->CannotMove(context.ConstraintToVars(c)); + dual_bound_strengthening->CannotMove(context.ConstraintToVars(c), + c); } for (const int var : context.ConstraintToVars(c)) { var_domination->CanOnlyDominateEachOther({var}); diff --git a/ortools/sat/var_domination.h b/ortools/sat/var_domination.h index 4a6f5cb50c..57606dc5a2 100644 --- a/ortools/sat/var_domination.h +++ b/ortools/sat/var_domination.h @@ -229,14 +229,14 @@ class DualBoundStrengthening { // All constraints should be mapped to one of more call to these functions. void CannotDecrease(absl::Span refs, int ct_index = -1); void CannotIncrease(absl::Span refs, int ct_index = -1); - void CannotMove(absl::Span refs); + void CannotMove(absl::Span refs, int ct_index = -1); // Most of the logic here deals with linear constraints. template void ProcessLinearConstraint(bool is_objective, const PresolveContext& context, const LinearProto& linear, int64_t min_activity, - int64_t max_activity); + int64_t max_activity, int ct_index = -1); // Once ALL constraints have been processed, call this to fix variables or // reduce their domain if possible.