diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 320c096ff3..f465a282bc 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -1861,15 +1861,11 @@ std::function NewFeasibleSolutionObserver( } std::function NewSatParameters(const std::string& params) { - return [=](Model* model) { - sat::SatParameters parameters; - if (!params.empty()) { - CHECK(google::protobuf::TextFormat::ParseFromString(params, ¶meters)) << params; - model->GetOrCreate()->SetParameters(parameters); - model->SetSingleton(TimeLimit::FromParameters(parameters)); - } - return parameters; - }; + sat::SatParameters parameters; + if (!params.empty()) { + CHECK(google::protobuf::TextFormat::ParseFromString(params, ¶meters)) << params; + } + return NewSatParameters(parameters); } std::function NewSatParameters( @@ -1934,12 +1930,15 @@ CpSolverResponse SolveCpModelInternal( } // We propagate after each new Boolean constraint but not the integer - // ones. So we call Propagate() manually here. - // TODO(user): Do that automatically? - model->GetOrCreate()->Propagate(); - if (is_real_solve && trail->Index() > old_num_fixed) { - VLOG(1) << "Constraint fixed " << trail->Index() - old_num_fixed - << " Boolean variable(s): " << ct.ShortDebugString(); + // ones. So we call Propagate() manually here. Note that we do not do + // that in the postsolve as there is some corner case where propagating + // after each new constraint can have a quadratic behavior. 
+ if (is_real_solve) { + model->GetOrCreate()->Propagate(); + if (trail->Index() > old_num_fixed) { + VLOG(1) << "Constraint fixed " << trail->Index() - old_num_fixed + << " Boolean variable(s): " << ct.ShortDebugString(); + } } if (model->GetOrCreate()->IsModelUnsat()) { VLOG(1) << "UNSAT during extraction (after adding '" diff --git a/ortools/sat/integer.cc b/ortools/sat/integer.cc index acb048447f..c038fd9567 100644 --- a/ortools/sat/integer.cc +++ b/ortools/sat/integer.cc @@ -204,12 +204,27 @@ Literal IntegerEncoder::GetOrCreateAssociatedLiteral(IntegerLiteral i_lit) { } ++num_created_variables_; - const BooleanVariable new_var = sat_solver_->NewBooleanVariable(); - const Literal literal(new_var, true); + const Literal literal(sat_solver_->NewBooleanVariable(), true); AssociateToIntegerLiteral(literal, new_lit); return literal; } +Literal IntegerEncoder::GetOrCreateLiteralAssociatedToEquality( + IntegerVariable var, IntegerValue value) { + { + const std::pair key{var, value}; + const auto it = equality_to_associated_literal_.find(key); + if (it != equality_to_associated_literal_.end()) { + return it->second; + } + } + + ++num_created_variables_; + const Literal literal(sat_solver_->NewBooleanVariable(), true); + AssociateToIntegerEqualValue(literal, var, value); + return literal; +} + void IntegerEncoder::AssociateToIntegerLiteral(Literal literal, IntegerLiteral i_lit) { const auto& domain = (*domains_)[i_lit.var]; @@ -392,6 +407,7 @@ IntegerVariable IntegerTrail::AddIntegerVariable(IntegerValue lower_bound, const IntegerVariable i(vars_.size()); is_ignored_literals_.push_back(kNoLiteralIndex); vars_.push_back({lower_bound, static_cast(integer_trail_.size())}); + var_trail_index_cache_.push_back(integer_trail_.size()); integer_trail_.push_back({lower_bound, i}); domains_->push_back({{lower_bound.value(), upper_bound.value()}}); @@ -401,6 +417,7 @@ IntegerVariable IntegerTrail::AddIntegerVariable(IntegerValue lower_bound, CHECK_EQ(NegationOf(i).value(), 
vars_.size()); is_ignored_literals_.push_back(kNoLiteralIndex); vars_.push_back({-upper_bound, static_cast(integer_trail_.size())}); + var_trail_index_cache_.push_back(integer_trail_.size()); integer_trail_.push_back({-upper_bound, NegationOf(i)}); domains_->push_back({{-upper_bound.value(), -lower_bound.value()}}); @@ -533,11 +550,33 @@ int IntegerTrail::FindLowestTrailIndexThatExplainBound( DCHECK_LE(i_lit.bound, vars_[i_lit.var].current_bound); if (i_lit.bound <= LevelZeroBound(i_lit.var)) return -1; int trail_index = vars_[i_lit.var].current_trail_index; + + // Check the validity of the cached index and use it if possible. This caching + // mechanism is important in case of long chain of propagation on the same + // variable. Because during conflict resolution, we call + // FindLowestTrailIndexThatExplainBound() with lowest and lowest bound, this + // cache can transform a quadratic complexity into a linear one. + { + const int cached_index = var_trail_index_cache_[i_lit.var]; + if (cached_index < trail_index) { + const TrailEntry& entry = integer_trail_[cached_index]; + if (entry.var == i_lit.var && entry.bound >= i_lit.bound) { + trail_index = cached_index; + } + } + } + int prev_trail_index = trail_index; while (true) { const TrailEntry& entry = integer_trail_[trail_index]; - if (entry.bound == i_lit.bound) return trail_index; - if (entry.bound < i_lit.bound) return prev_trail_index; + if (entry.bound == i_lit.bound) { + var_trail_index_cache_[i_lit.var] = trail_index; + return trail_index; + } + if (entry.bound < i_lit.bound) { + var_trail_index_cache_[i_lit.var] = prev_trail_index; + return prev_trail_index; + } prev_trail_index = trail_index; trail_index = entry.prev_trail_index; } diff --git a/ortools/sat/integer.h b/ortools/sat/integer.h index 05591fa360..006d67e7b9 100644 --- a/ortools/sat/integer.h +++ b/ortools/sat/integer.h @@ -263,6 +263,8 @@ class IntegerEncoder { // one if they exist. This is the "list encoding" in: Thibaut Feydy, Peter J. 
// Stuckey, "Lazy Clause Generation Reengineered", CP 2009. Literal GetOrCreateAssociatedLiteral(IntegerLiteral i_lit); + Literal GetOrCreateLiteralAssociatedToEquality(IntegerVariable var, + IntegerValue value); // Associates the Boolean literal to (X >= bound) or (X == value). If a // literal was already associated to this fact, this will add an equality @@ -601,6 +603,12 @@ class IntegerTrail : public SatPropagator { }; ITIVector vars_; + // This is used by FindLowestTrailIndexThatExplainBound() to speed up + // the lookup. It keeps a trail index for each variable that may or may not + // point to a TrailEntry regarding this variable. The validity of the index is + // verified before being used. + mutable ITIVector var_trail_index_cache_; + // Used by GetOrCreateConstantIntegerVariable() to return already created // constant variables that share the same value. std::unordered_map constant_map_; diff --git a/ortools/sat/lp_utils.cc b/ortools/sat/lp_utils.cc index 8efd554bbc..5f876d95d9 100644 --- a/ortools/sat/lp_utils.cc +++ b/ortools/sat/lp_utils.cc @@ -58,10 +58,15 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model, // cannot go that much further because we need to make sure we will not run // into overflow if we add a big linear combination of such variables. It // should always be possible for an user to scale its problem so that all - // relevant quantities are under a billion. A LP/MIP solver have a similar - // condition in disguise because problem with a difference of more than 6 - // magnitude between the variable values will likely run into numeric trouble. - const int64 kMaxVariableBound = 1ll << 30; + // relevant quantities are a couple of millions. A LP/MIP solver have a + // similar condition in disguise because problem with a difference of more + // than 6 magnitudes between the variable values will likely run into numeric + // trouble. 
+ const int64 kMaxVariableBound = static_cast(1e7); + + int num_truncated_bounds = 0; + int num_small_domains = 0; + const int64 kSmallDomainSize = 1000; // Add the variables. const int num_variables = mp_model.variable_size(); @@ -73,37 +78,32 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model, // Note that we must process the lower bound first. for (const bool lower : {true, false}) { const double bound = lower ? mp_var.lower_bound() : mp_var.upper_bound(); - if (std::abs(bound) == kInfinity) { + if (std::abs(bound) >= kMaxVariableBound) { + if (std::abs(bound) != kInfinity) ++num_truncated_bounds; cp_var->add_domain(lower ? -kMaxVariableBound : kMaxVariableBound); continue; } - // Reject larger bound than kMaxVariableBound. We also reject the equality - // so that after the solve, we can detect if one of our "artificial" - // bounds that we add on unbounded variable is restricting the objective. - if (std::floor(std::abs(bound)) >= kMaxVariableBound) { - LOG(ERROR) << "Large bound : " << bound; - return false; - } + // Note that the cast is "perfect" because we forbid large values. + cp_var->add_domain( + static_cast(lower ? std::ceil(bound) : std::floor(bound))); + } - if (mp_var.is_integer()) { - // Note that the cast is "perfect" because we forbid large values. - cp_var->add_domain( - static_cast(lower ? std::ceil(bound) : std::floor(bound))); - } else { - // Continuous variable. We reject non-integer bounds. - // We do nothing if the domain is really small though. - // - // TODO(user): scale the domain. - if (bound != std::round(bound)) { - LOG(ERROR) << "Non-integer bound not supported: " << bound; - return false; - } - cp_var->add_domain(static_cast(bound)); - } + // Notify if a continuous variable has a small domain as this is likely to + // make an all integer solution far from a continuous one. 
+ if (!mp_var.is_integer() && + cp_var->domain(1) - cp_var->domain(0) < kSmallDomainSize) { + ++num_small_domains; } } + LOG_IF(WARNING, num_truncated_bounds > 0) + << num_truncated_bounds << " bounds where truncated to " + << kMaxVariableBound << "."; + LOG_IF(WARNING, num_small_domains > 0) + << num_small_domains << " continuous variable domain with less than " + << kSmallDomainSize << " values."; + // Variables needed to scale the double coefficients into int64. double max_relative_coeff_error = 0.0; double max_scaled_sum_error = 0.0; @@ -237,7 +237,7 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model, } // Test the precision of the conversion. - const double kRelativeTolerance = 1e-4; + const double kRelativeTolerance = 1e-2; if (max_relative_coeff_error > kRelativeTolerance) { LOG(WARNING) << "The relative error during double -> int64 conversion " << "is too high!"; diff --git a/ortools/sat/optimization.cc b/ortools/sat/optimization.cc index 83d0234cbd..f94363b965 100644 --- a/ortools/sat/optimization.cc +++ b/ortools/sat/optimization.cc @@ -216,6 +216,7 @@ void MinimizeCore(SatSolver* solver, std::vector* core) { std::vector temp = *core; std::reverse(temp.begin(), temp.end()); solver->Backtrack(0); + solver->SetAssumptionLevel(0); // Note that this Solve() is really fast, since the solver should detect that // the assumptions are unsat with unit propagation only. This is just a @@ -225,6 +226,7 @@ void MinimizeCore(SatSolver* solver, std::vector* core) { solver->ResetAndSolveWithGivenAssumptions(temp); if (status != SatSolver::ASSUMPTIONS_UNSAT) { if (status != SatSolver::LIMIT_REACHED) { + CHECK_NE(status, SatSolver::MODEL_SAT); // This should almost never happen, but it is not impossible. The reason // is that the solver may delete some learned clauses required by the unit // propagation to show that the core is unsat. 
@@ -235,12 +237,64 @@ void MinimizeCore(SatSolver* solver, std::vector* core) { } temp = solver->GetLastIncompatibleDecisions(); if (temp.size() < core->size()) { - VLOG(1) << "old core size " << core->size(); + VLOG(1) << "minimization " << core->size() << " -> " << temp.size(); std::reverse(temp.begin(), temp.end()); *core = temp; } } +void MinimizeCoreWithPropagation(SatSolver* solver, + std::vector* core) { + std::set moved_last; + std::deque candidate(core->begin(), core->end()); + while (true) { + { + // Cyclic shift. We stop as soon as we are back in the original core + // order. Because we always preserve the order in the candidate reduction + // below, we can abort the first time we see someone we moved back. + CHECK(!candidate.empty()); + const Literal temp = candidate[0]; + if (ContainsKey(moved_last, temp.Index())) break; + moved_last.insert(temp.Index()); + candidate.erase(candidate.begin()); + candidate.push_back(temp); + } + + solver->Backtrack(0); + solver->SetAssumptionLevel(0); + while (solver->CurrentDecisionLevel() < candidate.size()) { + const Literal decision = candidate[solver->CurrentDecisionLevel()]; + if (solver->Assignment().LiteralIsTrue(decision)) { + candidate.erase(candidate.begin() + solver->CurrentDecisionLevel()); + continue; + } else if (solver->Assignment().LiteralIsFalse(decision)) { + const int level = + solver->LiteralTrail().Info(decision.Variable()).level; + if (level + 1 != candidate.size()) { + candidate.resize(level); + candidate.push_back(decision); + } + break; + } else { + solver->EnqueueDecisionAndBackjumpOnConflict(decision); + } + } + } + + if (candidate.size() < core->size()) { + VLOG(1) << "minimization " << core->size() << " -> " << candidate.size(); + + // Check that the order is indeed preserved. 
+ int index = 0; + for (const Literal l : *core) { + if (index < candidate.size() && l == candidate[index]) ++index; + } + CHECK_EQ(index, candidate.size()); + + core->assign(candidate.begin(), candidate.end()); + } +} + // This algorithm works by exploiting the unsat core returned by the SAT solver // when the problem is UNSAT. It starts by trying to solve the decision problem // where all the objective variables are set to their value with minimal cost, @@ -1156,7 +1210,7 @@ SatSolver::Status FindCores(std::vector assumptions, if (result != SatSolver::ASSUMPTIONS_UNSAT) return result; std::vector core = sat_solver->GetLastIncompatibleDecisions(); if (sat_solver->parameters().minimize_core()) { - MinimizeCore(sat_solver, &core); + MinimizeCoreWithPropagation(sat_solver, &core); } CHECK(!core.empty()); cores->push_back(core); @@ -1297,9 +1351,6 @@ SatSolver::Status MinimizeWithCoreAndLazyEncoding( } // Update the objective lower bound with our current bound. - // - // Note(user): This is not needed for correctness, but it might cause - // more propagation and is nice to have for reporting/logging purpose. if (!integer_trail->Enqueue( IntegerLiteral::GreaterOrEqual(objective_var, implied_objective_lb), {}, {})) { @@ -1314,11 +1365,21 @@ SatSolver::Status MinimizeWithCoreAndLazyEncoding( continue; } - // If there is only one assumptions left, we switch the algorithm. - if (term_indices.size() == 1 && next_stratified_threshold == 0) { + // If there is only one or two assumptions left, we switch the algorithm. 
+ if (term_indices.size() <= 2 && next_stratified_threshold == 0) { if (log_info) LOG(INFO) << "Switching to linear scan..."; - model->Add(LowerOrEqualWithOffset( - terms[term_indices[0]].var, objective_var, objective_offset.value())); + + std::vector constraint_vars; + std::vector constraint_coeffs; + for (const int index : term_indices) { + constraint_vars.push_back(terms[index].var); + constraint_coeffs.push_back(terms[index].weight.value()); + } + constraint_vars.push_back(objective_var); + constraint_coeffs.push_back(-1); + model->Add(WeightedSumLowerOrEqual(constraint_vars, constraint_coeffs, + -objective_offset.value())); + result = MinimizeIntegerVariableWithLinearScanAndLazyEncoding( false, objective_var, next_decision, feasible_solution_observer, model); @@ -1330,9 +1391,11 @@ SatSolver::Status MinimizeWithCoreAndLazyEncoding( if (log_info) { const int64 lb = objective_lb.value(); const int64 ub = best_objective.value(); - const int gap = lb == ub ? 0 - : static_cast(std::ceil(100.0 * (ub - lb) / - std::max(ub, lb))); + const int gap = + lb == ub + ? 0 + : static_cast(std::ceil( + 100.0 * (ub - lb) / std::max(std::abs(ub), std::abs(lb)))); LOG(INFO) << StrCat("unscaled_objective:[", lb, ",", ub, "]" " gap:", @@ -1342,7 +1405,7 @@ SatSolver::Status MinimizeWithCoreAndLazyEncoding( } // Abort if we have a solution and a gap of zero. 
- if (implied_objective_lb == best_objective && num_solutions > 0) { + if (objective_lb == best_objective && num_solutions > 0) { result = SatSolver::MODEL_SAT; break; } @@ -1411,7 +1474,7 @@ SatSolver::Status MinimizeWithCoreAndLazyEncoding( max_depth = std::max(max_depth, new_depth); if (log_info) { LOG(INFO) << StringPrintf( - " core:%zu weight:[%lld,%lld] domain:[%lld,%lld] depth:%d", + "core:%zu weight:[%lld,%lld] domain:[%lld,%lld] depth:%d", core.size(), min_weight.value(), max_weight.value(), new_var_lb.value(), new_var_ub.value(), new_depth); } @@ -1423,7 +1486,6 @@ SatSolver::Status MinimizeWithCoreAndLazyEncoding( terms.push_back({new_var, min_weight, new_depth}); // Sum variables in the core <= new_var. - // TODO(user): Experiment with FixedWeightedSum() instead. { std::vector constraint_vars; std::vector constraint_coeffs; @@ -1440,12 +1502,12 @@ SatSolver::Status MinimizeWithCoreAndLazyEncoding( } // Find out the true lower bound of new_var. This is called "cover - // optimization" in the max-SAT literature. + // optimization" in some of the max-SAT literature. It can help on some + // problem families and hurt on others. // - // TODO(user): Do more experiments to decide if this is better. This - // approach kind of mix the basic linear-scan one with the core based - // approach. - if (/* DISABLES CODE */ (false)) { + // TODO(user): Add some conflict limit? and/or come up with an algo that + // dynamically turn this on or not depending on the situation? + if (sat_solver->parameters().cover_optimization()) { IntegerValue best = new_var_ub; // Simple linear scan algorithm to find the optimal of new_var. 
@@ -1456,14 +1518,17 @@ SatSolver::Status MinimizeWithCoreAndLazyEncoding( SolveIntegerProblemWithLazyEncoding({a}, next_decision, model); if (result != SatSolver::MODEL_SAT) break; best = integer_trail->LowerBound(new_var); + if (log_info) { + LOG(INFO) << "cover_opt:[" << new_var_lb << "," << best << "]"; + } if (!process_solution()) { result = SatSolver::MODEL_UNSAT; break; } } + sat_solver->Backtrack(0); + sat_solver->SetAssumptionLevel(0); if (result == SatSolver::ASSUMPTIONS_UNSAT) { - sat_solver->Backtrack(0); - sat_solver->SetAssumptionLevel(0); if (!integer_trail->Enqueue( IntegerLiteral::GreaterOrEqual(new_var, best), {}, {})) { result = SatSolver::MODEL_UNSAT; @@ -1535,6 +1600,7 @@ SatSolver::Status MinimizeWithHittingSetAndLazyEncoding( // we find a core, a new constraint will be added to this problem. MPModelRequest request; #if defined(USE_SCIP) + request.set_solver_specific_parameters("limits/gap = 0"); request.set_solver_type(MPModelRequest::SCIP_MIXED_INTEGER_PROGRAMMING); #else // USE_CBC request.set_solver_type(MPModelRequest::CBC_MIXED_INTEGER_PROGRAMMING); @@ -1680,11 +1746,7 @@ SatSolver::Status MinimizeWithHittingSetAndLazyEncoding( ct->add_coefficient(1.0); ct->set_lower_bound(ct->lower_bound() + lb); } else { - // TODO(user): if we have just one variable whose hs_value is not at - // its lower bound, then we can add a cut that remove the current - // solution (on the core) without the need to introduce this extra - // variable. - std::pair key = {index, hs_value}; + const std::pair key = {index, hs_value}; if (!ContainsKey(created_var, key)) { const int new_var_index = hs_model.variable_size(); created_var[key] = new_var_index; diff --git a/ortools/sat/optimization.h b/ortools/sat/optimization.h index ae302e93c2..a45503f9a3 100644 --- a/ortools/sat/optimization.h +++ b/ortools/sat/optimization.h @@ -40,6 +40,13 @@ namespace sat { // core. 
void MinimizeCore(SatSolver* solver, std::vector* core); +// Like MinimizeCore() with a slower but strictly better heuristic. This +// algorithm should produce a minimal core with respect to propagation. We put +// each literal of the initial core "last" at least once, so if such literal can +// be inferred by propagation by any subset of the other literals, it will be +// removed. +void MinimizeCoreWithPropagation(SatSolver* solver, std::vector* core); + // Because the Solve*() functions below are also used in scripts that requires a // special output format, we use this to tell them whether or not to use the // default logging framework or simply stdout. Most users should just use diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index f27a71eaf8..bee5f26f36 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -304,6 +304,13 @@ void PrecedencesPropagator::AddArc(IntegerVariable tail, IntegerVariable head, // // TODO(user): Adding arcs and then calling Untrail() before Propagate() // will cause this mecanism to break. Find a more robust implementation. + // + // TODO(user): In some rare corner case, rescanning the whole list of arc + // leaving tail_var can make AddVar() have a quadratic complexity where it + // shouldn't. A better solution would be to see if this new arc currently + // propagate something, and if it does, just update the lower bound of + // a.head_var and let the normal "is modified" mechanism handle any eventual + // follow up propagations. modified_vars_.Set(a.tail_var); const int arc_index = arcs_.size(); if (l == kNoLiteralIndex) { diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 2f3e5e077c..bfb9dada1a 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -18,7 +18,7 @@ package operations_research.sat; // Contains the definitions for all the sat algorithm parameters and their // default values. 
// -// NEXT TAG: 89 +// NEXT TAG: 90 message SatParameters { // ========================================================================== // Branching and polarity @@ -396,6 +396,10 @@ message SatParameters { // in the core based max-SAT algorithms. optional bool find_multiple_cores = 84 [default = true]; + // If true, when the max-sat algo find a core, we compute the minimal number + // of literals in the core that needs to be true to have a feasible solution. + optional bool cover_optimization = 89 [default = true]; + // In what order do we add the assumptions in a core-based max-sat algorithm enum MaxSatAssumptionOrder { DEFAULT_ASSUMPTION_ORDER = 0; diff --git a/ortools/sat/timetable.cc b/ortools/sat/timetable.cc index dab5efaf82..d3066a5c86 100644 --- a/ortools/sat/timetable.cc +++ b/ortools/sat/timetable.cc @@ -316,7 +316,7 @@ void TimeTablingPerTask::ReverseProfile() { bool TimeTablingPerTask::SweepAllTasks() { // Tasks with a lower or equal demand will not be pushed. - const IntegerValue min_demand = CapacityMax() - profile_max_height_; + const IntegerValue min_demand = CapSub(CapacityMax(), profile_max_height_); for (int i = num_tasks_to_sweep_ - 1; i >= 0; --i) { const int t = tasks_to_sweep_[i];