diff --git a/ortools/sat/cp_model_lns.cc b/ortools/sat/cp_model_lns.cc index fc94a881c0..0e182baa3e 100644 --- a/ortools/sat/cp_model_lns.cc +++ b/ortools/sat/cp_model_lns.cc @@ -224,22 +224,24 @@ void NeighborhoodGenerator::Synchronize() { // SolveData. std::sort(solve_data_.begin(), solve_data_.end()); + // This will be used to update the difficulty of this neighborhood. + int num_fully_solved_in_batch = 0; + int num_not_fully_solved_in_batch = 0; + for (const SolveData& data : solve_data_) { - // TODO(user): we should use the original neighborhood difficulty in the - // formula that update the difficulty. Ideally, using all the recent data we - // want to aim for a given percentage of "solvable vs. unsolvable" problem - // so we are at an interesting spot. It seems the current code converges - // towards a 50% percentage though. if (data.status == CpSolverStatus::INFEASIBLE || data.status == CpSolverStatus::OPTIMAL) { - num_fully_solved_calls_++; - difficulty_.Increase(); + ++num_fully_solved_calls_; + ++num_fully_solved_in_batch; } else { - difficulty_.Decrease(); + ++num_not_fully_solved_in_batch; } num_calls_++; - if (data.objective_diff > 0.0) { + if (data.objective_improvement > 0) { + // Note(user): For this to work properly, the objective diff should be + // computed with respect to the best solution at the time of the + // neighborhood generation. num_consecutive_non_improving_calls_ = 0; } else { num_consecutive_non_improving_calls_++; @@ -248,7 +250,8 @@ void NeighborhoodGenerator::Synchronize() { // TODO(user): Weight more recent data. // degrade the current average to forget old learnings. 
const double gain_per_time_unit = - data.objective_diff / (1.0 + data.deterministic_time); + std::max(0.0, static_cast(data.objective_improvement.value())) / + (1.0 + data.deterministic_time); if (num_calls_ <= 100) { current_average_ += (gain_per_time_unit - current_average_) / num_calls_; } else { @@ -258,6 +261,10 @@ void NeighborhoodGenerator::Synchronize() { deterministic_time_ += data.deterministic_time; } + // Update the difficulty. + difficulty_.Update(/*num_decreases=*/num_not_fully_solved_in_batch, + /*num_increases=*/num_fully_solved_in_batch); + // Bump the time limit if we saw no better solution in the last few calls. // This means that as the search progress, we likely spend more and more time // trying to solve individual neighborhood. @@ -281,7 +288,7 @@ void GetRandomSubset(int seed, double relative_size, std::vector* base) { // TODO(user): we could generate this more efficiently than using random // shuffle. std::shuffle(base->begin(), base->end(), random); - const int target_size = std::ceil(relative_size * base->size()); + const int target_size = std::round(relative_size * base->size()); base->resize(target_size); } diff --git a/ortools/sat/cp_model_lns.h b/ortools/sat/cp_model_lns.h index 29e0ab28ee..c46c59724e 100644 --- a/ortools/sat/cp_model_lns.h +++ b/ortools/sat/cp_model_lns.h @@ -200,14 +200,29 @@ class NeighborhoodGenerator { // The time it took to solve this neighborhood. double deterministic_time = 0.0; - // The objective improvement compared to the base solution. - double objective_diff = 0.0; + // The objective improvement compared to the BEST solution at the time of + // generation. Positive if better, negative if worse. Note that we use + // the inner objective (without scaling or offset) so we are exact and it is + // always in the minimization direction. + // + // Note(user): It seems to make more sense to compare to the base solution + // objective, not the best one. 
However this causes issue in our adaptive + // parameter logic and selection because on some problems, its seems that + // the neighbhorhood is always improving. For example if you have two + // solutions, one worse, and it is super easy to find the better one from + // the worse one. This might not be a final solution, but it does work ok + // for now. + // + // TODO(user): Probably clearer to have 3 fields base_objective, + // best_objective and new_objective here. So we can easily change the logic. + IntegerValue objective_improvement = IntegerValue(0); // This is just used to construct a deterministic order for the updates. bool operator<(const SolveData& o) const { - return std::tie(status, difficulty, deterministic_time, objective_diff) < + return std::tie(status, difficulty, deterministic_time, + objective_improvement) < std::tie(o.status, o.difficulty, o.deterministic_time, - o.objective_diff); + o.objective_improvement); } }; void AddSolveData(SolveData data) { diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 698bc09651..105394362d 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -783,20 +783,6 @@ IntegerVariable AddLPConstraints(const CpModelProto& model_proto, return main_objective_var; } -void ExtractLinearObjective(const CpModelProto& model_proto, - const CpModelMapping& mapping, - std::vector* linear_vars, - std::vector* linear_coeffs) { - CHECK(model_proto.has_objective()); - const CpObjectiveProto& obj = model_proto.objective(); - linear_vars->reserve(obj.vars_size()); - linear_coeffs->reserve(obj.vars_size()); - for (int i = 0; i < obj.vars_size(); ++i) { - linear_vars->push_back(mapping.Integer(obj.vars(i))); - linear_coeffs->push_back(IntegerValue(obj.coeffs(i))); - } -} - } // namespace // Used by NewFeasibleSolutionObserver to register observers. 
@@ -1071,6 +1057,14 @@ void LoadCpModel(const CpModelProto& model_proto, SharedResponseManager* shared_response_manager, Model* model) { CHECK(shared_response_manager != nullptr); + // Simple function for the few places where we do "return unsat()". + const auto unsat = [shared_response_manager, model] { + model->GetOrCreate()->NotifyThatModelIsUnsat(); + shared_response_manager->NotifyThatImprovingProblemIsInfeasible( + absl::StrCat(model->GetOrCreate()->worker_name, + " [loading]")); + }; + // We will add them all at once after model_proto is loaded. model->GetOrCreate()->DisableImplicationBetweenLiteral(); @@ -1103,14 +1097,12 @@ void LoadCpModel(const CpModelProto& model_proto, } // We propagate after each new Boolean constraint but not the integer - // ones. So we call FinishPropagation() manually here. Note that we do not - // do that in the postsolve as there is some corner case where propagating - // after each new constraint can have a quadratic behavior. + // ones. So we call FinishPropagation() manually here. // // Note that we only do that in debug mode as this can be really slow on // certain types of problems with millions of constraints. if (DEBUG_MODE) { - if (!sat_solver->FinishPropagation()) return; + if (!sat_solver->FinishPropagation()) return unsat(); Trail* trail = model->GetOrCreate(); const int old_num_fixed = trail->Index(); if (trail->Index() > old_num_fixed) { @@ -1133,12 +1125,12 @@ void LoadCpModel(const CpModelProto& model_proto, for (const std::string& type : unsupported_types) { VLOG(1) << " - " << type; } - return; + return unsat(); } model->GetOrCreate() ->AddAllImplicationsBetweenAssociatedLiterals(); - if (!sat_solver->FinishPropagation()) return; + if (!sat_solver->FinishPropagation()) return unsat(); // Auto detect "at least one of" constraints in the PrecedencesPropagator. 
// Note that we do that before we finish loading the problem (objective and @@ -1148,7 +1140,7 @@ void LoadCpModel(const CpModelProto& model_proto, parameters.auto_detect_greater_than_at_least_one_of()) { model->Mutable() ->AddGreaterThanAtLeastOneOfConstraints(model); - if (!sat_solver->FinishPropagation()) return; + if (!sat_solver->FinishPropagation()) return unsat(); } // TODO(user): This should be done in the presolve instead. @@ -1157,12 +1149,11 @@ void LoadCpModel(const CpModelProto& model_proto, if (parameters.cp_model_probing_level() > 1) { ProbeBooleanVariables(/*deterministic_time_limit=*/1.0, model); if (model->GetOrCreate()->IsModelUnsat()) { - return; + return unsat(); } if (!model->GetOrCreate() ->ComputeTransitiveReduction()) { - sat_solver->NotifyThatModelIsUnsat(); - return; + return unsat(); } } @@ -1201,8 +1192,16 @@ void LoadCpModel(const CpModelProto& model_proto, objective_definition->offset = objective_proto.offset(); objective_definition->objective_var = objective_var; - // Fill the objective heuristics data. + const int size = objective_proto.vars_size(); + objective_definition->vars.resize(size); + objective_definition->coeffs.resize(size); for (int i = 0; i < objective_proto.vars_size(); ++i) { + // Note that if there is no mapping, then the variable will be + // kNoIntegerVariable. + objective_definition->vars[i] = mapping->Integer(objective_proto.vars(i)); + objective_definition->coeffs[i] = IntegerValue(objective_proto.coeffs(i)); + + // Fill the objective heuristics data. 
const int ref = objective_proto.vars(i); if (mapping->IsInteger(ref)) { const IntegerVariable var = mapping->Integer(objective_proto.vars(i)); @@ -1227,8 +1226,7 @@ void LoadCpModel(const CpModelProto& model_proto, objective_var, user_domain); if (!ok) { VLOG(2) << "UNSAT due to the objective domain."; - sat_solver->NotifyThatModelIsUnsat(); - return; + return unsat(); } // Make sure the sum take a value inside the objective domain by adding @@ -1254,7 +1252,7 @@ void LoadCpModel(const CpModelProto& model_proto, // Note that we do one last propagation at level zero once all the // constraints were added. - if (!sat_solver->FinishPropagation()) return; + if (!sat_solver->FinishPropagation()) return unsat(); if (model_proto.has_objective()) { // Watch improved objective best bounds. @@ -1288,6 +1286,33 @@ void LoadCpModel(const CpModelProto& model_proto, } } } + + // Initialize the fixed_search strategy. + auto* search_heuristics = model->GetOrCreate(); + search_heuristics->fixed_search = ConstructSearchStrategy( + model_proto, mapping->GetVariableMapping(), objective_var, model); + if (VLOG_IS_ON(3)) { + search_heuristics->fixed_search = + InstrumentSearchStrategy(model_proto, mapping->GetVariableMapping(), + search_heuristics->fixed_search, model); + } + + // Initialize the "follow hint" strategy. + std::vector vars; + std::vector values; + for (int i = 0; i < model_proto.solution_hint().vars_size(); ++i) { + const int ref = model_proto.solution_hint().vars(i); + CHECK(RefIsPositive(ref)); + BooleanOrIntegerVariable var; + if (mapping->IsBoolean(ref)) { + var.bool_var = mapping->Literal(ref).Variable(); + } else { + var.int_var = mapping->Integer(ref); + } + vars.push_back(var); + values.push_back(IntegerValue(model_proto.solution_hint().values(i))); + } + search_heuristics->hint_search = FollowHint(vars, values, model); } // Solves an already loaded cp_model_proto. 
@@ -1299,29 +1324,10 @@ void LoadCpModel(const CpModelProto& model_proto, void SolveLoadedCpModel(const CpModelProto& model_proto, SharedResponseManager* shared_response_manager, Model* model) { - CHECK(shared_response_manager != nullptr); - std::string solution_info = model->GetOrCreate()->worker_name; - if (model->GetOrCreate()->IsModelUnsat()) { - shared_response_manager->NotifyThatImprovingProblemIsInfeasible( - absl::StrCat(solution_info, " [loading]")); - return; - } - - IntegerVariable objective_var = kNoIntegerVariable; - if (model_proto.has_objective()) { - objective_var = model->Get()->objective_var; - CHECK_NE(objective_var, kNoIntegerVariable); - } - - // Initialize the search strategy function. - auto* mapping = model->Get(); - std::function fixed_search = ConstructSearchStrategy( - model_proto, mapping->GetVariableMapping(), objective_var, model); - if (VLOG_IS_ON(3)) { - fixed_search = InstrumentSearchStrategy( - model_proto, mapping->GetVariableMapping(), fixed_search, model); - } + if (shared_response_manager->ProblemIsSolved()) return; + const std::string& solution_info = + model->GetOrCreate()->worker_name; const auto solution_observer = [&model_proto, &model, &solution_info, &shared_response_manager]() { CpSolverResponse response; @@ -1330,80 +1336,11 @@ void SolveLoadedCpModel(const CpModelProto& model_proto, shared_response_manager->NewSolution(response, model); }; - // Load solution hint. - // We follow it and allow for a tiny number of conflicts before giving up. - // - // TODO(user): When we get a feasible solution here, even with phase saving - // it seems we don't get the same solution while launching the optimization - // below. Understand why. Also just constrain the solver to find a better - // solution right away, it should be more efficient. Note that this is kind - // of already done when get_objective_value() is registered above. 
- const SatParameters& parameters = *(model->GetOrCreate()); - if (model_proto.has_solution_hint()) { - const int64 old_conflict_limit = parameters.max_number_of_conflicts(); - model->GetOrCreate()->set_max_number_of_conflicts(10); - std::vector vars; - std::vector values; - for (int i = 0; i < model_proto.solution_hint().vars_size(); ++i) { - const int ref = model_proto.solution_hint().vars(i); - CHECK(RefIsPositive(ref)); - BooleanOrIntegerVariable var; - if (mapping->IsBoolean(ref)) { - var.bool_var = mapping->Literal(ref).Variable(); - } else { - var.int_var = mapping->Integer(ref); - } - vars.push_back(var); - values.push_back(IntegerValue(model_proto.solution_hint().values(i))); - } - - SearchHeuristics& search_heuristics = - *model->GetOrCreate(); - search_heuristics.policy_index = 0; - search_heuristics.decision_policies = { - SequentialSearch({FollowHint(vars, values, model), - SatSolverHeuristic(model), fixed_search})}; - auto no_restart = []() { return false; }; - search_heuristics.restart_policies = {no_restart}; - const SatSolver::Status status = ResetAndSolveIntegerProblem({}, model); - - if (status == SatSolver::Status::FEASIBLE) { - const int old_size = solution_info.size(); - solution_info += "[hint]"; - solution_observer(); - solution_info.resize(old_size); - - if (!model_proto.has_objective()) { - if (parameters.enumerate_all_solutions()) { - model->Add( - ExcludeCurrentSolutionWithoutIgnoredVariableAndBacktrack()); - } else { - return; - } - } else { - // Restrict the objective. Note that since we call the solution observer - // above, the shared_response_manager objective should be up to date. 
- model->GetOrCreate()->Backtrack(0); - IntegerTrail* integer_trail = model->GetOrCreate(); - if (!integer_trail->Enqueue( - IntegerLiteral::LowerOrEqual( - objective_var, - shared_response_manager->GetInnerObjectiveUpperBound()), - {}, {})) { - shared_response_manager->NotifyThatImprovingProblemIsInfeasible( - absl::StrCat(solution_info, "[hint]")); - shared_response_manager->SetStatsFromModel(model); - return; - } - } - } - // TODO(user): Remove conflicts used during hint search. - model->GetOrCreate()->set_max_number_of_conflicts( - old_conflict_limit); - } + // Reconfigure search heuristic if it was changed. + ConfigureSearchHeuristics(model); SatSolver::Status status; - ConfigureSearchHeuristics(fixed_search, model); + const SatParameters& parameters = *model->GetOrCreate(); if (!model_proto.has_objective()) { while (true) { status = ResetAndSolveIntegerProblem(/*assumptions=*/{}, model); @@ -1418,21 +1355,25 @@ void SolveLoadedCpModel(const CpModelProto& model_proto, } } else { // Optimization problem. + const auto& objective = *model->GetOrCreate(); + const IntegerVariable objective_var = objective.objective_var; + CHECK_NE(objective_var, kNoIntegerVariable); + if (parameters.optimize_with_core()) { - std::vector linear_vars; - std::vector linear_coeffs; - ExtractLinearObjective(model_proto, *model->GetOrCreate(), - &linear_vars, &linear_coeffs); + // TODO(user): This doesn't work with splitting in chunk for now. It + // shouldn't be too hard to fix. 
if (parameters.optimize_with_max_hs()) { status = MinimizeWithHittingSetAndLazyEncoding( - objective_var, linear_vars, linear_coeffs, solution_observer, + objective_var, objective.vars, objective.coeffs, solution_observer, model); } else { - CoreBasedOptimizer core(objective_var, linear_vars, linear_coeffs, + CoreBasedOptimizer core(objective_var, objective.vars, objective.coeffs, solution_observer, model); status = core.Optimize(); } } else { + // TODO(user): This parameter break the splitting in chunk of a Solve(). + // It should probably be moved into another SubSolver altogether. if (parameters.binary_search_num_conflicts() >= 0) { RestrictObjectiveDomainWithBinarySearch(objective_var, solution_observer, model); @@ -1451,18 +1392,63 @@ void SolveLoadedCpModel(const CpModelProto& model_proto, } } + // TODO(user): Remove from here when we split in chunk. We just want to + // do that at the end. shared_response_manager->SetStatsFromModel(model); } -// Thin wrapper for the postsolve "solve" and LNS local solve. -CpSolverResponse LocalSolve(const CpModelProto& local_proto, - WallTimer* wall_timer, Model* local_model) { - SharedResponseManager local_response_manager( - /*log_updates=*/false, &local_proto, wall_timer); +// Try to find a solution by following the hint and using a low conflict limit. +// The CpModelProto must already be loaded in the Model. +void QuickSolveWithHint(const CpModelProto& model_proto, + SharedResponseManager* shared_response_manager, + Model* model) { + if (!model_proto.has_solution_hint()) return; + if (shared_response_manager->ProblemIsSolved()) return; - LoadCpModel(local_proto, &local_response_manager, local_model); - SolveLoadedCpModel(local_proto, &local_response_manager, local_model); - return local_response_manager.GetResponse(); + // Temporarily change the parameters. 
+ auto* parameters = model->GetOrCreate(); + const SatParameters saved_params = *parameters; + parameters->set_max_number_of_conflicts(10); + parameters->set_search_branching(SatParameters::HINT_SEARCH); + parameters->set_optimize_with_core(false); + auto cleanup = ::gtl::MakeCleanup( + [parameters, saved_params]() { *parameters = saved_params; }); + + // Solve decision problem. + ConfigureSearchHeuristics(model); + const SatSolver::Status status = + ResetAndSolveIntegerProblem(/*assumptions=*/{}, model); + + const std::string& solution_info = + model->GetOrCreate()->worker_name; + if (status == SatSolver::Status::FEASIBLE) { + CpSolverResponse response; + FillSolutionInResponse(model_proto, *model, &response); + response.set_solution_info(absl::StrCat(solution_info, " [hint]")); + shared_response_manager->NewSolution(response, model); + + if (!model_proto.has_objective()) { + if (parameters->enumerate_all_solutions()) { + model->Add(ExcludeCurrentSolutionWithoutIgnoredVariableAndBacktrack()); + } + } else { + // Restrict the objective. 
+ const IntegerVariable objective_var = + model->GetOrCreate()->objective_var; + model->GetOrCreate()->Backtrack(0); + IntegerTrail* integer_trail = model->GetOrCreate(); + if (!integer_trail->Enqueue( + IntegerLiteral::LowerOrEqual( + objective_var, + shared_response_manager->GetInnerObjectiveUpperBound()), + {}, {})) { + shared_response_manager->NotifyThatImprovingProblemIsInfeasible( + absl::StrCat(solution_info, " [hint]")); + shared_response_manager->SetStatsFromModel(model); + return; + } + } + } } // TODO(user): If this ever shows up in the profile, we could avoid copying @@ -1503,8 +1489,12 @@ void PostsolveResponse(const CpModelProto& model_proto, postsolve_model.Add(operations_research::sat::NewSatParameters(params)); } + SharedResponseManager local_response_manager( + /*log_updates=*/false, &mapping_proto, wall_timer); + LoadCpModel(mapping_proto, &local_response_manager, &postsolve_model); + SolveLoadedCpModel(mapping_proto, &local_response_manager, &postsolve_model); const CpSolverResponse postsolve_response = - LocalSolve(mapping_proto, wall_timer, &postsolve_model); + local_response_manager.GetResponse(); CHECK(postsolve_response.status() == CpSolverStatus::FEASIBLE || postsolve_response.status() == CpSolverStatus::OPTIMAL); @@ -1752,6 +1742,8 @@ class FullProblemSolver : public SubSolver { task_is_generated_ = true; return [this]() { LoadCpModel(*shared_->model_proto, shared_->response, local_model_.get()); + QuickSolveWithHint(*shared_->model_proto, shared_->response, + local_model_.get()); SolveLoadedCpModel(*shared_->model_proto, shared_->response, local_model_.get()); @@ -1810,6 +1802,8 @@ class LnsSolver : public SubSolver { const int num_solutions = shared_->response->SolutionsRepository().NumSolutions(); + const IntegerValue initial_best_inner_objective = + shared_->response->BestSolutionInnerObjectiveValue(); CpSolverResponse base_response; if (num_solutions > 0) { random_engine_t random; @@ -1823,8 +1817,6 @@ class LnsSolver : public 
SubSolver { for (const int64 value : solution.variable_values) { base_response.add_solution(value); } - base_response.set_objective_value(ScaleObjectiveValue( - shared_->model_proto->objective(), solution.internal_objective)); } else { base_response.set_status(CpSolverStatus::UNKNOWN); } @@ -1875,14 +1867,26 @@ class LnsSolver : public SubSolver { options.time_limit = local_model.GetOrCreate(); PresolveCpModel(options, &neighborhood.cp_model, &mapping_proto, &postsolve_mapping); - CpSolverResponse local_response = - LocalSolve(neighborhood.cp_model, shared_->wall_timer, &local_model); + + // TODO(user): Depending on the problem, we should probably use the + // parameters that work bests (core, linearization_level, etc...) or + // maybe we can just randomize them like for the base solution used. + SharedResponseManager local_response_manager( + /*log_updates=*/false, &neighborhood.cp_model, shared_->wall_timer); + LoadCpModel(neighborhood.cp_model, &local_response_manager, &local_model); + QuickSolveWithHint(neighborhood.cp_model, &local_response_manager, + &local_model); + SolveLoadedCpModel(neighborhood.cp_model, &local_response_manager, + &local_model); + CpSolverResponse local_response = local_response_manager.GetResponse(); // TODO(user): we actually do not need to postsolve if the solution is // not going to be used... 
PostsolveResponse(*shared_->model_proto, mapping_proto, postsolve_mapping, shared_->wall_timer, &local_response); + local_response_manager.BestSolutionInnerObjectiveValue(); + if (local_response.solution_info().empty()) { local_response.set_solution_info(solution_info); } else { @@ -1894,11 +1898,14 @@ class LnsSolver : public SubSolver { data.status = local_response.status(); data.difficulty = saved_difficulty; data.deterministic_time = local_response.deterministic_time(); - data.objective_diff = 0.0; + data.objective_improvement = 0.0; if (local_response.status() == CpSolverStatus::OPTIMAL || local_response.status() == CpSolverStatus::FEASIBLE) { - data.objective_diff = std::abs(local_response.objective_value() - - base_response.objective_value()); + const IntegerValue local_response_inner_objective = + IntegerValue(ComputeInnerObjective( + shared_->model_proto->objective(), local_response)); + data.objective_improvement = + initial_best_inner_objective - local_response_inner_objective; } generator_->AddSolveData(data); @@ -2266,6 +2273,7 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { LOG(INFO) << "Initial num_bool: " << model->Get()->NumVariables(); } + QuickSolveWithHint(new_cp_model_proto, &shared_response_manager, model); SolveLoadedCpModel(new_cp_model_proto, &shared_response_manager, model); } diff --git a/ortools/sat/cp_model_utils.cc b/ortools/sat/cp_model_utils.cc index 229c6f8169..baf871cdb4 100644 --- a/ortools/sat/cp_model_utils.cc +++ b/ortools/sat/cp_model_utils.cc @@ -463,5 +463,21 @@ std::vector UsedIntervals(const ConstraintProto& ct) { return used_intervals; } +int64 ComputeInnerObjective(const CpObjectiveProto& objective, + const CpSolverResponse& response) { + int64 objective_value = 0; + auto& repeated_field_values = response.solution().empty() + ? 
response.solution_lower_bounds() + : response.solution(); + for (int i = 0; i < objective.vars_size(); ++i) { + int64 coeff = objective.coeffs(i); + const int ref = objective.vars(i); + const int var = PositiveRef(ref); + if (!RefIsPositive(ref)) coeff = -coeff; + objective_value += coeff * repeated_field_values[var]; + } + return objective_value; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model_utils.h b/ortools/sat/cp_model_utils.h index a3503f3fa9..5c343f2d80 100644 --- a/ortools/sat/cp_model_utils.h +++ b/ortools/sat/cp_model_utils.h @@ -118,7 +118,7 @@ std::vector AllValuesInDomain(const ProtoWithDomain& proto) { return result; } -// Scale back a objective value to a double value from the original model. +// Scales back a objective value to a double value from the original model. inline double ScaleObjectiveValue(const CpObjectiveProto& proto, int64 value) { double result = static_cast(value); if (value == kint64min) result = -std::numeric_limits::infinity(); @@ -128,6 +128,12 @@ inline double ScaleObjectiveValue(const CpObjectiveProto& proto, int64 value) { return proto.scaling_factor() * result; } +// Computes the "inner" objective of a response that contains a solution. +// This is the objective without offset and scaling. Call ScaleObjectiveValue() +// to get the user facing objective. 
+int64 ComputeInnerObjective(const CpObjectiveProto& objective, + const CpSolverResponse& response); + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/integer.cc b/ortools/sat/integer.cc index 163f594e76..48f9034d5a 100644 --- a/ortools/sat/integer.cc +++ b/ortools/sat/integer.cc @@ -947,7 +947,7 @@ void IntegerTrail::EnqueueLiteralInternal( absl::Span literal_reason, absl::Span integer_reason) { DCHECK(!trail_->Assignment().LiteralIsAssigned(literal)); - DCHECK(lazy_reason == nullptr || + DCHECK(lazy_reason != nullptr || ReasonIsValid(literal_reason, integer_reason)); if (integer_search_levels_.empty()) { // Level zero. We don't keep any reason. diff --git a/ortools/sat/integer_search.cc b/ortools/sat/integer_search.cc index 1b51acc970..020c02d22f 100644 --- a/ortools/sat/integer_search.cc +++ b/ortools/sat/integer_search.cc @@ -475,9 +475,9 @@ std::function SatSolverRestartPolicy(Model* model) { return [policy]() { return policy->ShouldRestart(); }; } -void ConfigureSearchHeuristics( - const std::function& fixed_search, Model* model) { +void ConfigureSearchHeuristics(Model* model) { SearchHeuristics& heuristics = *model->GetOrCreate(); + CHECK(heuristics.fixed_search != nullptr); heuristics.policy_index = 0; heuristics.decision_policies.clear(); heuristics.restart_policies.clear(); @@ -491,7 +491,8 @@ void ConfigureSearchHeuristics( } else { decision_policy = SatSolverHeuristic(model); } - decision_policy = SequentialSearch({decision_policy, fixed_search}); + decision_policy = + SequentialSearch({decision_policy, heuristics.fixed_search}); decision_policy = IntegerValueSelectionHeuristic(decision_policy, model); heuristics.decision_policies = {decision_policy}; heuristics.restart_policies = {SatSolverRestartPolicy(model)}; @@ -500,8 +501,8 @@ void ConfigureSearchHeuristics( case SatParameters::FIXED_SEARCH: { // Not all Boolean might appear in fixed_search(), so once there is no // decision left, we fix all Booleans that are still 
undecided. - heuristics.decision_policies = { - SequentialSearch({fixed_search, SatSolverHeuristic(model)})}; + heuristics.decision_policies = {SequentialSearch( + {heuristics.fixed_search, SatSolverHeuristic(model)})}; if (parameters.randomize_search()) { heuristics.restart_policies = {SatSolverRestartPolicy(model)}; @@ -514,10 +515,20 @@ void ConfigureSearchHeuristics( heuristics.restart_policies = {no_restart}; return; } + case SatParameters::HINT_SEARCH: { + CHECK(heuristics.hint_search != nullptr); + heuristics.decision_policies = { + SequentialSearch({heuristics.hint_search, SatSolverHeuristic(model), + heuristics.fixed_search})}; + auto no_restart = []() { return false; }; + heuristics.restart_policies = {no_restart}; + return; + } case SatParameters::PORTFOLIO_SEARCH: { heuristics.decision_policies = CompleteHeuristics( - AddModelHeuristics({fixed_search}, model), - SequentialSearch({SatSolverHeuristic(model), fixed_search})); + AddModelHeuristics({heuristics.fixed_search}, model), + SequentialSearch( + {SatSolverHeuristic(model), heuristics.fixed_search})); for (auto& ref : heuristics.decision_policies) { ref = IntegerValueSelectionHeuristic(ref, model); } @@ -526,7 +537,6 @@ void ConfigureSearchHeuristics( return; } case SatParameters::LP_SEARCH: { - // Fill portfolio with pseudocost heuristics. std::vector> lp_heuristics; for (const auto& ct : *(model->GetOrCreate())) { @@ -534,28 +544,29 @@ void ConfigureSearchHeuristics( } if (lp_heuristics.empty()) { // Revert to fixed search. 
heuristics.decision_policies = {SequentialSearch( - {fixed_search, SatSolverHeuristic(model)})}, + {heuristics.fixed_search, SatSolverHeuristic(model)})}, heuristics.restart_policies = {SatSolverRestartPolicy(model)}; return; } heuristics.decision_policies = CompleteHeuristics( - lp_heuristics, - SequentialSearch({SatSolverHeuristic(model), fixed_search})); + lp_heuristics, SequentialSearch({SatSolverHeuristic(model), + heuristics.fixed_search})); heuristics.restart_policies.assign(heuristics.decision_policies.size(), SatSolverRestartPolicy(model)); return; } case SatParameters::PSEUDO_COST_SEARCH: { - std::function search = SequentialSearch( - {PseudoCost(model), SatSolverHeuristic(model), fixed_search}); + std::function search = + SequentialSearch({PseudoCost(model), SatSolverHeuristic(model), + heuristics.fixed_search}); heuristics.decision_policies = { IntegerValueSelectionHeuristic(search, model)}; heuristics.restart_policies = {SatSolverRestartPolicy(model)}; return; } case SatParameters::PORTFOLIO_WITH_QUICK_RESTART_SEARCH: { - std::function search = - SequentialSearch({RandomizeOnRestartHeuristic(model), fixed_search}); + std::function search = SequentialSearch( + {RandomizeOnRestartHeuristic(model), heuristics.fixed_search}); heuristics.decision_policies = {search}; heuristics.restart_policies = { RestartEveryKFailures(10, model->GetOrCreate())}; @@ -616,6 +627,13 @@ SatSolver::Status SolveIntegerProblem(Model* model) { // already done because EnqueueDecisionAndBackjumpOnConflict() assumes that // the solver is in a "propagated" state. SatSolver* const solver = model->GetOrCreate(); + + // TODO(user): We have the issue that at level zero. calling the propagation + // loop more than once can propagate more! This is because we call the LP + // again and again on each level zero propagation. This is causing some + // CHECKs() to fail in multithread (rarely) because when we associate new + // literals to integer ones, Propagate() is indirectly called. 
Not sure yet + // how to fix. if (!solver->FinishPropagation()) return solver->UnsatStatus(); // Create and initialize pseudo costs. diff --git a/ortools/sat/integer_search.h b/ortools/sat/integer_search.h index 574af47d98..2f6f1327ad 100644 --- a/ortools/sat/integer_search.h +++ b/ortools/sat/integer_search.h @@ -46,14 +46,18 @@ struct SearchHeuristics { // Index in the vector above that indicate the current configuration. int policy_index; + + // Two special decision functions that are constructed at loading time. + // These are used by ConfigureSearchHeuristics() to fill the policies above. + std::function fixed_search = nullptr; + std::function hint_search = nullptr; }; // Given a base "fixed_search" function that should mainly control in which // order integer variables are lazily instantiated (and at what value), this // uses the current solver parameters to set the SearchHeuristics class in the // given model. -void ConfigureSearchHeuristics( - const std::function& fixed_search, Model* model); +void ConfigureSearchHeuristics(Model* model); // For an optimization problem, this contains the internal integer objective // to minimize and information on how to display it correctly in the logs. @@ -62,6 +66,13 @@ struct ObjectiveDefinition { double offset = 0.0; IntegerVariable objective_var = kNoIntegerVariable; + // The objective linear expression that should be equal to objective_var. + // If not all proto variable have an IntegerVariable view, then some vars + // will be set to kNoIntegerVariable. In practice, when this is used, we make + // sure there is a view though. + std::vector vars; + std::vector coeffs; + // List of variable that when set to their lower bound should help getting a // better objective. This is used by some search heuristic to preferably // assign any of the variable here to their lower bound first. 
diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 97879c9139..22f63aa4e1 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -301,6 +301,12 @@ message SatParameters { optional double max_deterministic_time = 67 [default = inf]; // Maximum number of conflicts allowed to solve a problem. + // + // TODO(user,user): Maybe change the way the conflict limit is enforced? + // Currently it is enforced on each independent internal SAT solve, rather + // than on the overall number of conflicts across all solves. So in the + // context of an optimization problem, this is not really usable directly by a + // client. optional int64 max_number_of_conflicts = 37 [default = 0x7FFFFFFFFFFFFFFF]; // kint64max @@ -556,27 +562,33 @@ message SatParameters { AUTOMATIC_SEARCH = 0; // If used then all decisions taken by the solver are made using a fixed - // order as specified in the API or in the cp_model.proto search_order + // order as specified in the API or in the CpModelProto search_strategy field. - // - // TODO(user): This is not working in all situation yet. The time limit is - // not really enforced properly, we don't support assumptions, and all the - // decisions variables must be integers. FIXED_SEARCH = 1; // If used, the solver will use various generic heuristics in turn. PORTFOLIO_SEARCH = 2; - // If used, the solver will use heuristics from the LP relaxation. + // If used, the solver will use heuristics from the LP relaxation. This + // exploits the reduced costs of the variables in the relaxation. + // + // TODO(user): Maybe rename REDUCED_COST_SEARCH? LP_SEARCH = 3; - // If used, the solver uses the pseudo costs for branching. + // If used, the solver uses the pseudo costs for branching. Pseudo costs + // are computed using the historical change in objective bounds when some + // decisions are taken. PSEUDO_COST_SEARCH = 4; // Mainly exposed here for testing.
This quickly tries a lot of randomized // heuristics with a low conflict limit. It usually provides a good first // solution. PORTFOLIO_WITH_QUICK_RESTART_SEARCH = 5; + + // Mainly used internally. This is like FIXED_SEARCH, except we follow the + // solution_hint field of the CpModelProto rather than using the information + // provided in the search_strategy. + HINT_SEARCH = 6; } optional SearchBranching search_branching = 82 [default = AUTOMATIC_SEARCH]; diff --git a/ortools/sat/subsolver.cc b/ortools/sat/subsolver.cc index b07102405a..38a9245f5b 100644 --- a/ortools/sat/subsolver.cc +++ b/ortools/sat/subsolver.cc @@ -11,11 +11,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "absl/time/clock.h" -#include "absl/synchronization/mutex.h" -#include "ortools/base/logging.h" #include "ortools/sat/subsolver.h" +#include "ortools/base/logging.h" + +#if !defined(__PORTABLE_PLATFORM__) +#include "absl/synchronization/mutex.h" +#include "absl/time/clock.h" +#endif // __PORTABLE_PLATFORM__ namespace operations_research { namespace sat { diff --git a/ortools/sat/subsolver.h b/ortools/sat/subsolver.h index 4bfb8fce96..59ed843d59 100644 --- a/ortools/sat/subsolver.h +++ b/ortools/sat/subsolver.h @@ -128,15 +128,15 @@ void SequentialLoop(const std::vector>& subsolvers); // Basic adaptive [0.0, 1.0] parameter that can be increased or decreased with a // step that get smaller and smaller with the number of updates. // -// Note(user): The current logic work well in practice, but has no theoretical -// foundation. So it might be possible to use better formulas depending on the -// situation. +// After a while, if the probability of getting a Decrease() vs Increase() when +// running at a given value is f(value), then this should converge towards a +// value such that f(value) = 0.5 provided f is a non-decreasing function over +// [0.0, 1.0]. 
// - // TODO(user): In multithread, we get Increase()/Decrease() signal from - // different thread potentially working on different difficulty. The class need - // to be updated to properly handle this case. Increase()/Decrease() should take - // in the difficulty at which the signal was computed, and the update formula - // should be changed accordingly. + // TODO(user): The current logic works well in practice, but has no strong + // theoretical foundation. We should be able to come up with a better understood + // formula that converges way faster. It would also be nice to generalize the 0.5 + // above to a target probability p. class AdaptiveParameterValue { public: // Initial value is in [0.0, 1.0], both 0.0 and 1.0 are valid. @@ -155,6 +155,21 @@ class AdaptiveParameterValue { value_ = std::max(value_ / factor, 1.0 - (1.0 - value_) * factor); } + // If we get more than one datapoint from the same value(), we use a formula + // that is more sound than calling Increase()/Decrease() n times, which depends + // on the order of calls.
+ void Update(int num_decreases, int num_increases) { + if (num_decreases == num_increases) { + num_changes_ += num_decreases + num_increases; + } else if (num_decreases < num_increases) { + for (int i = num_decreases; i < num_increases; ++i) Increase(); + num_changes_ += 2 * num_decreases; + } else { + for (int i = num_increases; i < num_decreases; ++i) Decrease(); + num_changes_ += 2 * num_increases; + } + } + double value() const { return value_; } private: diff --git a/ortools/sat/synchronization.cc b/ortools/sat/synchronization.cc index a8bd0d7f9d..1f84379e5c 100644 --- a/ortools/sat/synchronization.cc +++ b/ortools/sat/synchronization.cc @@ -158,6 +158,7 @@ void SharedResponseManager::NotifyThatImprovingProblemIsInfeasible( best_response_.set_all_solutions_were_found(true); } } else { + CHECK_EQ(num_solutions_, 0); best_response_.set_status(CpSolverStatus::INFEASIBLE); } if (log_updates_) LogNewSatSolution("Done", wall_timer_.Get(), worker_info); @@ -173,6 +174,11 @@ IntegerValue SharedResponseManager::GetInnerObjectiveUpperBound() { return IntegerValue(inner_objective_upper_bound_); } +IntegerValue SharedResponseManager::BestSolutionInnerObjectiveValue() { + absl::MutexLock mutex_lock(&mutex_); + return IntegerValue(best_solution_objective_value_); +} + int SharedResponseManager::AddSolutionCallback( std::function callback) { absl::MutexLock mutex_lock(&mutex_); @@ -233,19 +239,9 @@ void SharedResponseManager::NewSolution(const CpSolverResponse& response, absl::MutexLock mutex_lock(&mutex_); CHECK_NE(best_response_.status(), CpSolverStatus::INFEASIBLE); - int64 objective_value = 0; if (model_proto_.has_objective()) { - const CpObjectiveProto& obj = model_proto_.objective(); - auto& repeated_field_values = response.solution().empty() - ? 
response.solution_lower_bounds() - : response.solution(); - for (int i = 0; i < obj.vars_size(); ++i) { - int64 coeff = obj.coeffs(i); - const int ref = obj.vars(i); - const int var = PositiveRef(ref); - if (!RefIsPositive(ref)) coeff = -coeff; - objective_value += coeff * repeated_field_values[var]; - } + const int64 objective_value = + ComputeInnerObjective(model_proto_.objective(), response); // Add this solution to the pool, even if it is not improving. if (!response.solution().empty()) { @@ -381,40 +377,39 @@ void SharedBoundsManager::ReportPotentialNewBounds( const std::vector& new_upper_bounds) { CHECK_EQ(variables.size(), new_lower_bounds.size()); CHECK_EQ(variables.size(), new_upper_bounds.size()); - { - absl::MutexLock mutex_lock(&mutex_); - for (int i = 0; i < variables.size(); ++i) { - const int var = variables[i]; - if (var >= num_variables_) continue; - const int64 old_lb = lower_bounds_[var]; - const int64 old_ub = upper_bounds_[var]; - const int64 new_lb = new_lower_bounds[i]; - const int64 new_ub = new_upper_bounds[i]; - const bool changed_lb = new_lb > old_lb; - const bool changed_ub = new_ub < old_ub; - CHECK_GE(var, 0); - if (!changed_lb && !changed_ub) continue; - if (changed_lb) { - lower_bounds_[var] = new_lb; - } - if (changed_ub) { - upper_bounds_[var] = new_ub; - } + absl::MutexLock mutex_lock(&mutex_); + for (int i = 0; i < variables.size(); ++i) { + const int var = variables[i]; + if (var >= num_variables_) continue; + const int64 old_lb = lower_bounds_[var]; + const int64 old_ub = upper_bounds_[var]; + const int64 new_lb = new_lower_bounds[i]; + const int64 new_ub = new_upper_bounds[i]; + const bool changed_lb = new_lb > old_lb; + const bool changed_ub = new_ub < old_ub; + CHECK_GE(var, 0); + if (!changed_lb && !changed_ub) continue; - for (int j = 0; j < num_workers_; ++j) { - if (worker_id == j) continue; - changed_variables_per_workers_[j].Set(var); - } - if (VLOG_IS_ON(2)) { - const IntegerVariableProto& var_proto = 
model_proto.variables(var); - const std::string& var_name = - var_proto.name().empty() ? absl::StrCat("anonymous_var(", var, ")") - : var_proto.name(); - LOG(INFO) << " '" << worker_name << "' exports new bounds for " - << var_name << ": from [" << old_lb << ", " << old_ub - << "] to [" << new_lb << ", " << new_ub << "]"; - } + if (changed_lb) { + lower_bounds_[var] = new_lb; + } + if (changed_ub) { + upper_bounds_[var] = new_ub; + } + + for (int j = 0; j < num_workers_; ++j) { + if (worker_id == j) continue; + changed_variables_per_workers_[j].Set(var); + } + if (VLOG_IS_ON(2)) { + const IntegerVariableProto& var_proto = model_proto.variables(var); + const std::string& var_name = + var_proto.name().empty() ? absl::StrCat("anonymous_var(", var, ")") + : var_proto.name(); + LOG(INFO) << " '" << worker_name << "' exports new bounds for " + << var_name << ": from [" << old_lb << ", " << old_ub + << "] to [" << new_lb << ", " << new_ub << "]"; } } } diff --git a/ortools/sat/synchronization.h b/ortools/sat/synchronization.h index 34a361d468..c9f54a9f81 100644 --- a/ortools/sat/synchronization.h +++ b/ortools/sat/synchronization.h @@ -56,7 +56,10 @@ class SharedTimeLimit { return time_limit_->LimitReached(); } - void Stop() { *stopped_ = true; } + void Stop() { + absl::MutexLock mutex_lock(&mutex_); + *stopped_ = true; + } void UpdateLocalLimit(TimeLimit* local_limit) { absl::MutexLock mutex_lock(&mutex_); @@ -65,10 +68,9 @@ class SharedTimeLimit { private: mutable absl::Mutex mutex_; - TimeLimit* time_limit_; - - std::atomic stopped_boolean_; - std::atomic* stopped_; + TimeLimit* time_limit_ GUARDED_BY(mutex_); + std::atomic stopped_boolean_ GUARDED_BY(mutex_); + std::atomic* stopped_ GUARDED_BY(mutex_); }; // Thread-safe. Keeps a set of n unique best solution found so far. @@ -127,8 +129,8 @@ class SharedSolutionRepository { // Our two solutions pools, the current one and the new one that will be // merged into the current one on each Synchronize() calls. 
- std::vector solutions_; - std::vector new_solutions_; + std::vector solutions_ GUARDED_BY(mutex_); + std::vector new_solutions_ GUARDED_BY(mutex_); }; // Manages the global best response kept by the solver. @@ -164,6 +166,10 @@ class SharedResponseManager { IntegerValue GetInnerObjectiveLowerBound(); IntegerValue GetInnerObjectiveUpperBound(); + // Returns the current best solution inner objective value or kInt64Max if + // there is no solution. + IntegerValue BestSolutionInnerObjectiveValue(); + // Updates the inner objective bounds. void UpdateInnerObjectiveBounds(const std::string& worker_info, IntegerValue lb, IntegerValue ub); @@ -205,8 +211,8 @@ class SharedResponseManager { SharedSolutionRepository* MutableSolutionsRepository() { return &solutions_; } private: - void FillObjectiveValuesInBestResponse(); - void SetStatsFromModelInternal(Model* model); + void FillObjectiveValuesInBestResponse() EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void SetStatsFromModelInternal(Model* model) EXCLUSIVE_LOCKS_REQUIRED(mutex_); const bool log_updates_; const CpModelProto& model_proto_; @@ -214,17 +220,17 @@ class SharedResponseManager { mutable absl::Mutex mutex_; - CpSolverResponse best_response_; - SharedSolutionRepository solutions_; + CpSolverResponse best_response_ GUARDED_BY(mutex_); + SharedSolutionRepository solutions_ GUARDED_BY(mutex_); - int num_solutions_ = 0; - int64 inner_objective_lower_bound_ = kint64min; - int64 inner_objective_upper_bound_ = kint64max; - int64 best_solution_objective_value_ = kint64max; + int num_solutions_ GUARDED_BY(mutex_) = 0; + int64 inner_objective_lower_bound_ GUARDED_BY(mutex_) = kint64min; + int64 inner_objective_upper_bound_ GUARDED_BY(mutex_) = kint64max; + int64 best_solution_objective_value_ GUARDED_BY(mutex_) = kint64max; - int next_callback_id_ = 0; + int next_callback_id_ GUARDED_BY(mutex_) = 0; std::vector>> - callbacks_; + callbacks_ GUARDED_BY(mutex_); }; // This class manages a pool of lower and upper bounds on a set of 
variables in @@ -251,10 +257,13 @@ class SharedBoundsManager { private: const int num_workers_; const int num_variables_; - std::vector> changed_variables_per_workers_; - std::vector lower_bounds_; - std::vector upper_bounds_; + absl::Mutex mutex_; + + std::vector> changed_variables_per_workers_ + GUARDED_BY(mutex_); + std::vector lower_bounds_ GUARDED_BY(mutex_); + std::vector upper_bounds_ GUARDED_BY(mutex_); }; // Stores information on the worker in the parallel context.