Files
ortools-clone/ortools/sat/synchronization.cc

1677 lines
61 KiB
C++
Raw Normal View History

2025-01-10 11:35:44 +01:00
// Copyright 2010-2025 Google LLC
2018-12-17 16:50:15 +01:00
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/sat/synchronization.h"
#include <sys/types.h>
#include <algorithm>
2023-05-24 11:42:11 +02:00
#include <atomic>
#include <cctype>
#include <cmath>
#include <cstddef>
2021-03-04 18:26:01 +01:00
#include <cstdint>
#include <ctime>
#include <deque>
#include <functional>
2021-03-04 18:26:01 +01:00
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
2021-03-04 18:26:01 +01:00
2025-12-18 13:05:33 +01:00
#include "absl/algorithm/container.h"
#include "ortools/base/logging.h"
#include "ortools/base/timer.h"
#include "ortools/sat/drat_checker.h"
#if !defined(__PORTABLE_PLATFORM__)
#include "ortools/base/helpers.h"
2022-10-05 15:56:33 +02:00
#include "ortools/base/options.h"
2020-10-22 23:36:58 +02:00
#endif // __PORTABLE_PLATFORM__
2025-07-21 17:25:33 +02:00
#include "absl/base/thread_annotations.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_map.h"
2018-12-17 16:50:15 +01:00
#include "absl/container/flat_hash_set.h"
#include "absl/flags/flag.h"
2025-07-21 17:25:33 +02:00
#include "absl/hash/hash.h"
#include "absl/log/check.h"
2025-07-21 17:25:33 +02:00
#include "absl/log/log.h"
#include "absl/numeric/int128.h"
#include "absl/random/bit_gen_ref.h"
#include "absl/random/distributions.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "ortools/algorithms/sparse_permutation.h"
#include "ortools/sat/cp_model.pb.h"
#include "ortools/sat/cp_model_utils.h"
2024-11-29 14:27:36 +01:00
#include "ortools/sat/integer_base.h"
#include "ortools/sat/model.h"
#include "ortools/sat/sat_parameters.pb.h"
#include "ortools/sat/symmetry_util.h"
#include "ortools/sat/util.h"
#include "ortools/util/bitset.h"
#include "ortools/util/logging.h"
#include "ortools/util/sorted_interval_list.h"
#include "ortools/util/strong_integers.h"
2018-12-17 16:50:15 +01:00
ABSL_FLAG(bool, cp_model_dump_solutions, false,
          "DEBUG ONLY. If true, all the intermediate solution will be dumped "
          "under '\"FLAGS_cp_model_dump_prefix\" + \"solution_xxx.pb.txt\"'.");
ABSL_FLAG(bool, cp_model_dump_tightened_models, false,
          "DEBUG ONLY. If true, dump tightened models incorporating all bounds "
          "changes under '\"FLAGS_cp_model_dump_prefix\" + "
          "\"tight_model_xxx.pb.txt\"'.");
2018-12-17 16:50:15 +01:00
namespace operations_research {
namespace sat {
2025-07-21 17:25:33 +02:00
std::shared_ptr<const SharedSolutionRepository<int64_t>::Solution>
SharedSolutionPool::Add(SharedSolutionRepository<int64_t>::Solution solution) {
  // The alternative path only accepts solutions tagged with its source id.
  const bool feeds_alternative_path =
      alternative_path_.num_solutions_to_keep() > 0 &&
      solution.source_id == alternative_path_.source_id();
  if (feeds_alternative_path) {
    alternative_path_.Add(solution);
    if (solution.rank < best_solutions_.GetBestRank()) {
      VLOG(2) << "ALTERNATIVE WIN !";
    }
  }

  // For now we only return a solution if it was stored in best_solutions_.
  return best_solutions_.Add(std::move(solution));
}
void SharedSolutionPool::Synchronize(absl::BitGenRef random) {
  // Update the "seeds" for the alternative path.
  //
  // Seeds form a coarse histogram over all solutions ever added: seeds_[i]
  // keeps one representative solution whose rank lies in
  // [ranks_[i], ranks_[i+1]).
  if (alternative_path_.num_solutions_to_keep() > 0) {
    absl::MutexLock mutex_lock(mutex_);
    auto process_solution =
        [this](const SharedSolutionRepository<int64_t>::Solution& solution)
            ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
              if (solution.variable_values.empty()) return;
              if (solution.rank < min_rank_ || solution.rank > max_rank_) {
                // Recompute buckets.
                min_rank_ = std::min(min_rank_, solution.rank);
                max_rank_ = std::max(max_rank_, solution.rank);

                // We want to store around 100 MB max.
                int num_solutions = std::max<int>(
                    10, 100'000'000 / solution.variable_values.size());
                const int64_t range = max_rank_ - min_rank_ + 1;
                if (num_solutions > range) {
                  num_solutions = range;
                }

                // But if the number of variables is low, we do not want
                // to use a lot of space/time just iterating over num_solutions.
                //
                // TODO(user): Rework the algo to be in
                // O(num_different_solutions) rather than initializing the
                // maximum amount right away.
                num_solutions = std::min(num_solutions, 1'000);

                // Resize and recompute rank_.
                //
                // seeds_[i] should contains solution in [ranks_[i],
                // rank_[i+1]). rank_[0] is always min_rank_. As long as we
                // have room, we should have exactly one bucket per rank.
                ranks_.resize(num_solutions);
                seeds_.resize(num_solutions);
                int64_t offset = (max_rank_ - min_rank_ + 1) / num_solutions;
                CHECK_GT(offset, 0);
                // 128-bit intermediates: i * range may overflow int64_t.
                for (int i = 0; i < num_solutions; ++i) {
                  ranks_[i] = min_rank_ +
                              static_cast<int64_t>(absl::int128(i) *
                                                   absl::int128(range) /
                                                   absl::int128(num_solutions));
                }

                // Move existing solutions to their new bucket. Scanning from
                // the back keeps to_index at or above the target bucket, and
                // ranks_[0] == min_rank_ guarantees to_index never drops
                // below zero.
                int to_index = seeds_.size() - 1;
                for (int i = seeds_.size(); --i >= 0;) {
                  if (seeds_[i] == nullptr) continue;
                  while (to_index >= 0 && ranks_[to_index] > seeds_[i]->rank) {
                    --to_index;
                  }
                  seeds_[to_index] = std::move(seeds_[i]);
                }
              }

              // rank[limit] is the first > solution.rank.
              const int limit = std::upper_bound(ranks_.begin(), ranks_.end(),
                                                 solution.rank) -
                                ranks_.begin();
              CHECK_GT(limit, 0);
              seeds_[limit - 1] =
                  std::make_shared<SharedSolutionRepository<int64_t>::Solution>(
                      solution);
            };

    // All solution go through best_solutions_.Add(), so we only need
    // to process these here.
    best_solutions_.Synchronize(process_solution);
  } else {
    best_solutions_.Synchronize();
  }
  alternative_path_.Synchronize();

  // If we try to improve the alternate path without success, reset it
  // from a random path_seeds_.
  //
  // TODO(user): find a way to generate random solution and update the seeds
  // with them. Shall we do that in a continuous way or only when needed?
  if (alternative_path_.num_solutions_to_keep() > 0) {
    // Restart the alternative path ?
    const int threshold = std::max(
        100, static_cast<int>(std::sqrt(best_solutions_.num_queried())));
    if (alternative_path_.NumRecentlyNonImproving() > threshold) {
      VLOG(2) << "Done. num_non_improving: "
              << alternative_path_.NumRecentlyNonImproving()
              << " achieved: " << alternative_path_.GetBestRank() << " / "
              << best_solutions_.GetBestRank();
      alternative_path_.ClearSolutionsAndIncreaseSourceId();
    }

    // If we restarted, or we are at the beginning, pick a seed for the path.
    if (alternative_path_.NumSolutions() == 0) {
      absl::MutexLock mutex_lock(mutex_);

      // Pick random bucket with bias. If the bucket is empty, we will scan
      // "worse" bucket until we find a solution. We never pick bucket 0.
      if (seeds_.size() > 1) {
        // Note that LogUniform() is always inclusive.
        // TODO(user): Shall we bias even more?
        int index = 1 + absl::LogUniform<int>(random, 0, seeds_.size() - 2);
        for (; index < seeds_.size(); ++index) {
          if (seeds_[index] != nullptr) {
            alternative_path_.Add(*seeds_[index]);
            alternative_path_.Synchronize();
            VLOG(2) << "RESTART bucket=" << index << "/" << seeds_.size()
                    << " rank=" << alternative_path_.GetSolution(0)->rank
                    << " from_optimal="
                    << alternative_path_.GetSolution(0)->rank - min_rank_;
            break;
          }
        }
        // The last bucket should never be empty.
        CHECK(seeds_.back() != nullptr);
        CHECK_LT(index, seeds_.size());
      }
    }
  }
}
2020-10-22 23:36:58 +02:00
void SharedLPSolutionRepository::NewLPSolution(
std::vector<double> lp_solution) {
if (lp_solution.empty()) return;
// Add this solution to the pool.
auto solution =
std::make_shared<SharedSolutionRepository<double>::Solution>();
solution->variable_values = std::move(lp_solution);
2019-07-24 16:49:38 -07:00
// We always prefer to keep the solution from the last synchronize batch.
{
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
solution->rank = -num_synchronization_;
++num_added_;
new_solutions_.push_back(solution);
}
}
2023-06-26 18:15:28 +02:00
void SharedIncompleteSolutionManager::AddSolution(
const std::vector<double>& lp_solution) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
2023-06-26 18:15:28 +02:00
++num_added_;
solutions_.push_back(lp_solution);
if (solutions_.size() > 100) solutions_.pop_front();
}
bool SharedIncompleteSolutionManager::HasSolution() const {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
2020-06-05 16:11:35 +02:00
return !solutions_.empty();
}
2023-06-26 18:15:28 +02:00
std::vector<double> SharedIncompleteSolutionManager::PopLast() {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
2023-06-26 18:15:28 +02:00
if (solutions_.empty()) return {};
2020-06-05 16:11:35 +02:00
2023-06-26 18:15:28 +02:00
++num_queried_;
std::vector<double> solution = std::move(solutions_.back());
2020-06-05 16:11:35 +02:00
solutions_.pop_back();
return solution;
}
// Fetches (or lazily creates) the shared components this manager needs from
// the model, and reserves a throttled-log id for objective-bound messages.
SharedResponseManager::SharedResponseManager(Model* model)
    : parameters_(*model->GetOrCreate<SatParameters>()),
      wall_timer_(*model->GetOrCreate<WallTimer>()),
      shared_time_limit_(model->GetOrCreate<ModelSharedTimeLimit>()),
      random_(model->GetOrCreate<ModelRandomGenerator>()),
      solution_pool_(parameters_),
      logger_(model->GetOrCreate<SolverLogger>()) {
  bounds_logging_id_ = logger_->GetNewThrottledId();
}
2024-11-29 14:27:36 +01:00
void SharedResponseManager::FillSolveStatsInResponse(
    Model* model, CpSolverResponse* response) {
  if (model == nullptr) return;
  absl::MutexLock lock(mutex_);

  // Each registered postprocessor fills its own statistics fields.
  for (const auto& fill_stats : statistics_postprocessors_) {
    fill_stats(model, response);
  }
}
2024-02-01 22:13:47 +01:00
void SharedResponseManager::LogMessage(absl::string_view prefix,
                                       absl::string_view message) {
  absl::MutexLock lock(mutex_);
  // Prefix every line with "#<prefix>" and the elapsed wall time.
  const std::string line =
      absl::StrFormat("#%-5s %6.2fs %s", prefix, wall_timer_.Get(), message);
  SOLVER_LOG(logger_, line);
}
void SharedResponseManager::LogMessageWithThrottling(
    absl::string_view prefix, absl::string_view message) {
  absl::MutexLock lock(mutex_);

  // Get or lazily create a throttled-log id keyed by the message prefix.
  int id;
  const auto it = throttling_ids_.find(prefix);
  if (it != throttling_ids_.end()) {
    id = it->second;
  } else {
    id = throttling_ids_[prefix] = logger_->GetNewThrottledId();
  }
  logger_->ThrottledLog(id, absl::StrFormat("#%-5s %6.2fs %s", prefix,
                                            wall_timer_.Get(), message));
}
2022-02-11 15:02:47 +01:00
bool SharedResponseManager::LoggingIsEnabled() const {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
return logger_->LoggingIsEnabled();
2022-02-11 15:02:47 +01:00
}
void SharedResponseManager::InitializeObjective(const CpModelProto& cp_model) {
  // Without an objective the manager tracks feasibility only.
  if (!cp_model.has_objective()) {
    objective_or_null_ = nullptr;
    return;
  }
  objective_or_null_ = &cp_model.objective();

  // Seed the inner bounds from the objective's declared domain.
  const Domain domain = ReadDomainFromProto(cp_model.objective());
  if (!domain.IsEmpty()) {
    UpdateInnerObjectiveBounds("initial_domain", IntegerValue(domain.Min()),
                               IntegerValue(domain.Max()));
  }
}
void SharedResponseManager::SetSynchronizationMode(bool always_synchronize) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
always_synchronize_ = always_synchronize;
}
void SharedResponseManager::SetUpdateGapIntegralOnEachChange(bool set) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
2021-01-08 09:49:56 +01:00
update_integral_on_each_change_ = set;
}
void SharedResponseManager::UpdateGapIntegral() {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
UpdateGapIntegralInternal();
2021-01-08 09:49:56 +01:00
}
void SharedResponseManager::UpdateGapIntegralInternal() {
  if (objective_or_null_ == nullptr) return;

  const double current_time = shared_time_limit_->GetElapsedDeterministicTime();
  const double time_delta = current_time - last_gap_integral_time_stamp_;

  // We use the log of the absolute objective gap.
  //
  // Using the log should count no solution as just log(2*64) = 18, and
  // otherwise just compare order of magnitude which seems nice. Also, It is
  // more easy to compare the primal integral with the total time.
  const CpObjectiveProto& obj = *objective_or_null_;
  const double factor =
      obj.scaling_factor() != 0.0 ? std::abs(obj.scaling_factor()) : 1.0;
  // Rectangle rule: the integral is advanced using the gap as it was at the
  // previous update, so last_absolute_gap_ must be read before it is
  // refreshed below.
  const double bounds_delta = std::log(1 + factor * last_absolute_gap_);
  gap_integral_ += time_delta * bounds_delta;

  // Update with new value.
  last_gap_integral_time_stamp_ = current_time;
  last_absolute_gap_ =
      std::max(0.0, static_cast<double>(inner_objective_upper_bound_) -
                        static_cast<double>(inner_objective_lower_bound_));
}
void SharedResponseManager::SetGapLimitsFromParameters(
const SatParameters& parameters) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
if (objective_or_null_ == nullptr) return;
absolute_gap_limit_ = parameters.absolute_gap_limit();
relative_gap_limit_ = parameters.relative_gap_limit();
}
void SharedResponseManager::TestGapLimitsIfNeeded() {
  // This is called on each internal limit change, so it is a good place to
  // update the integral. Note that this is not called at the end of the search
  // though.
  if (update_integral_on_each_change_) UpdateGapIntegralInternal();

  // Abort if there is no limit set, if the gap is not defined or if we already
  // proved optimality or infeasibility.
  if (absolute_gap_limit_ == 0 && relative_gap_limit_ == 0) return;
  if (best_solution_objective_value_ >= kMaxIntegerValue) return;
  if (inner_objective_lower_bound_ <= kMinIntegerValue) return;
  if (inner_objective_lower_bound_ > inner_objective_upper_bound_) return;

  // Compute the gap in the user-visible (scaled) objective space.
  const CpObjectiveProto& obj = *objective_or_null_;
  const double user_best =
      ScaleObjectiveValue(obj, best_solution_objective_value_);
  const double user_bound =
      ScaleObjectiveValue(obj, inner_objective_lower_bound_);
  const double gap = std::abs(user_best - user_bound);
  if (gap <= absolute_gap_limit_) {
    SOLVER_LOG(logger_, "Absolute gap limit of ", absolute_gap_limit_,
               " reached.");
    UpdateBestStatus(CpSolverStatus::OPTIMAL);

    // Note(user): Some code path in single-thread assumes that the problem
    // can only be solved when they have proven infeasibility and do not check
    // the ProblemIsSolved() method. So we force a stop here.
    if (always_synchronize_) shared_time_limit_->Stop();
  }
  if (gap / std::max(1.0, std::abs(user_best)) < relative_gap_limit_) {
    SOLVER_LOG(logger_, "Relative gap limit of ", relative_gap_limit_,
               " reached.");
    UpdateBestStatus(CpSolverStatus::OPTIMAL);

    // Same as above.
    if (always_synchronize_) shared_time_limit_->Stop();
  }
}
// Tightens the inner objective bounds with [lb, ub] and fires the relevant
// callbacks. Detects "lb > ub" as a proof of optimality/infeasibility.
void SharedResponseManager::UpdateInnerObjectiveBounds(
    const std::string& update_info, IntegerValue lb, IntegerValue ub) {
  absl::MutexLock mutex_lock(mutex_);
  CHECK(objective_or_null_ != nullptr);

  // The problem is already solved!
  //
  // TODO(user): A thread might not be notified right away that the new bounds
  // that it is pushing make the problem infeasible. Fix that. For now we just
  // abort early here to avoid logging the "#Done" message multiple times.
  if (inner_objective_lower_bound_ > inner_objective_upper_bound_) {
    return;
  }

  // Only strictly improving bounds are recorded.
  const bool ub_change = ub < inner_objective_upper_bound_;
  const bool lb_change = lb > inner_objective_lower_bound_;
  if (!lb_change && !ub_change) return;

  if (lb_change) {
    // When the improving problem is infeasible, it is possible to report
    // arbitrary high inner_objective_lower_bound_. We make sure it never cross
    // the current best solution, so that we always report globally valid lower
    // bound.
    DCHECK_LE(inner_objective_upper_bound_, best_solution_objective_value_);
    inner_objective_lower_bound_ =
        std::min(best_solution_objective_value_, lb.value());
  }

  if (ub_change) {
    inner_objective_upper_bound_ = ub.value();
  }

  // Publish immediately in single-thread / always-synchronize mode.
  if (always_synchronize_) {
    synchronized_inner_objective_lower_bound_ =
        IntegerValue(inner_objective_lower_bound_);
    synchronized_inner_objective_upper_bound_ =
        IntegerValue(inner_objective_upper_bound_);
  }

  // Notify status-change observers; crossed bounds mean the search is over.
  if (!status_change_callbacks_.empty()) {
    SolverStatusChangeInfo info = GetSolverStatusChangeInfo();
    info.change_info = update_info;
    if (inner_objective_lower_bound_ > inner_objective_upper_bound_) {
      info.solved = true;
    } else {
      info.new_lower_bound = lb_change;
      info.new_upper_bound = ub_change;
    }
    for (const auto& callback : status_change_callbacks_) {
      callback(info);
    }
  }

  // Crossed bounds: OPTIMAL if we already have a solution, else INFEASIBLE.
  if (inner_objective_lower_bound_ > inner_objective_upper_bound_) {
    if (best_status_ == CpSolverStatus::FEASIBLE ||
        best_status_ == CpSolverStatus::OPTIMAL) {
      UpdateBestStatus(CpSolverStatus::OPTIMAL);
    } else {
      UpdateBestStatus(CpSolverStatus::INFEASIBLE);
    }
    if (update_integral_on_each_change_) UpdateGapIntegralInternal();
    return;
  }

  // Report an improved (scaled) lower bound to the best-bound observers.
  if (!best_bound_callbacks_.empty()) {
    if (lb_change) {
      const CpObjectiveProto& obj = *objective_or_null_;
      double new_lb = ScaleObjectiveValue(obj, inner_objective_lower_bound_);
      for (const auto& callback_entry : best_bound_callbacks_) {
        callback_entry.second(new_lb);
      }
    }
  }
  TestGapLimitsIfNeeded();
}
// Invariant: the status always start at UNKNOWN and can only evolve as follow:
// UNKNOWN -> FEASIBLE -> OPTIMAL
// UNKNOWN -> INFEASIBLE
void SharedResponseManager::NotifyThatImprovingProblemIsInfeasible(
    absl::string_view worker_info) {
  absl::MutexLock mutex_lock(mutex_);
  if (best_status_ == CpSolverStatus::FEASIBLE ||
      best_status_ == CpSolverStatus::OPTIMAL) {
    // We also use this status to indicate that we enumerated all solutions to
    // a feasible problem.
    UpdateBestStatus(CpSolverStatus::OPTIMAL);

    // We just proved that the best solution cannot be improved upon, so we
    // have a new lower bound.
    inner_objective_lower_bound_ = best_solution_objective_value_;
    if (update_integral_on_each_change_) UpdateGapIntegralInternal();
  } else {
    // No solution was ever found, so the problem itself is infeasible.
    CHECK_EQ(num_solutions_, 0);
    UpdateBestStatus(CpSolverStatus::INFEASIBLE);
  }

  // Tell status-change observers that the search is over.
  if (!status_change_callbacks_.empty()) {
    SolverStatusChangeInfo info = GetSolverStatusChangeInfo();
    info.change_info = worker_info;
    info.solved = true;
    for (const auto& callback : status_change_callbacks_) {
      callback(info);
    }
  }
}
void SharedResponseManager::AddUnsatCore(const std::vector<int>& core) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
unsat_cores_ = core;
}
IntegerValue SharedResponseManager::GetInnerObjectiveLowerBound() {
  absl::MutexLock lock(mutex_);
  // Returns the last published (synchronized) lower bound.
  const IntegerValue bound = synchronized_inner_objective_lower_bound_;
  return bound;
}
IntegerValue SharedResponseManager::GetInnerObjectiveUpperBound() {
  absl::MutexLock lock(mutex_);
  // Returns the last published (synchronized) upper bound.
  const IntegerValue bound = synchronized_inner_objective_upper_bound_;
  return bound;
}
void SharedResponseManager::Synchronize() {
  // The pool has its own internal mutex; synchronize it before taking ours
  // to keep the lock scopes disjoint.
  solution_pool_.Synchronize(*random_);

  absl::MutexLock mutex_lock(mutex_);
  // Publish the current inner bounds and status for lock-free readers of the
  // "synchronized" copies.
  synchronized_inner_objective_lower_bound_ =
      IntegerValue(inner_objective_lower_bound_);
  synchronized_inner_objective_upper_bound_ =
      IntegerValue(inner_objective_upper_bound_);
  synchronized_best_status_ = best_status_;
  if (solution_pool_.BestSolutions().NumSolutions() > 0) {
    first_solution_solvers_should_stop_ = true;
  }
  logger_->FlushPendingThrottledLogs();
}
2019-07-03 13:10:22 +02:00
IntegerValue SharedResponseManager::BestSolutionInnerObjectiveValue() {
  absl::MutexLock lock(mutex_);
  // Inner (unscaled) objective of the best solution found so far.
  const IntegerValue value(best_solution_objective_value_);
  return value;
}
double SharedResponseManager::GapIntegral() const {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
return gap_integral_;
2019-08-06 15:56:48 -07:00
}
void SharedResponseManager::AddSolutionPostprocessor(
std::function<void(std::vector<int64_t>*)> postprocessor) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
solution_postprocessors_.push_back(postprocessor);
}
// Registers a postprocessor applied to every response (e.g. timing info).
void SharedResponseManager::AddResponsePostprocessor(
    std::function<void(CpSolverResponse*)> postprocessor) {
  absl::MutexLock mutex_lock(mutex_);
  // Move the by-value std::function into the list to avoid an extra copy.
  postprocessors_.push_back(std::move(postprocessor));
}
// Registers a postprocessor that runs last on the final response (these are
// the ones that print the final log).
void SharedResponseManager::AddFinalResponsePostprocessor(
    std::function<void(CpSolverResponse*)> postprocessor) {
  absl::MutexLock mutex_lock(mutex_);
  // Move the by-value std::function into the list to avoid an extra copy.
  final_postprocessors_.push_back(std::move(postprocessor));
}
2024-11-29 14:27:36 +01:00
// Registers a postprocessor that copies solve statistics from a Model into a
// response; used by FillSolveStatsInResponse().
void SharedResponseManager::AddStatisticsPostprocessor(
    std::function<void(Model*, CpSolverResponse*)> postprocessor) {
  absl::MutexLock mutex_lock(mutex_);
  // Move the by-value std::function into the list to avoid an extra copy.
  statistics_postprocessors_.push_back(std::move(postprocessor));
}
int SharedResponseManager::AddSolutionCallback(
std::function<void(const CpSolverResponse&)> callback) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
const int id = next_callback_id_++;
callbacks_.emplace_back(id, std::move(callback));
return id;
}
void SharedResponseManager::UnregisterCallback(int callback_id) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
for (int i = 0; i < callbacks_.size(); ++i) {
if (callbacks_[i].first == callback_id) {
callbacks_.erase(callbacks_.begin() + i);
return;
}
}
LOG(DFATAL) << "Callback id " << callback_id << " not registered.";
}
int SharedResponseManager::AddLogCallback(
std::function<std::string(const CpSolverResponse&)> callback) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
const int id = next_search_log_callback_id_++;
search_log_callbacks_.emplace_back(id, std::move(callback));
return id;
}
2025-11-05 13:55:12 +01:00
void SharedResponseManager::AddStatusChangeCallback(
    std::function<void(const SolverStatusChangeInfo&)> callback) {
  absl::MutexLock lock(mutex_);
  // Status-change callbacks have no id and cannot be unregistered.
  status_change_callbacks_.push_back(std::move(callback));
}
void SharedResponseManager::UnregisterLogCallback(int callback_id) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
for (int i = 0; i < search_log_callbacks_.size(); ++i) {
if (search_log_callbacks_[i].first == callback_id) {
search_log_callbacks_.erase(search_log_callbacks_.begin() + i);
return;
}
}
LOG(DFATAL) << "Callback id " << callback_id << " not registered.";
}
int SharedResponseManager::AddBestBoundCallback(
std::function<void(double)> callback) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
const int id = next_best_bound_callback_id_++;
best_bound_callbacks_.emplace_back(id, std::move(callback));
return id;
}
void SharedResponseManager::UnregisterBestBoundCallback(int callback_id) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
for (int i = 0; i < best_bound_callbacks_.size(); ++i) {
if (best_bound_callbacks_[i].first == callback_id) {
best_bound_callbacks_.erase(best_bound_callbacks_.begin() + i);
return;
}
}
LOG(DFATAL) << "Callback id " << callback_id << " not registered.";
}
// Builds a response from the given (inner) solution values, applying
// subsolver merges and the registered solution/response postprocessors.
// Requires mutex_ to be held by the caller.
CpSolverResponse SharedResponseManager::GetResponseInternal(
    absl::Span<const int64_t> variable_values,
    absl::string_view solution_info) {
  CpSolverResponse result;
  result.set_status(best_status_);
  if (!unsat_cores_.empty()) {
    DCHECK_EQ(best_status_, CpSolverStatus::INFEASIBLE);
    result.mutable_sufficient_assumptions_for_infeasibility()->Assign(
        unsat_cores_.begin(), unsat_cores_.end());
  }
  FillObjectiveValuesInResponse(&result);
  result.set_solution_info(solution_info);

  // Tricky: We copy the solution now for the case where MergeFrom() below
  // override it!
  //
  // TODO(user): Fix. This is messy, we should really just override stats not
  // important things like solution or status with the MergeFrom() below.
  if (best_status_ == CpSolverStatus::FEASIBLE ||
      best_status_ == CpSolverStatus::OPTIMAL) {
    result.mutable_solution()->Assign(variable_values.begin(),
                                      variable_values.end());
  }

  // Note that we allow subsolver_responses_ to override the fields set above.
  // That is the status, solution_info and objective values...
  if (!subsolver_responses_.empty()) {
    result.MergeFrom(subsolver_responses_.front());
  }

  if (result.status() == CpSolverStatus::FEASIBLE ||
      result.status() == CpSolverStatus::OPTIMAL) {
    // We need to copy the solution before we postsolve it.
    // Postprocessors run in reverse registration order.
    std::vector<int64_t> solution(result.solution().begin(),
                                  result.solution().end());
    for (int i = solution_postprocessors_.size(); --i >= 0;) {
      solution_postprocessors_[i](&solution);
    }
    result.mutable_solution()->Assign(solution.begin(), solution.end());
  }

  // Apply response postprocessor to set things like timing information.
  for (int i = postprocessors_.size(); --i >= 0;) {
    postprocessors_[i](&result);
  }
  return result;
}
// Builds the user-facing response from the best solution (if any), optionally
// copies all postsolved solutions, then runs the final postprocessors.
CpSolverResponse SharedResponseManager::GetResponse() {
  absl::MutexLock mutex_lock(mutex_);
  CpSolverResponse result;
  if (solution_pool_.BestSolutions().NumSolutions() == 0) {
    result = GetResponseInternal({}, "");
  } else {
    std::shared_ptr<const SharedSolutionRepository<int64_t>::Solution>
        solution = solution_pool_.BestSolutions().GetSolution(0);
    result = GetResponseInternal(solution->variable_values, solution->info);
  }

  // If this is true, we postsolve and copy all of our solutions.
  if (parameters_.fill_additional_solutions_in_response()) {
    std::vector<int64_t> temp;
    const int size = solution_pool_.BestSolutions().NumSolutions();
    for (int i = 0; i < size; ++i) {
      const auto solution = solution_pool_.BestSolutions().GetSolution(i);
      temp = solution->variable_values;
      // Use a distinct index: the original inner loop reused `i`, shadowing
      // the outer loop variable. Postprocessors run in reverse order.
      for (int j = solution_postprocessors_.size(); --j >= 0;) {
        solution_postprocessors_[j](&temp);
      }
      result.add_additional_solutions()->mutable_values()->Assign(temp.begin(),
                                                                  temp.end());
    }
  }

  // final postprocessors will print out the final log. They must be called
  // last.
  for (int i = final_postprocessors_.size(); --i >= 0;) {
    final_postprocessors_[i](&result);
  }
  return result;
}
void SharedResponseManager::AppendResponseToBeMerged(
const CpSolverResponse& response) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
return subsolver_responses_.push_back(response);
}
// Fills objective value, best bound and gap integral in `response`, scaled to
// the user objective space. Requires mutex_ to be held by the caller.
void SharedResponseManager::FillObjectiveValuesInResponse(
    CpSolverResponse* response) const {
  if (objective_or_null_ == nullptr) return;
  const CpObjectiveProto& obj = *objective_or_null_;
  if (best_status_ == CpSolverStatus::INFEASIBLE) {
    response->clear_objective_value();
    response->clear_best_objective_bound();
    response->clear_inner_objective_lower_bound();
    return;
  }

  // Set the objective value.
  // If we don't have any solution, we use our inner bound.
  if (best_status_ == CpSolverStatus::UNKNOWN) {
    response->set_objective_value(
        ScaleObjectiveValue(obj, inner_objective_upper_bound_));
  } else {
    response->set_objective_value(
        ScaleObjectiveValue(obj, best_solution_objective_value_));
  }

  // Update the best bound in the response.
  response->set_inner_objective_lower_bound(
      ScaleInnerObjectiveValue(obj, inner_objective_lower_bound_));
  response->set_best_objective_bound(
      ScaleObjectiveValue(obj, inner_objective_lower_bound_));

  // Update the primal integral.
  response->set_gap_integral(gap_integral_);
}
// Records a new feasible solution: stores it in the pool, updates the inner
// objective bounds and status, logs it, and fires the registered callbacks.
// Returns the pooled solution (possibly null if the pool rejected it).
std::shared_ptr<const SharedSolutionRepository<int64_t>::Solution>
SharedResponseManager::NewSolution(absl::Span<const int64_t> solution_values,
                                   absl::string_view solution_info,
                                   Model* model, int source_id) {
  absl::MutexLock mutex_lock(mutex_);
  std::shared_ptr<const SharedSolutionRepository<int64_t>::Solution> ret;

  // For SAT problems, we add the solution to the solution pool for retrieval
  // later.
  if (objective_or_null_ == nullptr) {
    SharedSolutionRepository<int64_t>::Solution solution;
    solution.variable_values.assign(solution_values.begin(),
                                    solution_values.end());
    solution.info = solution_info;
    solution.source_id = source_id;
    ret = solution_pool_.Add(solution);
  } else {
    const int64_t objective_value =
        ComputeInnerObjective(*objective_or_null_, solution_values);

    // Add this solution to the pool, even if it is not improving.
    SharedSolutionRepository<int64_t>::Solution solution;
    solution.variable_values.assign(solution_values.begin(),
                                    solution_values.end());
    solution.rank = objective_value;
    solution.info = solution_info;
    solution.source_id = source_id;
    ret = solution_pool_.Add(solution);

    // Ignore any non-strictly improving solution.
    if (objective_value > inner_objective_upper_bound_) return ret;

    // Our inner_objective_lower_bound_ should be a globally valid bound, until
    // the problem become infeasible (i.e the lb > ub) in which case the bound
    // is no longer globally valid. Here, because we have a strictly improving
    // solution, we shouldn't be in the infeasible setting yet.
    DCHECK_GE(objective_value, inner_objective_lower_bound_);
    DCHECK_LT(objective_value, best_solution_objective_value_);
    best_solution_objective_value_ = objective_value;

    // Update the new bound.
    inner_objective_upper_bound_ = objective_value - 1;
  }

  // In single thread, no one is synchronizing the solution manager, so we
  // should do it from here.
  if (always_synchronize_) {
    solution_pool_.Synchronize(*random_);
    first_solution_solvers_should_stop_ = true;
  }

  // Note that the objective will be filled by
  // FillObjectiveValuesInResponse().
  if (objective_or_null_ == nullptr && !parameters_.enumerate_all_solutions()) {
    UpdateBestStatus(CpSolverStatus::OPTIMAL);
  } else {
    UpdateBestStatus(CpSolverStatus::FEASIBLE);
  }

  // Mark model as OPTIMAL if the inner bound crossed.
  if (objective_or_null_ != nullptr &&
      inner_objective_lower_bound_ > inner_objective_upper_bound_) {
    UpdateBestStatus(CpSolverStatus::OPTIMAL);
  }

  // Logging.
  ++num_solutions_;

  // Compute the post-solved response once. Only needed when a log or solution
  // callback will actually consume it.
  CpSolverResponse tmp_postsolved_response;
  if ((!search_log_callbacks_.empty() && logger_->LoggingIsEnabled()) ||
      !callbacks_.empty()) {
    tmp_postsolved_response =
        GetResponseInternal(solution_values, solution_info);

    // Same as FillSolveStatsInResponse() but since we already hold the
    // mutex...
    if (model != nullptr && !statistics_postprocessors_.empty()) {
      for (const auto& set_stats : statistics_postprocessors_) {
        set_stats(model, &tmp_postsolved_response);
      }
    }
  }

  // Build the log/status message for this solution.
  std::string solution_message(solution_info);
  if (tmp_postsolved_response.num_booleans() > 0) {
    absl::StrAppend(&solution_message, " (fixed_bools=",
                    tmp_postsolved_response.num_fixed_booleans(), "/",
                    tmp_postsolved_response.num_booleans(), ")");
  }

  if (logger_->LoggingIsEnabled() && !search_log_callbacks_.empty()) {
    for (const auto& pair : search_log_callbacks_) {
      absl::StrAppend(&solution_message, " ",
                      pair.second(tmp_postsolved_response));
    }
  }

  // Notify status-change observers of the new best solution.
  if (!status_change_callbacks_.empty()) {
    SolverStatusChangeInfo info = GetSolverStatusChangeInfo();
    info.change_info = solution_message;
    info.new_best_solution = true;
    for (const auto& callback : status_change_callbacks_) {
      callback(info);
    }
  }

  // Call callbacks.
  // Note that we cannot call function that try to get the mutex_ here.
  TestGapLimitsIfNeeded();
  for (const auto& pair : callbacks_) {
    pair.second(tmp_postsolved_response);
  }

#if !defined(__PORTABLE_PLATFORM__)
  // We protect solution dumping with log_updates as LNS subsolvers share
  // another solution manager, and we do not want to dump those.
  if (logger_->LoggingIsEnabled() &&
      absl::GetFlag(FLAGS_cp_model_dump_solutions)) {
    const std::string file =
        absl::StrCat(dump_prefix_, "solution_", num_solutions_, ".pb.txt");
    LOG(INFO) << "Dumping solution to '" << file << "'.";

    // Note that here we only want to dump the non-post-solved solution.
    // This is only used for debugging.
    CpSolverResponse response;
    response.mutable_solution()->Assign(solution_values.begin(),
                                        solution_values.end());
    CHECK_OK(file::SetTextProto(file, response, file::Defaults()));
  }
#endif  // __PORTABLE_PLATFORM__

  return ret;
}
// Returns true once the search is over: either optimality (also used for
// solved pure-satisfaction models, see NewSolution()) or infeasibility was
// proven. Only looks at the *synchronized* status, so in deterministic mode
// this reflects the state as of the last Synchronize() call.
bool SharedResponseManager::ProblemIsSolved() const {
  absl::MutexLock mutex_lock(mutex_);
  return synchronized_best_status_ == CpSolverStatus::OPTIMAL ||
         synchronized_best_status_ == CpSolverStatus::INFEASIBLE;
}
// Sets the internal best status. When always_synchronize_ is true the new
// status is made visible to readers immediately; otherwise it only becomes
// visible at the next synchronization point.
void SharedResponseManager::UpdateBestStatus(const CpSolverStatus& status) {
  best_status_ = status;
  if (always_synchronize_) {
    synchronized_best_status_ = status;
  }
}
// Builds the objective part of a status-change notification. For a pure
// satisfaction problem (no objective) the default-constructed info is
// returned. All values are reported in the user (scaled) objective space.
SolverStatusChangeInfo SharedResponseManager::GetSolverStatusChangeInfo() {
  SolverStatusChangeInfo result;
  if (objective_or_null_ == nullptr) return result;
  const CpObjectiveProto& obj = *objective_or_null_;
  const double best = ScaleObjectiveValue(obj, best_solution_objective_value_);
  double lb = ScaleObjectiveValue(obj, inner_objective_lower_bound_);
  double ub = ScaleObjectiveValue(obj, inner_objective_upper_bound_);
  // A negative scaling factor flips the optimization direction, so the scaled
  // bounds must be swapped to keep lb <= ub in user space.
  if (obj.scaling_factor() < 0) {
    std::swap(lb, ub);
  }
  result.best_objective_value = best;
  result.cur_objective_value_lb = lb;
  result.cur_objective_value_ub = ub;
  return result;
}
// Initializes the shared bounds with each variable's initial domain, and
// precomputes the symmetry data (orbits and representatives) if the model
// carries symmetry information.
SharedBoundsManager::SharedBoundsManager(const CpModelProto& model_proto)
    : num_variables_(model_proto.variables_size()),
      model_proto_(model_proto),
      lower_bounds_(num_variables_, std::numeric_limits<int64_t>::min()),
      upper_bounds_(num_variables_, std::numeric_limits<int64_t>::max()),
      synchronized_lower_bounds_(num_variables_,
                                 std::numeric_limits<int64_t>::min()),
      synchronized_upper_bounds_(num_variables_,
                                 std::numeric_limits<int64_t>::max()) {
  changed_variables_since_last_synchronize_.ClearAndResize(num_variables_);

  // Seed both the working and the synchronized bounds with the first/last
  // value of each variable's domain.
  for (int i = 0; i < num_variables_; ++i) {
    lower_bounds_[i] = model_proto.variables(i).domain(0);
    const int domain_size = model_proto.variables(i).domain_size();
    upper_bounds_[i] = model_proto.variables(i).domain(domain_size - 1);
    synchronized_lower_bounds_[i] = lower_bounds_[i];
    synchronized_upper_bounds_[i] = upper_bounds_[i];
  }

  // Fill symmetry data.
  if (model_proto.has_symmetry()) {
    const int num_vars = model_proto.variables().size();
    std::vector<std::unique_ptr<SparsePermutation>> generators;
    for (const SparsePermutationProto& perm :
         model_proto.symmetry().permutations()) {
      generators.emplace_back(CreateSparsePermutationFromProto(num_vars, perm));
    }
    if (generators.empty()) return;

    // Get orbits in term of IntegerVariable.
    var_to_orbit_index_ = GetOrbits(num_vars, generators);

    // Fill orbits_ (orbit_index -> list of variables in that orbit).
    std::vector<int> keys;
    std::vector<int> values;
    for (int var = 0; var < num_vars; ++var) {
      const int orbit_index = var_to_orbit_index_[var];
      if (orbit_index == -1) continue;
      keys.push_back(orbit_index);
      values.push_back(var);
    }
    if (keys.empty()) return;
    has_symmetry_ = true;
    orbits_.ResetFromFlatMapping(keys, values);

    // Fill representative: a variable not in any orbit represents itself,
    // otherwise the first variable of its orbit is the representative.
    var_to_representative_.resize(num_vars);
    for (int var = 0; var < num_vars; ++var) {
      const int orbit_index = var_to_orbit_index_[var];
      if (orbit_index == -1) {
        var_to_representative_[var] = var;
      } else {
        var_to_representative_[var] = orbits_[orbit_index][0];
      }
    }
  }
}
// Imports the given per-variable bound updates into the shared state, keeping
// only those that strictly improve the current shared bounds. Indices past
// num_variables_ are ignored. With symmetry, only the orbit representative is
// updated here; the symmetric copies are expanded in GetChangedBounds().
void SharedBoundsManager::ReportPotentialNewBounds(
    const std::string& worker_name, absl::Span<const int> variables,
    absl::Span<const int64_t> new_lower_bounds,
    absl::Span<const int64_t> new_upper_bounds) {
  CHECK_EQ(variables.size(), new_lower_bounds.size());
  CHECK_EQ(variables.size(), new_upper_bounds.size());
  int num_improvements = 0;
  int num_symmetric_improvements = 0;

  absl::MutexLock mutex_lock(mutex_);
  for (int i = 0; i < variables.size(); ++i) {
    int var = variables[i];
    if (var >= num_variables_) continue;

    // In the presence of symmetry we only update the representative.
    if (has_symmetry_) {
      var = var_to_representative_[var];
    }
    const int64_t old_lb = lower_bounds_[var];
    const int64_t old_ub = upper_bounds_[var];
    const int64_t new_lb = new_lower_bounds[i];
    const int64_t new_ub = new_upper_bounds[i];
    const bool changed_lb = new_lb > old_lb;
    const bool changed_ub = new_ub < old_ub;
    if (!changed_lb && !changed_ub) continue;

    VLOG(3) << worker_name << " var=" << var << " [" << old_lb << "," << old_ub
            << "] -> [" << new_lb << "," << new_ub << "]";

    if (changed_lb) {
      // In debug mode, check that the new bound does not exclude the known
      // debug solution (which would mean an invalid deduction).
      if (DEBUG_MODE && !debug_solution_.empty()) {
        CHECK_LE(new_lb, debug_solution_[var]) << worker_name << " var=" << var;
      }
      lower_bounds_[var] = new_lb;
    }
    if (changed_ub) {
      if (DEBUG_MODE && !debug_solution_.empty()) {
        CHECK_GE(new_ub, debug_solution_[var]) << worker_name << " var=" << var;
      }
      upper_bounds_[var] = new_ub;
    }
    changed_variables_since_last_synchronize_.Set(var);
    num_improvements++;
    if (has_symmetry_ && variables[i] != var) {
      // We count -1 so that num_improvements + num_symmetric_improvements
      // corresponds to the number of actual bound improvement.
      num_symmetric_improvements +=
          orbits_[var_to_orbit_index_[var]].size() - 1;
    }
  }
  if (num_improvements > 0) {
    total_num_improvements_ += num_improvements;
    VLOG(3) << total_num_improvements_ << "/" << num_variables_;
    bounds_exported_[worker_name].num_exported += num_improvements;
    bounds_exported_[worker_name].num_symmetric += num_symmetric_improvements;
    if (absl::GetFlag(FLAGS_cp_model_dump_tightened_models)) {
      // Debug-only: dump a copy of the model with the current shared bounds
      // intersected into each variable's domain.
      CpModelProto tight_model = model_proto_;
      for (int i = 0; i < num_variables_; ++i) {
        IntegerVariableProto* var_proto = tight_model.mutable_variables(i);

        int rep = i;
        if (has_symmetry_) rep = var_to_representative_[i];
        const Domain domain = ReadDomainFromProto(*var_proto)
                                  .IntersectionWith(Domain(lower_bounds_[rep],
                                                           upper_bounds_[rep]));
        FillDomainInProto(domain, var_proto);
      }
      // NOTE(review): "tighened" looks like a typo for "tightened"; kept
      // as-is since external tooling may depend on the exact filename.
      const std::string filename = absl::StrCat(dump_prefix_, "tighened_model_",
                                                export_counter_, ".pb.txt");
      LOG(INFO) << "Dumping tightened model proto to '" << filename << "'.";
      export_counter_++;
      CHECK(WriteModelProtoToFile(tight_model, filename));
    }
  }
}
// TODO(user): Because we look at the non-synchronized and up to date bounds,
// this break determinism if two solution for the same subpart comes at the same
// time.
//
// Fixes the given variables to their value in `solution`, unless any of them
// is already outside the current shared bounds, in which case the whole call
// is a no-op.
void SharedBoundsManager::FixVariablesFromPartialSolution(
    absl::Span<const int64_t> solution,
    absl::Span<const int> variables_to_fix) {
  // This function shouldn't be called if we have symmetry.
  CHECK(!has_symmetry_);
  absl::MutexLock mutex_lock(mutex_);

  // Abort if incompatible. Note that we only check the position that we are
  // about to fix. This should be enough. Otherwise we might never accept any
  // solution because the base LNS solution was not the same in some of the
  // variables that we fixed here.
  for (const int var : variables_to_fix) {
    const int64_t value = solution[var];
    if (value < lower_bounds_[var] || value > upper_bounds_[var]) {
      VLOG(1) << "Incompatibility in FixVariablesFromPartialSolution() "
              << "var: " << var << " value: " << value << " bounds: ["
              << lower_bounds_[var] << "," << upper_bounds_[var] << "]";
      return;
    }
  }

  // Fix the variables.
  for (const int var : variables_to_fix) {
    const int64_t old_lb = lower_bounds_[var];
    const int64_t old_ub = upper_bounds_[var];
    const bool changed_lb = solution[var] > old_lb;
    const bool changed_ub = solution[var] < old_ub;
    if (!changed_lb && !changed_ub) continue;

    lower_bounds_[var] = solution[var];
    upper_bounds_[var] = solution[var];
    changed_variables_since_last_synchronize_.Set(var);

    // This is problematic as we might find a different partial solution.
    // To allow for further investigation, we currently fix it to the debug
    // solution instead.
    if (DEBUG_MODE && !debug_solution_.empty()) {
      if (solution[var] != debug_solution_[var]) {
        LOG(INFO) << "Fixing to a different solution for var=" << var
                  << " debug=" << debug_solution_[var]
                  << " partial=" << solution[var];
        lower_bounds_[var] = debug_solution_[var];
        upper_bounds_[var] = debug_solution_[var];
      }
    }
  }
}
// Makes all bound improvements reported since the last call visible to
// readers, and marks the changed variables as "unseen" for every registered
// reader id.
void SharedBoundsManager::Synchronize() {
  absl::MutexLock mutex_lock(mutex_);
  for (const int var :
       changed_variables_since_last_synchronize_.PositionsSetAtLeastOnce()) {
    // With symmetry, only representatives are ever set as changed.
    DCHECK(!has_symmetry_ || var_to_representative_[var] == var);
    synchronized_lower_bounds_[var] = lower_bounds_[var];
    synchronized_upper_bounds_[var] = upper_bounds_[var];
    for (int j = 0; j < id_to_changed_variables_.size(); ++j) {
      id_to_changed_variables_[j].Set(var);
    }
  }
  changed_variables_since_last_synchronize_.ResetAllToFalse();
}
// Registers a new reader and returns its id. Any variable whose synchronized
// bounds already differ from its initial model domain is marked as changed so
// the new reader will pick up improvements that happened before registration.
int SharedBoundsManager::RegisterNewId() {
  absl::MutexLock mutex_lock(mutex_);
  const int id = id_to_changed_variables_.size();
  id_to_changed_variables_.resize(id + 1);
  id_to_changed_variables_[id].ClearAndResize(num_variables_);
  for (int var = 0; var < num_variables_; ++var) {
    const int64_t lb = model_proto_.variables(var).domain(0);
    const int domain_size = model_proto_.variables(var).domain_size();
    const int64_t ub = model_proto_.variables(var).domain(domain_size - 1);
    if (lb != synchronized_lower_bounds_[var] ||
        ub != synchronized_upper_bounds_[var]) {
      DCHECK(!has_symmetry_ || var_to_representative_[var] == var);
      id_to_changed_variables_[id].Set(var);
    }
  }
  return id;
}
2020-10-22 23:36:58 +02:00
// Fills the output vectors with the synchronized bound changes not yet seen
// by reader `id`, sorted by variable index for determinism, then clears the
// reader's change set. Symmetric copies of each bound are appended after the
// mutex is released.
void SharedBoundsManager::GetChangedBounds(
    int id, std::vector<int>* variables, std::vector<int64_t>* new_lower_bounds,
    std::vector<int64_t>* new_upper_bounds) {
  variables->clear();
  new_lower_bounds->clear();
  new_upper_bounds->clear();

  {
    absl::MutexLock mutex_lock(mutex_);
    for (const int var :
         id_to_changed_variables_[id].PositionsSetAtLeastOnce()) {
      DCHECK(!has_symmetry_ || var_to_representative_[var] == var);
      variables->push_back(var);
    }
    id_to_changed_variables_[id].ResetAllToFalse();

    // We need to report the bounds in a deterministic order as it is difficult
    // to guarantee that nothing depend on the order in which the new bounds are
    // processed.
    absl::c_sort(*variables);
    for (const int var : *variables) {
      new_lower_bounds->push_back(synchronized_lower_bounds_[var]);
      new_upper_bounds->push_back(synchronized_upper_bounds_[var]);
    }
  }

  // Now that the mutex is released, we can add all symmetric version if any.
  // Note that alternatively we could do that in the client side, but the
  // complexity will be the same, we will just save some memory that is usually
  // just reused.
  if (has_symmetry_) {
    const int old_size = variables->size();
    for (int i = 0; i < old_size; ++i) {
      const int var = (*variables)[i];
      const int orbit_index = var_to_orbit_index_[var];
      if (orbit_index == -1) continue;

      const int64_t lb = (*new_lower_bounds)[i];
      const int64_t ub = (*new_upper_bounds)[i];
      const auto orbit = orbits_[orbit_index];
      // Only representatives (the first element of their orbit) are exported
      // above; the rest of the orbit shares the same bounds.
      CHECK_EQ(var, orbit[0]);
      for (const int other : orbit.subspan(1)) {
        variables->push_back(other);
        new_lower_bounds->push_back(lb);
        new_upper_bounds->push_back(ub);
      }
    }
  }
}
void SharedBoundsManager::UpdateDomains(std::vector<Domain>* domains) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
CHECK_EQ(domains->size(), synchronized_lower_bounds_.size());
for (int var = 0; var < domains->size(); ++var) {
(*domains)[var] = (*domains)[var].IntersectionWith(Domain(
synchronized_lower_bounds_[var], synchronized_upper_bounds_[var]));
}
}
// Logs a per-worker table of how many improving bounds were shared, both
// directly ("Num") and through symmetry expansion ("Sym").
void SharedBoundsManager::LogStatistics(SolverLogger* logger) {
  absl::MutexLock mutex_lock(mutex_);
  if (!bounds_exported_.empty()) {
    std::vector<std::vector<std::string>> table;
    table.push_back({"Improving bounds shared", "Num", "Sym"});
    for (const auto& entry : bounds_exported_) {
      table.push_back({FormatName(entry.first),
                       FormatCounter(entry.second.num_exported),
                       FormatCounter(entry.second.num_symmetric)});
    }
    SOLVER_LOG(logger, FormatTable(table));
  }
}
// Returns how many improving bounds the given worker exported so far, or zero
// if the worker never exported anything.
int SharedBoundsManager::NumBoundsExported(absl::string_view worker_name) {
  absl::MutexLock mutex_lock(mutex_);
  const auto it = bounds_exported_.find(worker_name);
  return it == bounds_exported_.end() ? 0 : it->second.num_exported;
}
// Pre-allocates the per-size clause buffers and the fingerprint set so the
// steady-state streaming of clauses does not trigger reallocations.
UniqueClauseStream::UniqueClauseStream() {
  for (auto& size_buffer : clauses_by_size_) {
    size_buffer.reserve(kMaxLiteralsPerBatch);
  }
  fingerprints_.reserve(kMaxFingerprints);
}
// Adds a clause to the stream if it passes deduplication (BlockClause) and
// its lbd is within the adaptive threshold. Returns true if the clause was
// recorded, either appended to its size buffer or used to replace an already
// buffered clause of the same size.
//
// NOTE(review): BlockClause() is evaluated before the lbd check, so a clause
// rejected only for its lbd still records a fingerprint and will be blocked
// if re-added later — presumably intentional, but worth confirming.
bool UniqueClauseStream::Add(absl::Span<const int> clause, int lbd) {
  if (!BlockClause(clause) || lbd > lbd_threshold_) return false;
  std::vector<int>* buffer = MutableBufferForSize(clause.size());
  CHECK_NE(buffer, nullptr);
  if (buffer->size() + clause.size() <= kMaxLiteralsPerBatch) {
    buffer->insert(buffer->end(), clause.begin(), clause.end());
  } else {
    // Maybe replace an old buffered clause of the same size if it has a smaller
    // hash value. This means that the buffer will contain a deterministic
    // sample of the clauses added independent of insertion order.
    const int64_t replaced_clause_id =
        HashClause(clause, 1) % NumClausesOfSize(clause.size());
    absl::Span<int> replaced_clause = absl::MakeSpan(*buffer).subspan(
        replaced_clause_id * clause.size(), clause.size());
    dropped_literals_since_last_batch_ += clause.size();
    if (HashClause(clause, 2) >= HashClause(replaced_clause, 2)) return false;
    absl::c_copy(clause, replaced_clause.begin());
  }
  return true;
}
// Returns true if the clause was not seen recently and is eligible for
// sharing. As a side effect it records the clause fingerprint so the same
// clause is rejected next time. Clauses longer than kMaxClauseSize are
// rejected, as are clauses of size <= 2 (binary clauses go through a
// dedicated mechanism, see SharedClausesManager::AddBinaryClause()).
bool UniqueClauseStream::BlockClause(absl::Span<const int> clause) {
  if (clause.size() > kMaxClauseSize) return false;
  if (clause.size() <= 2) return false;
  const auto hash = HashClause(clause);
  // Note: we insert into the current generation even when the hash is in the
  // previous one, so the clause stays blocked across the next rotation.
  return fingerprints_.emplace(hash).second &&
         !old_fingerprints_.contains(hash);
}
// Moves up to kMaxLiteralsPerBatch buffered literals into a batch, shortest
// clauses first. Also performs periodic maintenance: rotates the fingerprint
// generations, and adapts the lbd acceptance threshold based on how full the
// batch was and whether clauses had to be dropped.
CompactVectorVector<int> UniqueClauseStream::NextBatch() {
  CompactVectorVector<int> batch;
  batch.reserve(kMaxLiteralsPerBatch / kMinClauseSize, kMaxLiteralsPerBatch);
  int to_fill = kMaxLiteralsPerBatch;
  for (int size = kMinClauseSize; size <= kMaxClauseSize; ++size) {
    // Buffers always hold whole clauses of the given size.
    CHECK_EQ(NumLiteralsOfSize(size) % size, 0);
    std::vector<int>* buffer = MutableBufferForSize(size);
    while (to_fill >= size && !buffer->empty()) {
      batch.Add(NextClause(size));
      to_fill -= size;
      PopClause(size);
    }
    // This size no longer fits in the batch: drop its remaining clauses and
    // count the loss so the lbd threshold can adapt below.
    if (to_fill < size) {
      dropped_literals_since_last_batch_ += buffer->size();
      buffer->clear();
    }
  }
  // Rotate fingerprint generations before the set grows too large; hashes in
  // the retired generation still block clauses for one more cycle.
  if (fingerprints_.size() >= kMaxFingerprints / 2) {
    VLOG(2) << "Clearing fingerprints: " << fingerprints_.size() / 1024 << "Ki";
    std::swap(fingerprints_, old_fingerprints_);
    fingerprints_.clear();
    fingerprints_.reserve(kMaxFingerprints);
  }
  // Adapt the lbd threshold: accept more clauses if the batch was less than
  // half full, fewer if anything had to be dropped.
  if (to_fill > kMaxLiteralsPerBatch / 2 && lbd_threshold_ < kMaxLbd) {
    lbd_threshold_ += 1;
    VLOG(2) << "Inc lbd: " << lbd_threshold_;
  } else if (dropped_literals_since_last_batch_ > 0 &&
             lbd_threshold_ > kMinLbd) {
    lbd_threshold_ -= 1;
    VLOG(2) << "Dec lbd: " << lbd_threshold_;
  }
  dropped_literals_since_last_batch_ = 0;
  return batch;
}
// Returns the total number of literals currently buffered across all clause
// sizes.
int UniqueClauseStream::NumBufferedLiterals() const {
  int total = 0;
  for (const auto& size_buffer : clauses_by_size_) {
    total += size_buffer.size();
  }
  return total;
}
// Computes an order-independent fingerprint of a clause: per-literal hashes
// are combined with XOR (so literal permutations hash alike) and the clause
// size is mixed into the seed value.
size_t UniqueClauseStream::HashClause(absl::Span<const int> clause,
                                      size_t hash_seed) {
  size_t hash = absl::HashOf(hash_seed, clause.size());
  for (const int literal : clause) {
    hash ^= absl::HashOf(literal, hash_seed);
  }
  return hash;
}
// Returns (without removing) the last buffered clause of the given size,
// i.e. the one that PopClause(size) would remove.
absl::Span<const int> UniqueClauseStream::NextClause(int size) const {
  absl::Span<const int> buffer = BufferForSize(size);
  return buffer.subspan(buffer.size() - size, size);
}
// Removes the last buffered clause of the given size (the one returned by
// NextClause(size)).
void UniqueClauseStream::PopClause(int size) {
  std::vector<int>* buffer = MutableBufferForSize(size);
  buffer->resize(buffer->size() - size);
}
// Returns the number of whole clauses of the given size currently buffered.
int UniqueClauseStream::NumClausesOfSize(int size) const {
  return NumLiteralsOfSize(size) / size;
}
// Returns the number of literals buffered for clauses of the given size.
int UniqueClauseStream::NumLiteralsOfSize(int size) const {
  return BufferForSize(size).size();
}
// If always_synchronize is true (e.g. sequential search), newly added binary
// clauses become visible to readers immediately instead of waiting for the
// next Synchronize() call.
SharedClausesManager::SharedClausesManager(bool always_synchronize)
    : always_synchronize_(always_synchronize) {}
2025-07-21 17:25:33 +02:00
// Registers a new worker and returns its id. Workers that may terminate early
// are excluded from num_full_workers_, which gates the batch merge in
// Synchronize(). The name is only used for statistics.
int SharedClausesManager::RegisterNewId(absl::string_view worker_name,
                                        bool may_terminate_early) {
  absl::MutexLock mutex_lock(mutex_);
  num_full_workers_ += may_terminate_early ? 0 : 1;
  const int id = id_to_last_processed_binary_clause_.size();
  id_to_last_processed_binary_clause_.resize(id + 1, 0);
  // -1 means "no batch returned/finished yet".
  id_to_last_returned_batch_.resize(id + 1, -1);
  id_to_last_finished_batch_.resize(id + 1, -1);
  id_to_num_imported_.resize(id + 1, 0);
  id_to_num_exported_.resize(id + 1, 0);
  id_to_worker_name_.resize(id + 1);
  id_to_worker_name_[id] = worker_name;
  return id;
}
2025-07-21 17:25:33 +02:00
int SharedLinear2Bounds::RegisterNewId(std::string worker_name) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
2025-07-21 17:25:33 +02:00
const int id = id_to_worker_name_.size();
id_to_stats_.resize(id + 1);
id_to_worker_name_.resize(id + 1);
id_to_worker_name_[id] = worker_name;
2025-07-21 17:25:33 +02:00
return id;
}
// A worker should import every batch except the ones it produced itself.
bool SharedClausesManager::ShouldReadBatch(int reader_id, int writer_id) {
  return reader_id != writer_id;
}
// Shares the binary clause (lit1 or lit2) from worker `id`. Clauses are
// canonicalized (smaller literal first) and deduplicated; re-adding a clause
// already seen is a no-op.
void SharedClausesManager::AddBinaryClause(int id, int lit1, int lit2) {
  if (lit2 < lit1) std::swap(lit1, lit2);
  const auto p = std::make_pair(lit1, lit2);
  absl::MutexLock mutex_lock(mutex_);
  const auto [unused_it, inserted] = added_binary_clauses_set_.insert(p);
  if (inserted) {
    added_binary_clauses_.push_back(p);
    // In sequential mode the clause is immediately visible to readers.
    if (always_synchronize_) ++last_visible_binary_clause_;
    id_to_num_exported_[id]++;

    // Small optim. If the worker is already up to date with clauses to import,
    // we can mark this new clause as already seen.
    if (id_to_last_processed_binary_clause_[id] ==
        added_binary_clauses_.size() - 1) {
      id_to_last_processed_binary_clause_[id]++;
    }
  }
}
// Queues a batch of clauses exported by worker `id`. Pending batches are
// merged and published at the next Synchronize() call.
void SharedClausesManager::AddBatch(int id, CompactVectorVector<int> batch) {
  absl::MutexLock mutex_lock(mutex_);
  id_to_num_exported_[id] += batch.size();
  pending_batches_.push_back(std::move(batch));
}
// Returns the next published batch that worker `id` has not seen yet, or a
// reference to a shared empty batch if the worker is up to date. Calling this
// also marks the previously returned batch as finished for this worker.
//
// Fix: removed the local `std::vector<absl::Span<const int>> result` which
// was declared but never used.
const CompactVectorVector<int>& SharedClausesManager::GetUnseenClauses(int id) {
  {
    absl::MutexLock mutex_lock(mutex_);
    id_to_last_finished_batch_[id] = id_to_last_returned_batch_[id];
    if (id_to_last_returned_batch_[id] + 1 < batches_.size()) {
      id_to_last_returned_batch_[id] += 1;
      return batches_[id_to_last_returned_batch_[id]];
    }
  }
  // Leaked on purpose so that returned references are always valid.
  static CompactVectorVector<int>* const empty_batch =
      new CompactVectorVector<int>();
  return *empty_batch;
}
// Fills *new_clauses with the binary clauses published since this worker's
// last call, and advances the worker's read position.
void SharedClausesManager::GetUnseenBinaryClauses(
    int id, std::vector<std::pair<int, int>>* new_clauses) {
  new_clauses->clear();
  absl::MutexLock mutex_lock(mutex_);
  const int last_binary_clause_seen = id_to_last_processed_binary_clause_[id];
  if (last_binary_clause_seen >= last_visible_binary_clause_) return;

  new_clauses->assign(
      added_binary_clauses_.begin() + last_binary_clause_seen,
      added_binary_clauses_.begin() + last_visible_binary_clause_);
  id_to_last_processed_binary_clause_[id] = last_visible_binary_clause_;
}
// Records that worker `id` actually imported `num_imported` clauses
// (statistics only).
void SharedClausesManager::NotifyNumImported(int id, int64_t num_imported) {
  absl::MutexLock mutex_lock(mutex_);
  id_to_num_imported_[id] += num_imported;
}
// Logs a per-worker table of clause-sharing statistics, sorted by worker name
// for determinism.
void SharedClausesManager::LogStatistics(SolverLogger* logger) {
  absl::MutexLock mutex_lock(mutex_);
  std::vector<std::tuple<std::string, int64_t, int64_t, int64_t, int64_t>>
      name_to_table_line;
  for (int id = 0; id < id_to_num_exported_.size(); ++id) {
    name_to_table_line.push_back(
        {id_to_worker_name_[id], id_to_num_exported_[id],
         id_to_num_imported_[id], id_to_last_processed_binary_clause_[id],
         last_visible_binary_clause_});
  }
  if (!name_to_table_line.empty()) {
    // Sorting tuples sorts by worker name first.
    absl::c_sort(name_to_table_line);
    std::vector<std::vector<std::string>> table;
    table.push_back({"Clauses shared", "#Exported", "#Imported", "#BinaryRead",
                     "#BinaryTotal"});
    for (const auto& [name, exported, imported, binary_read, binary_total] :
         name_to_table_line) {
      table.push_back({FormatName(name), FormatCounter(exported),
                       FormatCounter(imported), FormatCounter(binary_read),
                       FormatCounter(binary_total)});
    }
    SOLVER_LOG(logger, FormatTable(table));
  }
}
// TODO(user): Add some library to simplify this "transposition". Ideally we
// could merge small table with few columns. I am thinking list (row_name,
// col_name, count) + function that create table?
// Logs a per-worker table of shared linear2 bound statistics (new bounds,
// updates, imports). Producer and importer stats for the same worker name are
// merged into a single line via the btree_map.
void SharedLinear2Bounds::LogStatistics(SolverLogger* logger) {
  absl::MutexLock mutex_lock(mutex_);
  absl::btree_map<std::string, Stats> name_to_table_line;
  for (int id = 0; id < id_to_stats_.size(); ++id) {
    const Stats stats = id_to_stats_[id];
    if (!stats.empty()) {
      name_to_table_line[id_to_worker_name_[id]] = stats;
    }
  }
  for (int import_id = 0; import_id < import_id_to_index_.size(); ++import_id) {
    name_to_table_line[import_id_to_name_[import_id]].num_imported =
        import_id_to_num_imported_[import_id];
  }
  if (!name_to_table_line.empty()) {
    std::vector<std::vector<std::string>> table;
    table.push_back({"Linear2 shared", "New", "Updated", "Imported"});
    for (const auto& [name, stats] : name_to_table_line) {
      table.push_back({FormatName(name), FormatCounter(stats.num_new),
                       FormatCounter(stats.num_update),
                       FormatCounter(stats.num_imported)});
    }
    SOLVER_LOG(logger, FormatTable(table));
  }
}
// Publishes pending binary clauses, merges the pending clause batches into a
// single deduplicated batch once every "full" worker contributed one, and
// garbage-collects batches already consumed by all registered workers. The
// (potentially expensive) merge itself is done outside the lock.
void SharedClausesManager::Synchronize() {
  std::vector<CompactVectorVector<int>> batches_to_merge;
  {
    absl::MutexLock mutex_lock(mutex_);
    last_visible_binary_clause_ = added_binary_clauses_.size();
    const int num_workers = id_to_last_processed_binary_clause_.size();
    // With at most one worker there is nobody to share with.
    if (num_workers <= 1) return;
    if (pending_batches_.size() >= num_full_workers_) {
      batches_to_merge = std::move(pending_batches_);
    }

    // Delete batches that have been consumed by all workers.
    // Keep a few batches around for startup (min finished batch doesn't count
    // workers that haven't registered yet).
    if (batches_.size() > kMinBatches) {
      const int min_finished_batch =
          std::min<int>(batches_.size() - kMinBatches,
                        *absl::c_min_element(id_to_last_finished_batch_) + 1);
      for (int i = 0; i < min_finished_batch; ++i) {
        VLOG(2) << "Erasing batch";
        batches_.pop_front();
      }
      // Shift every worker's batch indices to account for the erased prefix.
      for (int id = 0; id < id_to_last_finished_batch_.size(); ++id) {
        id_to_last_returned_batch_[id] -= min_finished_batch;
        id_to_last_finished_batch_[id] -= min_finished_batch;
      }
    }
    // TODO(user): We could cleanup binary clauses that have been consumed.
  }
  if (batches_to_merge.empty()) return;
  // Re-stream all clauses through a fresh UniqueClauseStream to deduplicate
  // them and keep a deterministic sample if there are too many.
  UniqueClauseStream next_batch;
  for (const auto& batch : batches_to_merge) {
    for (int i = 0; i < batch.size(); ++i) {
      next_batch.Add(batch[i]);
    }
  }
  if (next_batch.NumBufferedLiterals() > 0) {
    absl::MutexLock mutex_lock(mutex_);
    VLOG(2) << "Merging batch";
    batches_.push_back(next_batch.NextBatch());
  }
}
2025-07-21 17:25:33 +02:00
// Shares (or tightens) the bounds [lb, ub] of the canonicalized linear2
// expression `expr` on behalf of worker `id`. Whenever the stored bounds
// change, the key is appended to newly_updated_keys_ so importers can fetch
// just the delta via NewlyUpdatedBounds().
void SharedLinear2Bounds::Add(int id, Key expr, IntegerValue lb,
                              IntegerValue ub) {
  DCHECK(expr.IsCanonicalized()) << expr;
  absl::MutexLock mutex_lock(mutex_);
  auto [it, inserted] = shared_bounds_.insert({expr, {lb, ub}});
  if (inserted) {
    // It is new.
    id_to_stats_[id].num_new++;
    newly_updated_keys_.push_back(expr);
  } else {
    // Update the individual bounds if the new ones are better.
    auto& bounds = it->second;
    const bool update_lb = lb > bounds.first;
    if (update_lb) bounds.first = lb;
    const bool update_ub = ub < bounds.second;
    if (update_ub) bounds.second = ub;
    if (update_lb || update_ub) {
      id_to_stats_[id].num_update++;
      newly_updated_keys_.push_back(expr);
    }
  }
}
int SharedLinear2Bounds::RegisterNewImportId(std::string name) {
2025-09-22 17:24:20 +02:00
absl::MutexLock mutex_lock(mutex_);
2025-07-21 17:25:33 +02:00
const int import_id = import_id_to_index_.size();
import_id_to_name_.push_back(name);
import_id_to_index_.push_back(0);
import_id_to_num_imported_.push_back(0);
return import_id;
}
// Returns the (key, [lb, ub]) pairs updated since this importer's last call
// and advances its read position. Bounds are read at call time, so a key may
// carry bounds tighter than when it was enqueued.
std::vector<
    std::pair<SharedLinear2Bounds::Key, std::pair<IntegerValue, IntegerValue>>>
SharedLinear2Bounds::NewlyUpdatedBounds(int import_id) {
  std::vector<std::pair<Key, std::pair<IntegerValue, IntegerValue>>> result;
  absl::MutexLock mutex_lock(mutex_);
  // Opportunistically drop the prefix of the update stream that every
  // importer has already consumed.
  MaybeCompressNewlyUpdateKeys();
  const int size = newly_updated_keys_.size();
  for (int i = import_id_to_index_[import_id]; i < size; ++i) {
    const auto& key = newly_updated_keys_[i];
    result.push_back({key, shared_bounds_[key]});
  }
  import_id_to_index_[import_id] = size;
  return result;
}
// Drops the prefix of newly_updated_keys_ that every registered importer has
// already consumed, and shifts the importer read indices accordingly.
//
// Fix: `min_index` was initialized to 0 before taking the minimum over the
// (non-negative) importer indices, so it was always 0 and the early-return
// always fired: the function was a no-op and newly_updated_keys_ could grow
// without bound. We now start from the maximum and guard the no-importer
// case, where nothing can be safely discarded.
void SharedLinear2Bounds::MaybeCompressNewlyUpdateKeys() {
  if (import_id_to_index_.empty()) return;
  int min_index = std::numeric_limits<int>::max();
  for (const int index : import_id_to_index_) {
    min_index = std::min(index, min_index);
  }
  if (min_index == 0) return;
  newly_updated_keys_.erase(newly_updated_keys_.begin(),
                            newly_updated_keys_.begin() + min_index);
  for (int& index_ref : import_id_to_index_) {
    index_ref -= min_index;
  }
}
// Accumulates the given (name, count) pairs into the global shared counters.
void SharedStatistics::AddStats(
    absl::Span<const std::pair<std::string, int64_t>> stats) {
  absl::MutexLock mutex_lock(mutex_);
  for (const auto& [key, count] : stats) {
    stats_[key] += count;
  }
}
// Logs all accumulated shared statistics, sorted by name for a deterministic
// output. No-op if nothing was recorded.
//
// Fix: the local was named `to_sort_` with a trailing underscore, which the
// style guide reserves for data members; renamed and reserved upfront.
void SharedStatistics::Log(SolverLogger* logger) {
  absl::MutexLock mutex_lock(mutex_);
  if (stats_.empty()) return;

  SOLVER_LOG(logger, "Stats across workers (summed):");
  std::vector<std::pair<std::string, int64_t>> to_sort;
  to_sort.reserve(stats_.size());
  for (const auto& [key, count] : stats_) {
    to_sort.push_back({key, count});
  }
  std::sort(to_sort.begin(), to_sort.end());
  for (const auto& [key, count] : to_sort) {
    SOLVER_LOG(logger, " ", key, ": ", FormatCounter(count));
  }
  SOLVER_LOG(logger, "");
}
// Starts with zeroed aggregate counters; the check-enabled flags stay false
// until a subsolver reports that the corresponding check was enabled.
SharedLratProofStatus::SharedLratProofStatus()
    : num_subsolvers_(0),
      num_valid_proofs_(0),
      num_invalid_proofs_(0),
      num_unknown_proofs_(0),
      lrat_check_enabled_(false),
      drat_check_enabled_(false),
      num_assumed_clauses_(0),
      walltime_in_seconds_(0.0) {}
// Returns a fresh sequential subsolver id. Thread-safe.
int SharedLratProofStatus::NewSubSolverId() {
  absl::MutexLock mutex_lock(mutex_);
  return num_subsolvers_++;
}
// Aggregates the proof-checking outcome of one subsolver into the shared
// counters. DRAT wall time is only accumulated for subsolvers that actually
// had DRAT checking enabled.
void SharedLratProofStatus::NewSubsolverProofStatus(
    DratChecker::Status status, bool lrat_check_enabled,
    bool drat_check_enabled, int num_assumed_clauses,
    double walltime_in_seconds) {
  absl::MutexLock mutex_lock(mutex_);
  switch (status) {
    case DratChecker::Status::VALID:
      num_valid_proofs_++;
      break;
    case DratChecker::Status::INVALID:
      num_invalid_proofs_++;
      break;
    case DratChecker::Status::UNKNOWN:
      num_unknown_proofs_++;
      break;
    default:
      break;
  }
  lrat_check_enabled_ |= lrat_check_enabled;
  drat_check_enabled_ |= drat_check_enabled;
  num_assumed_clauses_ += num_assumed_clauses;
  if (drat_check_enabled) {
    walltime_in_seconds_ += walltime_in_seconds;
  }
}
// Records the filename of a proof file produced by a subsolver.
void SharedLratProofStatus::NewProofFile(absl::string_view filename) {
  absl::MutexLock mutex_lock(mutex_);
  proof_filenames_.push_back(std::string(filename));
}
// Returns a copy of the recorded proof filenames (a copy, so the caller can
// use the result without holding the lock).
std::vector<std::string> SharedLratProofStatus::GetProofFilenames() {
  absl::MutexLock mutex_lock(mutex_);
  return proof_filenames_;
}
// Logs the aggregated proof status. The overall proof is only reported VALID
// if every registered subsolver reported a valid proof.
void SharedLratProofStatus::Log(SolverLogger* logger) {
  absl::MutexLock mutex_lock(mutex_);
  if (lrat_check_enabled_ || drat_check_enabled_) {
    if (num_valid_proofs_ == num_subsolvers_) {
      if (num_assumed_clauses_ > 0) {
        SOLVER_LOG(logger, "LRAT_status: VALID_WITH_ASSUMED_CLAUSES");
      } else {
        SOLVER_LOG(logger, "LRAT_status: VALID");
      }
    } else if (num_invalid_proofs_ > 0) {
      SOLVER_LOG(logger, "LRAT_status: INVALID");
    } else {
      SOLVER_LOG(logger, "LRAT_status: UNKNOWN");
    }
    if (drat_check_enabled_) {
      SOLVER_LOG(logger, "DRAT_walltime: ", walltime_in_seconds_);
    }
  } else {
    // Always log an LRAT status to make it easier to extract it from a
    // multirun result with awk.
    SOLVER_LOG(logger, "LRAT_status: NA");
    if (drat_check_enabled_) {
      SOLVER_LOG(logger, "DRAT_walltime: NA");
    }
  }
}
2020-10-22 23:36:58 +02:00
} // namespace sat
} // namespace operations_research