[CP-SAT] more presolve on linear

This commit is contained in:
Laurent Perron
2022-10-19 13:51:03 +02:00
parent e8e53ae6ee
commit 775dda655d
7 changed files with 306 additions and 141 deletions

View File

@@ -2509,6 +2509,7 @@ void CpModelPresolver::TryToReduceCoefficientsOfLinearConstraint(
struct Entry {
int64_t magnitude;
int64_t max_variation;
int index;
};
std::vector<Entry> entries;
std::vector<int> vars;
@@ -2544,7 +2545,7 @@ void CpModelPresolver::TryToReduceCoefficientsOfLinearConstraint(
ubs.push_back(ub);
coeffs.push_back(coeff);
magnitudes.push_back(magnitude);
entries.push_back({magnitude, magnitude * (ub - lb)});
entries.push_back({magnitude, magnitude * (ub - lb), i});
max_variation += entries.back().max_variation;
}
@@ -2568,60 +2569,106 @@ void CpModelPresolver::TryToReduceCoefficientsOfLinearConstraint(
// No point doing more work for constraint with all coeff at +/-1.
if (max_magnitude <= 1) return;
// TODO(user): All the lb/ub_feasible/infeasible class are updated in
// exactly the same way. Find a more efficient algo?
if (use_lb) {
lb_feasible_.Reset(rhs_lb.value());
lb_infeasible_.Reset(rhs.Min() - lb_sum - 1);
}
if (use_ub) {
ub_feasible_.Reset(rhs_ub.value());
ub_infeasible_.Reset(ub_sum - rhs.Max() - 1);
}
// Process entries by decreasing magnitude. Update max_error to correspond
// only to the sum of the not yet processed terms.
//
// TODO(user): we could use partial dynamic programming to also compute
// the maximum reachable value instead of just relying on gcds.
uint64_t gcd = 0;
int64_t max_error = max_variation;
std::sort(entries.begin(), entries.end(),
[](const Entry& a, Entry& b) { return a.magnitude > b.magnitude; });
std::stable_sort(
entries.begin(), entries.end(),
[](const Entry& a, const Entry& b) { return a.magnitude > b.magnitude; });
std::vector<int64_t> divisors;
for (const Entry& e : entries) {
int64_t range = 0;
for (int i = 0; i < entries.size(); ++i) {
const Entry& e = entries[i];
gcd = MathUtil::GCD64(gcd, e.magnitude);
max_error -= e.max_variation;
if (e.magnitude == 1) break; // No point continuing, and gcd == 1.
// We regroup all term with the same coefficient into one.
//
// TODO(user): I am not sure there is no possible simplification across two
// term with the same coeff, but it should be rare if it ever happens.
range += e.max_variation / e.magnitude;
if (i + 1 < entries.size() && e.magnitude == entries[i + 1].magnitude) {
continue;
}
const int64_t saved_range = range;
range = 0;
if ((!use_ub ||
max_error <= PositiveRemainder(rhs_ub, IntegerValue(e.magnitude))) &&
(!use_lb ||
max_error <= PositiveRemainder(rhs_lb, IntegerValue(e.magnitude)))) {
if (divisors.empty() || e.magnitude != divisors.back()) {
divisors.push_back(e.magnitude);
divisors.push_back(e.magnitude);
}
bool simplify_lb = false;
if (use_lb) {
lb_feasible_.AddMultiples(e.magnitude, saved_range);
lb_infeasible_.AddMultiples(e.magnitude, saved_range);
// For a <= constraint, the max_feasible + error is still feasible.
if (lb_feasible_.CurrentMax() + max_error <= lb_feasible_.Bound()) {
simplify_lb = true;
}
// For a <= constraint describing the infeasible set, the max_infeasible +
// error is still infeasible.
if (lb_infeasible_.CurrentMax() + max_error <= lb_infeasible_.Bound()) {
simplify_lb = true;
}
} else {
simplify_lb = true;
}
bool simplify_ub = false;
if (use_ub) {
ub_feasible_.AddMultiples(e.magnitude, saved_range);
ub_infeasible_.AddMultiples(e.magnitude, saved_range);
if (ub_feasible_.CurrentMax() + max_error <= ub_feasible_.Bound()) {
simplify_ub = true;
}
if (ub_infeasible_.CurrentMax() + max_error <= ub_infeasible_.Bound()) {
simplify_ub = true;
}
} else {
simplify_ub = true;
}
if (max_error == 0) break; // Last term.
if ((!use_ub ||
max_error <= PositiveRemainder(rhs_ub, IntegerValue(gcd))) &&
(!use_lb ||
max_error <= PositiveRemainder(rhs_lb, IntegerValue(gcd)))) {
// We have a simplification using gcd * part1 + part2, we know that part2
// can be ignored.
int64_t shift_lb = 0;
int64_t shift_ub = 0;
context_->UpdateRuleStats("linear: simplify using partial gcd");
if (simplify_lb && simplify_ub) {
// We have a simplification since the second part can be ignored.
context_->UpdateRuleStats("linear: remove irrelevant part");
LinearConstraintProto* mutable_linear = ct->mutable_linear();
mutable_linear->clear_vars();
mutable_linear->clear_coeffs();
for (int i = 0; i < magnitudes.size(); ++i) {
const int64_t m = magnitudes[i];
if (m < e.magnitude) continue;
shift_lb += lbs[i] * m;
shift_ub += ubs[i] * m;
mutable_linear->add_vars(vars[i]);
mutable_linear->add_coeffs(coeffs[i]);
int64_t shift_lb = 0;
int64_t shift_ub = 0;
for (int j = 0; j <= i; ++j) {
const int index = entries[j].index;
const int64_t m = magnitudes[index];
shift_lb += lbs[index] * m;
shift_ub += ubs[index] * m;
mutable_linear->add_vars(vars[index]);
mutable_linear->add_coeffs(coeffs[index]);
}
// The constraint become:
// sum ci (X - lb) <= rhs_ub
// sum ci (ub - X) <= rhs_lb
// sum ci ub - rhs_lb <= sum ci X <= rhs_ub + sum ci lb.
int64_t new_rhs_lb = use_lb ? shift_ub - rhs_lb.value() : shift_lb;
int64_t new_rhs_ub = use_ub ? shift_lb + rhs_ub.value() : shift_ub;
const int64_t new_rhs_lb =
use_lb ? shift_ub - lb_feasible_.CurrentMax() : shift_lb;
const int64_t new_rhs_ub =
use_ub ? shift_lb + ub_feasible_.CurrentMax() : shift_ub;
FillDomainInProto(Domain(new_rhs_lb, new_rhs_ub), mutable_linear);
DivideLinearByGcd(ct);
context_->UpdateConstraintVariableUsage(c);
@@ -2638,6 +2685,18 @@ void CpModelPresolver::TryToReduceCoefficientsOfLinearConstraint(
return;
}
// We didn't remove any irrelevant part, but we might be able to tighten
// the constraint bound.
if ((use_lb && lb_feasible_.CurrentMax() < lb_feasible_.Bound()) ||
(use_ub && ub_feasible_.CurrentMax() < ub_feasible_.Bound())) {
context_->UpdateRuleStats("linear: reduce rhs with DP");
const int64_t new_rhs_lb =
use_lb ? ub_sum - lb_feasible_.CurrentMax() : lb_sum;
const int64_t new_rhs_ub =
use_ub ? lb_sum + ub_feasible_.CurrentMax() : ub_sum;
FillDomainInProto(Domain(new_rhs_lb, new_rhs_ub), ct->mutable_linear());
}
// Limit the number of "divisor" we try for approximate gcd.
if (divisors.size() > 3) divisors.resize(3);
for (const int64_t divisor : divisors) {
@@ -3192,6 +3251,56 @@ bool CpModelPresolver::PropagateDomainsInLinear(int ct_index,
return false;
}
// The constraint from its lower value is sum positive_coeff * X <= threshold.
// If from_lower_bound is false, then it is the constraint from its upper value.
//
// Two presolve reductions are applied on each term (working with the
// positive-coefficient normalized form):
//  - coeff > threshold: the shifted variable must be zero, so the variable is
//    fixed to its relevant bound (only when there is no enforcement literal).
//  - second_threshold < coeff < threshold: the coefficient magnitude can be
//    raised to `threshold` without changing the feasible set; the rhs domain
//    is shifted by `rhs_offset` to compensate.
void CpModelPresolver::LowerThanCoeffStrengthening(bool from_lower_bound,
int64_t min_magnitude,
int64_t threshold,
ConstraintProto* ct) {
const LinearConstraintProto& arg = ct->linear();
const int num_vars = arg.vars_size();
// Coefficients strictly above this (and below threshold) can be increased.
const int64_t second_threshold = threshold - min_magnitude;
int64_t rhs_offset = 0;
for (int i = 0; i < num_vars; ++i) {
// Normalize the term so the working coefficient is positive; negating the
// reference flips the variable accordingly.
int ref = arg.vars(i);
int64_t coeff = arg.coeffs(i);
if (coeff < 0) {
ref = NegatedRef(ref);
coeff = -coeff;
}
if (coeff > threshold) {
if (ct->enforcement_literal().empty()) {
// Shifted variable must be zero.
context_->UpdateRuleStats("linear: fix variable to its bound.");
CHECK(context_->IntersectDomainWith(
ref, Domain(from_lower_bound ? context_->MinOf(ref)
: context_->MaxOf(ref))));
}
// TODO(user): What to do with the coeff if there is enforcement?
continue;
}
if (coeff > second_threshold && coeff < threshold) {
context_->UpdateRuleStats(
"linear: coefficient strengthening by increasing it.");
if (from_lower_bound) {
// coeff * (X - LB + LB) -> threshold * (X - LB) + coeff * LB
rhs_offset -= (coeff - threshold) * context_->MinOf(ref);
} else {
// coeff * (X - UB + UB) -> threshold * (X - UB) + coeff * UB
rhs_offset -= (coeff - threshold) * context_->MaxOf(ref);
}
// Keep the sign of the stored coefficient while raising its magnitude.
ct->mutable_linear()->set_coeffs(
i, arg.coeffs(i) > 0 ? threshold : -threshold);
}
}
// Compensate the rhs domain for the coefficient increases made above.
if (rhs_offset != 0) {
FillDomainInProto(ReadDomainFromProto(arg).AdditionWith(Domain(rhs_offset)),
ct->mutable_linear());
}
}
// Identify Boolean variable that makes the constraint always true when set to
// true or false. Moves such literal to the constraint enforcement literals
// list.
@@ -3225,6 +3334,7 @@ void CpModelPresolver::ExtractEnforcementLiteralFromLinearConstraint(
min_sum += std::min(term_a, term_b);
max_sum += std::max(term_a, term_b);
}
if (max_coeff_magnitude == 1) return;
// We can only extract enforcement literals if the maximum coefficient
// magnitude is large enough. Note that we handle complex domain.
@@ -3238,6 +3348,23 @@ void CpModelPresolver::ExtractEnforcementLiteralFromLinearConstraint(
const Domain rhs_domain = ReadDomainFromProto(ct->linear());
if (max_coeff_magnitude + min_coeff_magnitude <
std::max(ub_threshold, lb_threshold)) {
// We also have other kind of coefficient strengthening.
// In something like 3x + 5y <= 6, the coefficient 5 can be changed to 6.
if (domain.size() == 2 && min_coeff_magnitude > 1) {
const int64_t rhs_min = domain[0];
const int64_t rhs_max = domain[1];
if (min_sum >= rhs_min &&
max_coeff_magnitude + min_coeff_magnitude > rhs_max - min_sum) {
LowerThanCoeffStrengthening(/*from_lower_bound=*/true,
min_coeff_magnitude, rhs_max - min_sum, ct);
}
if (max_sum <= rhs_max &&
max_coeff_magnitude + min_coeff_magnitude > max_sum - rhs_min) {
LowerThanCoeffStrengthening(/*from_lower_bound=*/false,
min_coeff_magnitude, max_sum - rhs_min, ct);
}
}
return;
}
@@ -3254,10 +3381,6 @@ void CpModelPresolver::ExtractEnforcementLiteralFromLinearConstraint(
// not be the most efficient, but it simplify the presolve code by not having
// to do anything special to trigger a new presolving of these constraints.
// Try to improve if this becomes a problem.
//
// TODO(user): At the end of the presolve we should probably remerge any
// identical linear constraints. That also cover the corner cases where
// constraints are just redundant...
const bool lower_bounded = min_sum < rhs_domain.Min();
const bool upper_bounded = max_sum > rhs_domain.Max();
if (!lower_bounded && !upper_bounded) return;
@@ -3325,6 +3448,8 @@ void CpModelPresolver::ExtractEnforcementLiteralFromLinearConstraint(
coeff = -coeff;
}
// TODO(user): If the encoding Boolean already exist, we could extract
// the non-Boolean enforcement term.
const bool is_boolean = context_->CanBeUsedAsLiteral(ref);
if (context_->IsFixed(ref) || coeff < threshold ||
(only_extract_booleans && !is_boolean)) {

View File

@@ -233,6 +233,8 @@ class CpModelPresolver {
void ExtractEnforcementLiteralFromLinearConstraint(int ct_index,
ConstraintProto* ct);
void LowerThanCoeffStrengthening(bool from_lower_bound, int64_t min_magnitude,
int64_t threshold, ConstraintProto* ct);
// Extracts cliques from bool_and and small at_most_one constraints and
// transforms them into maximal cliques.
@@ -277,6 +279,12 @@ class CpModelPresolver {
absl::flat_hash_map<int, int> temp_map_;
absl::flat_hash_set<int> temp_set_;
ConstraintProto temp_ct_;
// Use by TryToReduceCoefficientsOfLinearConstraint().
MaxBoundedSubsetSum lb_feasible_;
MaxBoundedSubsetSum lb_infeasible_;
MaxBoundedSubsetSum ub_feasible_;
MaxBoundedSubsetSum ub_infeasible_;
};
// This helper class perform copy with simplification from a model and a

View File

@@ -660,6 +660,10 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
continue;
}
if (name == "less_encoding") continue;
// TODO(user): Enable lb_tree_search in deterministic mode.
if (params.optimize_with_lb_tree_search() && params.interleave_search()) {
continue;
}
} else {
if (params.optimize_with_lb_tree_search()) continue;
if (params.optimize_with_core()) continue;

View File

@@ -1045,14 +1045,20 @@ void RegisterObjectiveBestBoundExport(
SharedResponseManager* shared_response_manager, Model* model) {
auto* integer_trail = model->Get<IntegerTrail>();
const auto broadcast_objective_lower_bound =
[objective_var, integer_trail, shared_response_manager,
model](const std::vector<IntegerVariable>& unused) {
shared_response_manager->UpdateInnerObjectiveBounds(
model->Name(), integer_trail->LevelZeroLowerBound(objective_var),
integer_trail->LevelZeroUpperBound(objective_var));
// If we are not in interleave_search we synchronize right away.
if (!model->Get<SatParameters>()->interleave_search()) {
shared_response_manager->Synchronize();
[objective_var, integer_trail, shared_response_manager, model,
best_obj_lb =
kMinIntegerValue](const std::vector<IntegerVariable>&) mutable {
const IntegerValue objective_lb =
integer_trail->LevelZeroLowerBound(objective_var);
if (objective_lb > best_obj_lb) {
best_obj_lb = objective_lb;
shared_response_manager->UpdateInnerObjectiveBounds(
model->Name(), objective_lb,
integer_trail->LevelZeroUpperBound(objective_var));
// If we are not in interleave_search we synchronize right away.
if (!model->Get<SatParameters>()->interleave_search()) {
shared_response_manager->Synchronize();
}
}
};
model->GetOrCreate<GenericLiteralWatcher>()
@@ -1521,10 +1527,16 @@ void LoadCpModel(const CpModelProto& model_proto, Model* model) {
// SolveLoadedCpModel().
const std::string solution_info = model->Name();
const auto solution_observer = [&model_proto, model, solution_info,
shared_response_manager]() {
shared_response_manager,
best_obj_ub = kMaxIntegerValue]() mutable {
const std::vector<int64_t> solution =
GetSolutionValues(model_proto, *model);
shared_response_manager->NewSolution(solution, solution_info, model);
const IntegerValue obj_ub =
ComputeInnerObjective(model_proto.objective(), solution);
if (obj_ub < best_obj_ub) {
best_obj_ub = obj_ub;
shared_response_manager->NewSolution(solution, solution_info, model);
}
};
const auto& objective = *model->GetOrCreate<ObjectiveDefinition>();
@@ -1554,11 +1566,21 @@ void SolveLoadedCpModel(const CpModelProto& model_proto, Model* model) {
if (shared_response_manager->ProblemIsSolved()) return;
const std::string& solution_info = model->Name();
const auto solution_observer = [&model_proto, &model, &solution_info,
&shared_response_manager]() {
auto solution_observer = [&model_proto, model, solution_info,
shared_response_manager,
best_obj_ub = kMaxIntegerValue]() mutable {
const std::vector<int64_t> solution =
GetSolutionValues(model_proto, *model);
shared_response_manager->NewSolution(solution, solution_info, model);
if (model_proto.has_objective()) {
const IntegerValue obj_ub =
ComputeInnerObjective(model_proto.objective(), solution);
if (obj_ub < best_obj_ub) {
best_obj_ub = obj_ub;
shared_response_manager->NewSolution(solution, solution_info, model);
}
} else {
shared_response_manager->NewSolution(solution, solution_info, model);
}
};
// Reconfigure search heuristic if it was changed.
@@ -2994,9 +3016,10 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
}
}
SOLVER_LOG(logger, "");
SOLVER_LOG(logger,
absl::StrFormat("Starting Search at %.2fs with %i workers.",
shared.wall_timer->Get(), params.num_workers()));
SOLVER_LOG(logger, absl::StrFormat(
"Starting %s search at %.2fs with %i workers.",
params.interleave_search() ? "deterministic" : "",
shared.wall_timer->Get(), params.num_workers()));
SOLVER_LOG(logger, full_subsolver_index, " full subsolvers: [",
absl::StrJoin(names.begin(),
names.begin() + full_subsolver_index, ", "),
@@ -3009,8 +3032,15 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
// Launch the main search loop.
if (params.interleave_search()) {
DeterministicLoop(subsolvers, params.num_workers(),
params.interleave_batch_size());
int batch_size = params.interleave_batch_size();
if (batch_size == 0) {
batch_size = params.num_workers() == 1 ? 1 : params.num_workers() * 3;
SOLVER_LOG(
logger,
"Setting number of tasks in each batch of interleaved search to ",
batch_size);
}
DeterministicLoop(subsolvers, params.num_workers(), batch_size);
} else {
NonDeterministicLoop(subsolvers, params.num_workers());
}
@@ -3233,17 +3263,6 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
!params.interleave_search() || params.num_workers() <= 1;
shared_response_manager->SetSynchronizationMode(always_synchronize);
// Set up the batch size in interleave mode.
if (params.interleave_search() && params.interleave_batch_size() == 0) {
const int batch_size =
params.num_workers() == 1 ? 1 : params.num_workers() * 5;
SOLVER_LOG(
logger,
"Setting number of tasks in each batch of interleaved search to ",
batch_size);
model->GetOrCreate<SatParameters>()->set_interleave_batch_size(batch_size);
}
// Special case for pure-sat problem.
// TODO(user): improve the normal presolver to do the same thing.
// TODO(user): Support solution hint, but then the first TODO will make it

View File

@@ -380,7 +380,7 @@ SatSolver::Status LbTreeSearch::Search(
// If the search has not just been restarted (in which case nodes_ would be
// empty), and if we are at level zero (either naturally, or if the
// backtrack level was set to zero in the above code), let's run a different
// heuristic to decide whether to retart the search from scratch or not.
// heuristic to decide whether to restart the search from scratch or not.
//
// We ignore small search trees.
if (sat_solver_->CurrentDecisionLevel() == 0 && num_nodes_in_tree_ > 50) {

View File

@@ -405,24 +405,83 @@ void CompressTuples(absl::Span<const int64_t> domain_sizes,
void MaxBoundedSubsetSum::Reset(int64_t bound) {
  // Restart from a clean state: only the empty subset (sum == 0) is known.
  DCHECK_GE(bound, 0);
  bound_ = bound;
  current_max_ = 0;
  gcd_ = 0;
  expanded_sums_.clear();
  sums_.assign(1, 0);
}
void MaxBoundedSubsetSum::Add(int64_t value) {
  // Adding 0 never changes any subset sum, and a value above the bound can
  // never be part of a sum <= bound_, so both are ignored.
  DCHECK_GE(value, 0);
  if (value == 0 || value > bound_) return;
  gcd_ = std::gcd(gcd_, value);
  AddChoicesInternal({value});
}
// Adds a choice of values to the base set over which subset sums are taken.
void MaxBoundedSubsetSum::AddChoices(absl::Span<const int64_t> choices) {
if (DEBUG_MODE) {
for (const int64_t c : choices) {
DCHECK_GE(c, 0);
}
}
// The max is already reachable or we aborted.
if (current_max_ == bound_) return;
// NOTE(review): `value` is not declared in this scope; this line appears to
// be a stray leftover from Add() in this markerless diff — confirm against
// the original file.
if (value > bound_) return; // Can be ignored.
// Filter out zero and values greater than bound_.
filtered_values_.clear();
for (const int64_t c : choices) {
if (c == 0 || c > bound_) continue;
filtered_values_.push_back(c);
gcd_ = std::gcd(gcd_, c);
}
if (filtered_values_.empty()) return;
// So we can abort early in the AddChoicesInternal() inner loops.
std::sort(filtered_values_.begin(), filtered_values_.end());
AddChoicesInternal(filtered_values_);
}
// Adds the multiples coeff, 2 * coeff, ..., max_value * coeff.
void MaxBoundedSubsetSum::AddMultiples(int64_t coeff, int64_t max_value) {
DCHECK_GE(coeff, 0);
DCHECK_GE(max_value, 0);
// A zero coefficient or count contributes nothing; a coefficient above the
// bound can never be part of a sum <= bound_.
if (coeff == 0 || max_value == 0) return;
if (coeff > bound_) return;
// The max is already reachable; nothing can improve it.
if (current_max_ == bound_) return;
gcd_ = std::gcd(gcd_, coeff);
// Only multiples that do not exceed bound_ can matter.
const int64_t num_values = std::min(max_value, FloorOfRatio(bound_, coeff));
if (num_values > 10) {
// We only keep GCD in this case.
sums_.clear();
expanded_sums_.clear();
current_max_ = FloorOfRatio(bound_, gcd_) * gcd_;
return;
}
filtered_values_.clear();
for (int multiple = 1; multiple <= num_values; ++multiple) {
const int64_t v = multiple * coeff;
if (v == bound_) {
// The bound itself is reachable; we can stop right away.
current_max_ = bound_;
return;
}
filtered_values_.push_back(v);
}
AddChoicesInternal(filtered_values_);
}
void MaxBoundedSubsetSum::AddChoicesInternal(absl::Span<const int64_t> values) {
// Mode 1: vector of all possible sums (with duplicates).
if (!sums_.empty() && sums_.size() <= kMaxComplexityPerAdd) {
const int old_size = sums_.size();
for (int i = 0; i < old_size; ++i) {
const int64_t s = sums_[i] + value;
if (s <= bound_) {
for (const int64_t value : values) {
const int64_t s = sums_[i] + value;
if (s > bound_) break;
sums_.push_back(s);
current_max_ = std::max(current_max_, s);
if (current_max_ == bound_) return; // Abort
@@ -442,84 +501,28 @@ void MaxBoundedSubsetSum::Add(int64_t value) {
}
// The reverse order is important to not add the current value twice.
for (int i = bound_ - value; i >= 0; --i) {
if (expanded_sums_[i]) {
expanded_sums_[i + value] = true;
current_max_ = std::max(current_max_, i + value);
}
}
return;
}
if (!expanded_sums_.empty()) {
for (int64_t i = bound_ - 1; i >= 0; --i) {
if (!expanded_sums_[i]) continue;
for (const int64_t value : values) {
if (i + value > bound_) break;
// Abort.
current_max_ = bound_;
}
void MaxBoundedSubsetSum::AddChoices(absl::Span<const int64_t> choices) {
if (DEBUG_MODE) {
for (const int64_t c : choices) {
DCHECK_GE(c, 0);
}
}
// The max is already reachable or we aborted.
if (current_max_ == bound_) return;
// Filter out zero and values greater than bound_.
filtered_values_.clear();
for (const int64_t c : choices) {
if (c == 0 || c > bound_) continue;
filtered_values_.push_back(c);
}
if (filtered_values_.empty()) return;
if (filtered_values_.size() == 1) {
Add(filtered_values_[0]);
return;
}
// Mode 1: vector of all possible sums (with duplicates).
if (!sums_.empty() && sums_.size() <= kMaxComplexityPerAdd) {
const int old_size = sums_.size();
for (int i = 0; i < old_size; ++i) {
for (const int64_t value : filtered_values_) {
const int64_t s = sums_[i] + value;
if (s <= bound_) {
sums_.push_back(s);
current_max_ = std::max(current_max_, s);
expanded_sums_[i + value] = true;
current_max_ = std::max(current_max_, i + value);
if (current_max_ == bound_) return; // Abort
}
}
return;
}
return;
}
// Mode 2: bitset of all possible sums.
if (bound_ <= kMaxComplexityPerAdd) {
if (!sums_.empty()) {
expanded_sums_.assign(bound_ + 1, false);
for (const int64_t s : sums_) {
expanded_sums_[s] = true;
}
sums_.clear();
}
// The reverse order is important to not add the current value twice.
for (int64_t i = bound_ - 1; i >= 0; --i) {
if (expanded_sums_[i]) {
for (const int64_t value : filtered_values_) {
if (i + value <= bound_) {
expanded_sums_[i + value] = true;
current_max_ = std::max(current_max_, i + value);
if (current_max_ == bound_) return; // Abort
}
}
}
}
return;
// Fall back to gcd_.
DCHECK_NE(gcd_, 0);
if (gcd_ == 1) {
current_max_ = bound_;
} else {
current_max_ = FloorOfRatio(bound_, gcd_) * gcd_;
}
// Abort.
current_max_ = bound_;
}
BasicKnapsackSolver::Result BasicKnapsackSolver::Solve(

View File

@@ -191,9 +191,6 @@ int MoveOneUnprocessedLiteralLast(
// become too important.
//
// Precondition: Both bound and all added values must be >= 0.
//
// TODO(user): Compute gcd of all value so we can return a better bound for
// large sets?
class MaxBoundedSubsetSum {
public:
MaxBoundedSubsetSum() { Reset(0); }
@@ -207,8 +204,13 @@ class MaxBoundedSubsetSum {
void Add(int64_t value);
// Add a choice of values to the base set for which subset sums will be taken.
// Note that even if this doesn't include zero, not taking any choices will
// also be an option.
void AddChoices(absl::Span<const int64_t> choices);
// Adds [0, coeff, 2 * coeff, ... max_value * coeff].
void AddMultiples(int64_t coeff, int64_t max_value);
// Returns an upper bound (inclusive) on the maximum sum <= bound_.
// This might return bound_ if we aborted the computation.
int64_t CurrentMax() const { return current_max_; }
@@ -216,8 +218,12 @@ class MaxBoundedSubsetSum {
int64_t Bound() const { return bound_; }
private:
// This assumes filtered values.
void AddChoicesInternal(absl::Span<const int64_t> values);
static constexpr int kMaxComplexityPerAdd = 50;
int64_t gcd_;
int64_t bound_;
int64_t current_max_;
std::vector<int64_t> sums_;