[CP-SAT] more dual reductions

This commit is contained in:
Laurent Perron
2022-07-28 00:22:33 +02:00
parent ad1b8ee5e4
commit 29a9e59653
11 changed files with 579 additions and 62 deletions

View File

@@ -1240,6 +1240,7 @@ cc_library(
"//ortools/base:stl_util",
"//ortools/util:random_engine",
"//ortools/util:saturated_arithmetic",
"//ortools/util:sorted_interval_list",
"//ortools/util:time_limit",
"@com_google_absl//absl/container:btree",
"@com_google_absl//absl/container:flat_hash_map",

View File

@@ -1659,6 +1659,101 @@ bool CpModelPresolver::RemoveSingletonInLinear(ConstraintProto* ct) {
}
}
// If the whole linear is independent from the rest of the problem, we
// can solve it now. If it is enforced, then each variable will have two
// values: Its minimum one and one minimizing the objective under the
// constraint. The switch can be controlled by a single Boolean.
//
// TODO(user): Cover more cases, like dedicated algorithms to solve for a
// small number of variables that are faster than the DP we use here.
if (index_to_erase.empty()) {
int num_singletons = 0;
for (const int var : ct->linear().vars()) {
if (!RefIsPositive(var)) break;
if (!context_->VariableWithCostIsUniqueAndRemovable(var) &&
!context_->VariableIsUniqueAndRemovable(var)) {
break;
}
++num_singletons;
}
if (num_singletons == num_vars) {
// Try to solve the equation.
std::vector<Domain> domains;
std::vector<int64_t> coeffs;
std::vector<int64_t> costs;
for (int i = 0; i < num_vars; ++i) {
const int var = ct->linear().vars(i);
CHECK(RefIsPositive(var));
domains.push_back(context_->DomainOf(var));
coeffs.push_back(ct->linear().coeffs(i));
costs.push_back(context_->ObjectiveCoeff(var));
}
BasicKnapsackSolver solver;
const auto& result = solver.Solve(domains, coeffs, costs,
ReadDomainFromProto(ct->linear()));
if (!result.solved) {
context_->UpdateRuleStats(
"TODO independent linear: minimize single linear constraint");
} else if (result.infeasible) {
context_->UpdateRuleStats(
"independent linear: no DP solution to simple constraint");
return MarkConstraintAsFalse(ct);
} else {
if (ct->enforcement_literal().empty()) {
// Just fix everything.
context_->UpdateRuleStats("independent linear: solved by DP");
for (int i = 0; i < num_vars; ++i) {
if (!context_->IntersectDomainWith(ct->linear().vars(i),
Domain(result.solution[i]))) {
return false;
}
}
return RemoveConstraint(ct);
}
// Each variable will take two values according to a single Boolean.
int indicator;
if (ct->enforcement_literal().size() == 1) {
indicator = ct->enforcement_literal(0);
} else {
indicator = context_->NewBoolVar();
auto* new_ct = context_->working_model->add_constraints();
*new_ct->mutable_enforcement_literal() = ct->enforcement_literal();
new_ct->mutable_bool_or()->add_literals(indicator);
context_->UpdateNewConstraintsVariableUsage();
}
for (int i = 0; i < num_vars; ++i) {
const int64_t best_value =
costs[i] > 0 ? domains[i].Min() : domains[i].Max();
const int64_t other_value = result.solution[i];
if (best_value == other_value) {
if (!context_->IntersectDomainWith(ct->linear().vars(i),
Domain(best_value))) {
return false;
}
continue;
}
if (RefIsPositive(indicator)) {
if (!context_->StoreAffineRelation(ct->linear().vars(i), indicator,
other_value - best_value,
best_value)) {
return false;
}
} else {
if (!context_->StoreAffineRelation(
ct->linear().vars(i), PositiveRef(indicator),
best_value - other_value, other_value)) {
return false;
}
}
}
context_->UpdateRuleStats(
"independent linear: with enforcement, but solved by DP");
return RemoveConstraint(ct);
}
}
}
// If we didn't find any, look for the one appearing in the objective.
if (index_to_erase.empty()) {
// Note that we only do that if we have a non-reified equality.
@@ -1694,6 +1789,13 @@ bool CpModelPresolver::RemoveSingletonInLinear(ConstraintProto* ct) {
CHECK_NE(coeff, 0);
if (objective_coeff % coeff != 0) continue;
// TODO(user): We have an issue if objective coeff is not one, because
// the RecomputeSingletonObjectiveDomain() do not properly put holes
// in the objective domain, which might cause an issue. Note that this
// presolve rule is actually almost never applied on the miplib. It is
// also a bit redundant with ExpandObjective().
if (std::abs(objective_coeff) != 1) continue;
// We do not do that if the domain of rhs becomes too complex.
bool exact;
const auto term_domain =
@@ -2462,10 +2564,6 @@ bool RhsCanBeFixedToMax(int64_t coeff, const Domain& var_domain,
// TODO(user): merge code with the fully included case.
// TODO(user): Similarly amo and bool_or intersection or amo and enforcement
// literals can be presolved.
//
// TODO(user): while this reduce the problem size significantly on vpphard2,
// for some reason, the core do not find lb anymore. It seems due to
// ExpandObjective(). Investigate.
void CpModelPresolver::DetectAndProcessAtMostOneInLinear(int ct_index,
ConstraintProto* ct) {
if (ct->constraint_case() != ConstraintProto::kLinear) return;
@@ -5954,6 +6052,7 @@ void CpModelPresolver::ExpandObjective() {
// However, it might be more robust to just handle this case properly.
bool is_present = false;
int64_t objective_coeff;
Domain implied = ReadDomainFromProto(ct.linear());
for (int i = 0; i < num_terms; ++i) {
const int ref = ct.linear().vars(i);
const int64_t coeff = ct.linear().coeffs(i);
@@ -5964,10 +6063,25 @@ void CpModelPresolver::ExpandObjective() {
} else {
// This is not possible since we only consider relevant constraints.
CHECK(!processed_vars.contains(PositiveRef(ref)));
implied = implied
.AdditionWith(
context_->DomainOf(ref).ContinuousMultiplicationBy(
-coeff))
.RelaxIfTooComplex();
}
}
CHECK(is_present);
// Important: We will only use equation where the implied lb on the
// objective var is tight. This is important for core based search, see
// for instance vpphard where without this the core do not solve it.
implied = implied.InverseMultiplicationBy(objective_coeff);
if (context_->ObjectiveCoeff(objective_var) > 0) {
if (implied.Min() < context_->MinOf(objective_var)) continue;
} else {
if (implied.Max() > context_->MaxOf(objective_var)) continue;
}
// We use the longest equality we can find.
//
// TODO(user): Deal with objective_coeff with a magnitude greater than
@@ -7948,6 +8062,55 @@ void CpModelPresolver::ProcessVariableOnlyUsedInEncoding(int var) {
return;
}
// If a variable var only appear in enf => var \in domain and in the
// objective, we can remove its costs and the variable/constraint by
// transferring part of the cost to the enforcement.
//
// More generally, we can reduce the domain to just two values. Later this
// will be replaced by a Boolean, and the equivalence to the enforcement
// literal will be added if it is unique.
//
// TODO(user): maybe we should do more here rather than delaying some
// reduction. But then it is more code.
if (context_->VariableWithCostIsUniqueAndRemovable(var)) {
int unique_c = -1;
for (const int c : context_->VarToConstraints(var)) {
if (c < 0) continue;
CHECK_EQ(unique_c, -1);
unique_c = c;
}
CHECK_NE(unique_c, -1);
const ConstraintProto& ct = context_->working_model->constraints(unique_c);
const int64_t cost = context_->ObjectiveCoeff(var);
if (ct.linear().vars(0) == var) {
const Domain implied = ReadDomainFromProto(ct.linear())
.InverseMultiplicationBy(ct.linear().coeffs(0))
.IntersectionWith(context_->DomainOf(var));
if (implied.IsEmpty()) {
return (void)MarkConstraintAsFalse(
context_->working_model->mutable_constraints(unique_c));
}
int64_t value1, value2;
if (cost == 0) {
context_->UpdateRuleStats("variables: fix singleton var in linear1");
return (void)context_->IntersectDomainWith(var, Domain(implied.Min()));
} else if (cost > 0) {
value1 = context_->MinOf(var);
value2 = implied.Min();
} else {
value1 = context_->MaxOf(var);
value2 = implied.Max();
}
// Nothing else to do in this case, the constraint will be reduced to
// a pure Boolean constraint later.
context_->UpdateRuleStats("variables: reduced domain to two values");
return (void)context_->IntersectDomainWith(
var, Domain::FromValues({value1, value2}));
}
}
// We can currently only deal with the case where all encoding constraint
// are of the form literal => var ==/!= value.
// If they are more complex linear1 involved, we just abort.
@@ -8411,6 +8574,7 @@ void CpModelPresolver::PresolveToFixPoint() {
//
// TODO(user): Avoid reprocessing the constraints that changed the domain?
if (context_->ModelIsUnsat()) return;
if (time_limit->LimitReached()) break;
in_queue.resize(context_->working_model->constraints_size(), false);
const auto& vector_that_can_grow_during_iter =
context_->modified_domains.PositionsSetAtLeastOnce();
@@ -8459,6 +8623,12 @@ void CpModelPresolver::PresolveToFixPoint() {
DetectDominanceRelations(*context_, &var_dom,
&dual_bound_strengthening);
if (!dual_bound_strengthening.Strengthen(context_)) return;
if (dual_bound_strengthening.NumDeletedConstraints() > 0) {
// Loop again.
// TODO(user): Optimize the code to reach a fix point faster?
i = -1;
continue;
}
// TODO(user): The Strengthen() function above might make some
// inequality tight. Currently, because we only do that for implication,

View File

@@ -629,7 +629,8 @@ int DetectMakespan(const std::vector<IntervalVariable>& intervals,
IntegerValue horizon = kMinIntegerValue;
for (int i = 0; i < intervals.size(); ++i) {
if (repository->IsAbsent(intervals[i])) continue;
horizon = std::max(horizon, integer_trail->UpperBound(repository->End(intervals[i])));
horizon = std::max(
horizon, integer_trail->UpperBound(repository->End(intervals[i])));
}
const IntegerValue capacity_value = integer_trail->FixedValue(capacity);
@@ -674,8 +675,8 @@ void AppendNoOverlapRelaxationAndCutGenerator(const ConstraintProto& ct,
new SchedulingDemandHelper(demands, helper, model);
model->TakeOwnership(demands_helper);
AddCumulativeRelaxation(/*capacity=*/one, helper, demands_helper, model,
relaxation);
AddCumulativeRelaxation(/*capacity=*/one, helper, demands_helper, makespan,
model, relaxation);
if (model->GetOrCreate<SatParameters>()->linearization_level() > 1) {
AddNoOverlapCutGenerator(helper, makespan, model, relaxation);
}
@@ -710,7 +711,8 @@ void AppendCumulativeRelaxationAndCutGenerator(const ConstraintProto& ct,
model->TakeOwnership(demands_helper);
// We can now add the relaxation and the cut generators.
AddCumulativeRelaxation(capacity, helper, demands_helper, model, relaxation);
AddCumulativeRelaxation(capacity, helper, demands_helper, makespan, model,
relaxation);
if (model->GetOrCreate<SatParameters>()->linearization_level() > 1) {
AddCumulativeCutGenerator(capacity, helper, demands_helper, makespan, model,
relaxation);
@@ -720,10 +722,10 @@ void AppendCumulativeRelaxationAndCutGenerator(const ConstraintProto& ct,
// This relaxation will compute the bounding box of all tasks in the cumulative,
// and add the constraint that the sum of energies of each task must fit in the
// capacity * span area.
// TODO(user): Exploit the makespan if found.
void AddCumulativeRelaxation(const AffineExpression& capacity,
SchedulingConstraintHelper* helper,
SchedulingDemandHelper* demands_helper,
std::optional<AffineExpression> makespan,
Model* model, LinearRelaxation* relaxation) {
const int num_intervals = helper->NumTasks();
demands_helper->CacheAllEnergyValues();
@@ -773,8 +775,10 @@ void AddCumulativeRelaxation(const AffineExpression& capacity,
const IntegerVariable span_start =
integer_trail->AddIntegerVariable(min_of_starts, max_of_ends);
const IntegerVariable span_end =
integer_trail->AddIntegerVariable(min_of_starts, max_of_ends);
const AffineExpression span_end =
makespan.has_value()
? makespan.value()
: integer_trail->AddIntegerVariable(min_of_starts, max_of_ends);
auto* sat_solver = model->GetOrCreate<SatSolver>();
const Literal cumulative_is_not_empty =
@@ -792,8 +796,10 @@ void AddCumulativeRelaxation(const AffineExpression& capacity,
// Link span_start and span_end to the starts and ends of the tasks.
model->Add(EqualMinOfSelectedVariables(cumulative_is_not_empty, span_start,
starts, presence_literals));
model->Add(EqualMaxOfSelectedVariables(cumulative_is_not_empty, span_end,
ends, presence_literals));
if (!makespan.has_value()) {
model->Add(EqualMaxOfSelectedVariables(cumulative_is_not_empty, span_end,
ends, presence_literals));
}
LinearConstraintBuilder lc(model, kMinIntegerValue, IntegerValue(0));
lc.AddTerm(span_end, -integer_trail->UpperBound(capacity));

View File

@@ -14,6 +14,7 @@
#ifndef OR_TOOLS_SAT_LINEAR_RELAXATION_H_
#define OR_TOOLS_SAT_LINEAR_RELAXATION_H_
#include <optional>
#include <vector>
#include "ortools/sat/cp_model.pb.h"
@@ -146,7 +147,7 @@ void AppendNoOverlapRelaxationAndCutGenerator(const ConstraintProto& ct,
// Adds linearization of cumulative constraints. The second part adds an
// energetic equation linking the duration of all potential tasks to the actual
// max span * capacity of the cumulative constraint.
// span * capacity of the cumulative constraint.
void AppendCumulativeRelaxationAndCutGenerator(const ConstraintProto& ct,
Model* model,
LinearRelaxation* relaxation);
@@ -174,11 +175,13 @@ void AddNoOverlap2dCutGenerator(const ConstraintProto& ct, Model* m,
// Adds linearization of cumulative constraints. The second part adds an
// energetic equation linking the duration of all potential tasks to the actual
// max span * capacity of the cumulative constraint.
// span * capacity of the cumulative constraint. It uses the makespan to compute
// the span of the constraint if defined.
void AddCumulativeRelaxation(const AffineExpression& capacity,
SchedulingConstraintHelper* helper,
SchedulingDemandHelper* demands, Model* model,
LinearRelaxation* relaxation);
SchedulingDemandHelper* demands,
std::optional<AffineExpression> makespan,
Model* model, LinearRelaxation* relaxation);
void AddCumulativeCutGenerator(const AffineExpression& capacity,
SchedulingConstraintHelper* helper,

View File

@@ -1049,7 +1049,8 @@ bool ConvertCpModelProtoToMPModelProto(const CpModelProto& input,
for (int c = 0; c < num_constraints; ++c) {
const ConstraintProto& ct = input.constraints(c);
if (!ct.enforcement_literal().empty() &&
ct.constraint_case() != ConstraintProto::kBoolAnd) {
(ct.constraint_case() != ConstraintProto::kBoolAnd &&
ct.constraint_case() != ConstraintProto::kLinear)) {
// TODO(user): Support more constraints with enforcement.
VLOG(1) << "Cannot convert constraint: " << ct.DebugString();
return false;
@@ -1093,20 +1094,92 @@ bool ConvertCpModelProtoToMPModelProto(const CpModelProto& input,
}
case ConstraintProto::kLinear: {
if (ct.linear().domain().size() != 2) {
VLOG(1) << "Cannot convert constraint: " << ct.DebugString();
VLOG(1) << "Cannot convert constraint: " << ct.ShortDebugString();
return false;
}
if (ct.enforcement_literal_size() > 1) {
VLOG(1) << "Cannot convert constraint: " << ct.ShortDebugString();
return false;
}
MPConstraintProto* out_ct = output->add_constraint();
out_ct->set_lower_bound(ct.linear().domain(0));
out_ct->set_upper_bound(ct.linear().domain(1));
// Compute min/max activity.
int64_t min_activity = 0;
int64_t max_activity = 0;
const int num_terms = ct.linear().vars().size();
for (int i = 0; i < num_terms; ++i) {
const int var = ct.linear().vars(i);
if (var < 0) return false;
out_ct->add_var_index(var);
out_ct->add_coefficient(ct.linear().coeffs(i));
DCHECK_EQ(input.variables(var).domain().size(), 2);
const int64_t coeff = ct.linear().coeffs(i);
if (coeff > 0) {
min_activity += coeff * input.variables(var).domain(0);
max_activity += coeff * input.variables(var).domain(1);
} else {
min_activity += coeff * input.variables(var).domain(1);
max_activity += coeff * input.variables(var).domain(0);
}
}
if (ct.enforcement_literal().empty()) {
MPConstraintProto* out_ct = output->add_constraint();
if (min_activity < ct.linear().domain(0)) {
out_ct->set_lower_bound(ct.linear().domain(0));
} else {
out_ct->set_lower_bound(-std::numeric_limits<double>::infinity());
}
if (max_activity > ct.linear().domain(1)) {
out_ct->set_upper_bound(ct.linear().domain(1));
} else {
out_ct->set_lower_bound(std::numeric_limits<double>::infinity());
}
for (int i = 0; i < num_terms; ++i) {
const int var = ct.linear().vars(i);
if (var < 0) return false;
out_ct->add_var_index(var);
out_ct->add_coefficient(ct.linear().coeffs(i));
}
break;
}
std::vector<MPConstraintProto*> out_cts;
if (ct.linear().domain(1) < max_activity) {
MPConstraintProto* high_out_ct = output->add_constraint();
high_out_ct->set_lower_bound(
-std::numeric_limits<double>::infinity());
if (RefIsPositive(ct.enforcement_literal(0))) {
high_out_ct->add_var_index(ct.enforcement_literal(0));
high_out_ct->add_coefficient(max_activity - ct.linear().domain(1));
high_out_ct->set_upper_bound(max_activity);
} else {
// 1 - enf.
high_out_ct->add_var_index(PositiveRef(ct.enforcement_literal(0)));
high_out_ct->add_coefficient(ct.linear().domain(1) - max_activity);
high_out_ct->set_upper_bound(ct.linear().domain(1));
}
out_cts.push_back(high_out_ct);
}
if (ct.linear().domain(0) > min_activity) {
MPConstraintProto* low_out_ct = output->add_constraint();
low_out_ct->set_upper_bound(std::numeric_limits<double>::infinity());
if (RefIsPositive(ct.enforcement_literal(0))) {
low_out_ct->add_var_index(ct.enforcement_literal(0));
low_out_ct->add_coefficient(min_activity - ct.linear().domain(0));
low_out_ct->set_lower_bound(min_activity);
} else {
// 1 - enf.
low_out_ct->add_var_index(PositiveRef(ct.enforcement_literal(0)));
low_out_ct->add_coefficient(ct.linear().domain(0) - min_activity);
low_out_ct->set_lower_bound(ct.linear().domain(0));
}
out_cts.push_back(low_out_ct);
}
for (MPConstraintProto* out_ct : out_cts) {
for (int i = 0; i < num_terms; ++i) {
const int var = ct.linear().vars(i);
if (var < 0) return false;
out_ct->add_var_index(var);
out_ct->add_coefficient(ct.linear().coeffs(i));
}
}
break;
}

View File

@@ -24,7 +24,7 @@ option csharp_namespace = "Google.OrTools.Sat";
// Contains the definitions for all the sat algorithm parameters and their
// default values.
//
// NEXT TAG: 222
// NEXT TAG: 223
message SatParameters {
// In some context, like in a portfolio of search, it makes sense to name a
// given parameters set for logging purpose.
@@ -1204,6 +1204,20 @@ message SatParameters {
// domain by 2 to make it implied integer.
optional bool mip_automatically_scale_variables = 166 [default = true];
// If one tries to solve a MIP model with CP-SAT, because we assume all
// variables to be integer after scaling, we will not necessarily have the
// correct optimal. Note however that all feasible solutions are valid since
// we will just solve a more restricted version of the original problem.
//
// This parameter is here to prevent users from thinking the solution is
// optimal when it might not be. One will need to manually set this to false
// to solve a MIP model where the optimal might be different.
//
// Note that this is tested after some MIP presolve steps, so even if not
// all original variables are integer, we might end up with a pure IP after
// presolve and after implied integer detection.
optional bool only_solve_ip = 222 [default = false];
// When scaling constraint with double coefficients to integer coefficients,
// we will multiply by a power of 2 and round the coefficients. We will choose
// the lowest power such that we have no potential overflow (see

View File

@@ -346,7 +346,7 @@ void GenerateCumulativeEnergeticCuts(
if (makespan.has_value()) {
const double energy_up_to_makespan_lp =
capacity_lp * (makespan_lp - ToDouble(window_start));
if (energy_up_to_makespan_lp < max_energy_lp) {
if (energy_up_to_makespan_lp <= max_energy_lp) {
max_energy_lp = energy_up_to_makespan_lp;
use_makespan_in_cut = true;
}

View File

@@ -454,6 +454,142 @@ void MaxBoundedSubsetSum::Add(int64_t value) {
current_max_ = bound_;
}
// Canonicalizes the problem (all coefficients positive, every domain shifted
// to start at zero) and delegates to InternalSolve(), which runs a DP over
// the activity values in [0, rhs.Max() - min_activity]. Returns a
// default-constructed Result (solved = false) when the estimated work is too
// large.
BasicKnapsackSolver::Result BasicKnapsackSolver::Solve(
    const std::vector<Domain>& domains, const std::vector<int64_t>& coeffs,
    const std::vector<int64_t>& costs, const Domain& rhs) {
  const int num_vars = domains.size();
  if (num_vars == 0) return {};

  // Compute the minimum achievable activity and the largest domain size;
  // both drive the complexity checks below.
  int64_t min_activity = 0;
  int64_t max_domain_size = 0;
  for (int i = 0; i < num_vars; ++i) {
    max_domain_size = std::max(max_domain_size, domains[i].Size());
    if (coeffs[i] > 0) {
      min_activity += coeffs[i] * domains[i].Min();
    } else {
      min_activity += coeffs[i] * domains[i].Max();
    }
  }

  // The complexity of our DP will depend on the number of "activity" values
  // that need to be considered.
  //
  // TODO(user): We can also solve efficiently if max_activity - rhs.Min() is
  // small. Implement.
  const int64_t num_values = rhs.Max() - min_activity + 1;
  if (num_values < 0) {
    // Problem is clearly infeasible, we can report the result right away.
    Result result;
    result.solved = true;
    result.infeasible = true;
    return result;
  }

  // Abort if complexity too large.
  const int64_t max_work_per_variable = std::min(num_values, max_domain_size);
  if (rhs.Max() - min_activity > 1e6) return {};
  if (num_vars * num_values * max_work_per_variable > 1e8) return {};

  // Canonicalize to positive coeffs and non-negative variables.
  domains_.clear();
  coeffs_.clear();
  costs_.clear();
  for (int i = 0; i < num_vars; ++i) {
    if (coeffs[i] > 0) {
      // Shift the domain so the canonical variable starts at zero.
      domains_.push_back(domains[i].AdditionWith(Domain(-domains[i].Min())));
      coeffs_.push_back(coeffs[i]);
      costs_.push_back(costs[i]);
    } else {
      // Replace X by (Max - X) so the coefficient becomes positive; the cost
      // sign flips accordingly.
      domains_.push_back(
          domains[i].Negation().AdditionWith(Domain(domains[i].Max())));
      coeffs_.push_back(-coeffs[i]);
      costs_.push_back(-costs[i]);
    }
  }

  Result result =
      InternalSolve(num_values, rhs.AdditionWith(Domain(-min_activity)));
  if (result.solved && !result.infeasible) {
    // Transform solution back to the original variable space, undoing the
    // shift/negation applied above.
    for (int i = 0; i < num_vars; ++i) {
      if (coeffs[i] > 0) {
        result.solution[i] += domains[i].Min();
      } else {
        result.solution[i] = domains[i].Max() - result.solution[i];
      }
    }
  }
  return result;
}
// Runs the DP on the canonicalized problem (positive coefficients, domains
// starting at zero). num_values bounds the activity values considered:
// only activities in [0, num_values) can be feasible.
BasicKnapsackSolver::Result BasicKnapsackSolver::InternalSolve(
    int64_t num_values, const Domain& rhs) {
  const int num_vars = domains_.size();

  // The set of DP states that we will fill.
  // var_activity_states_[i][a] holds the minimum cost to reach activity `a`
  // using variables [0..i], and the value taken by variable i to do so.
  var_activity_states_.assign(num_vars, std::vector<State>(num_values));

  // Initialize with first variable.
  for (const int64_t v : domains_[0].Values()) {
    const int64_t value = v * coeffs_[0];
    CHECK_GE(value, 0);
    // Values are enumerated in increasing order, so we can stop early.
    if (value >= num_values) break;
    var_activity_states_[0][value].cost = v * costs_[0];
    var_activity_states_[0][value].value = v;
  }

  // Fill rest of the DP states.
  for (int i = 1; i < num_vars; ++i) {
    const std::vector<State>& prev = var_activity_states_[i - 1];
    std::vector<State>& current = var_activity_states_[i];
    for (int prev_value = 0; prev_value < num_values; ++prev_value) {
      // Skip unreachable states (cost left at int64 max).
      if (prev[prev_value].cost == std::numeric_limits<int64_t>::max()) {
        continue;
      }
      for (const int64_t v : domains_[i].Values()) {
        const int64_t value = prev_value + v * coeffs_[i];
        CHECK_GE(value, 0);
        if (value >= num_values) break;
        const int64_t new_cost = prev[prev_value].cost + v * costs_[i];
        if (new_cost < current[value].cost) {
          current[value].cost = new_cost;
          current[value].value = v;
        }
      }
    }
  }

  Result result;
  result.solved = true;

  // Pick the feasible activity (i.e. in rhs) with minimum cost.
  int64_t best_cost = std::numeric_limits<int64_t>::max();
  int64_t best_activity;  // Only read when best_cost was improved below.
  for (int v = 0; v < num_values; ++v) {
    // TODO(user): optimize this?
    if (!rhs.Contains(v)) continue;
    if (var_activity_states_.back()[v].cost < best_cost) {
      best_cost = var_activity_states_.back()[v].cost;
      best_activity = v;
    }
  }
  if (best_cost == std::numeric_limits<int64_t>::max()) {
    result.infeasible = true;
    return result;
  }

  // Recover the values by walking the DP layers backwards from the best
  // activity.
  result.solution.resize(num_vars);
  int64_t current_activity = best_activity;
  for (int i = num_vars - 1; i >= 0; --i) {
    const int64_t var_value = var_activity_states_[i][current_activity].value;
    result.solution[i] = var_value;
    current_activity -= coeffs_[i] * var_value;
  }
  return result;
}
namespace {
// We will call FullyCompressTuplesRecursive() for a set of prefixes of the

View File

@@ -16,6 +16,7 @@
#include <cstdint>
#include <deque>
#include <limits>
#include <vector>
#include "ortools/base/logging.h"
@@ -31,6 +32,7 @@
#include "ortools/sat/sat_base.h"
#include "ortools/sat/sat_parameters.pb.h"
#include "ortools/util/random_engine.h"
#include "ortools/util/sorted_interval_list.h"
#include "ortools/util/time_limit.h"
namespace operations_research {
@@ -218,6 +220,47 @@ class MaxBoundedSubsetSum {
std::vector<bool> expanded_sums_;
};
// Use Dynamic programming to solve a single knapsack. This is used by the
// presolver to simplify variables appearing in a single linear constraint.
//
// Complexity is the best of
// - O(num_variables * num_relevant_values ^ 2) or
// - O(num_variables * num_relevant_values * max_domain_size).
class BasicKnapsackSolver {
 public:
  // Solves the problem:
  // - minimize sum costs * X[i]
  // - subject to sum coeffs[i] * X[i] \in rhs, with X[i] \in Domain(i).
  //
  // Returns:
  // - (solved = false) if complexity is too high.
  // - (solved = true, infeasible = true) if proven infeasible.
  // - (solved = true, infeasible = false, solution) otherwise.
  struct Result {
    bool solved = false;
    bool infeasible = false;
    std::vector<int64_t> solution;
  };
  Result Solve(const std::vector<Domain>& domains,
               const std::vector<int64_t>& coeffs,
               const std::vector<int64_t>& costs, const Domain& rhs);

 private:
  // DP on the canonicalized problem; num_values bounds the activity range.
  Result InternalSolve(int64_t num_values, const Domain& rhs);

  // Canonicalized version: all coefficients positive, all domains shifted to
  // start at zero (filled by Solve() before calling InternalSolve()).
  std::vector<Domain> domains_;
  std::vector<int64_t> coeffs_;
  std::vector<int64_t> costs_;

  // DP table. We only need to keep one state with the same activity.
  struct State {
    // Minimum cost to reach this activity; int64 max means unreachable.
    int64_t cost = std::numeric_limits<int64_t>::max();
    // Value taken by the variable at this DP layer in the best solution.
    int64_t value = 0;
  };
  std::vector<std::vector<State>> var_activity_states_;
};
// Manages incremental averages.
class IncrementalAverage {
public:

View File

@@ -21,6 +21,7 @@
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
@@ -641,16 +642,60 @@ void DualBoundStrengthening::ProcessLinearConstraint(
namespace {
ConstraintProto CopyConstraintForDuplicateDetection(const ConstraintProto& ct) {
ConstraintProto copy = ct;
copy.clear_name();
copy.mutable_enforcement_literal()->Clear();
// This is used to detect if two linear constraints are equivalent when the
// literal `ref` is mapped to another value. The returned canonical copy:
// - removes NegatedRef(ref) from the enforcement and marks its presence with
//   a sentinel literal (int32 max) appended at the end,
// - merges all linear occurrences of ref / NegatedRef(ref) into a single
//   sentinel variable (int32 max) term, shifting the rhs domain by the
//   constant offset introduced when rewriting NegatedRef(ref) as (1 - ref).
// Two constraints are near-duplicates iff their copies serialize identically.
ConstraintProto CopyLinearWithSpecialBoolean(const ConstraintProto& ct,
                                             int ref) {
  DCHECK_EQ(ct.constraint_case(), ConstraintProto::kLinear);
  ConstraintProto copy;

  // Deal with enforcement: keep every literal except NegatedRef(ref).
  bool in_enforcement = false;
  for (const int literal : ct.enforcement_literal()) {
    if (literal == NegatedRef(ref)) {
      in_enforcement = true;
      continue;
    }
    copy.add_enforcement_literal(literal);
  }
  if (in_enforcement) {
    // We add a sentinel at the end.
    copy.add_enforcement_literal(std::numeric_limits<int32_t>::max());
  }

  // Deal with linear part: aggregate the coefficient of ref, rewriting any
  // NegatedRef(ref) term as (1 - ref).
  int64_t coeff = 0;
  int64_t offset = 0;
  for (int i = 0; i < ct.linear().vars().size(); ++i) {
    const int v = ct.linear().vars(i);
    const int64_t c = ct.linear().coeffs(i);
    if (v == ref) {
      coeff += c;
    } else if (v == NegatedRef(ref)) {
      // c * v = -c * (1 - v) + c
      offset += c;
      coeff -= c;
    } else {
      copy.mutable_linear()->add_vars(v);
      copy.mutable_linear()->add_coeffs(c);
    }
  }
  if (coeff != 0) {
    // Sentinel term standing in for ref.
    copy.mutable_linear()->add_vars(std::numeric_limits<int32_t>::max());
    copy.mutable_linear()->add_coeffs(coeff);
  }
  FillDomainInProto(
      ReadDomainFromProto(ct.linear()).AdditionWith(Domain(-offset)),
      copy.mutable_linear());

  // ref must appear somewhere, otherwise this canonicalization is pointless.
  CHECK(in_enforcement || coeff != 0);
  return copy;
}
} // namespace
bool DualBoundStrengthening::Strengthen(PresolveContext* context) {
num_deleted_constraints_ = 0;
const CpModelProto& cp_model = *context->working_model;
const int num_vars = cp_model.variables_size();
for (int var = 0; var < num_vars; ++var) {
@@ -715,10 +760,11 @@ bool DualBoundStrengthening::Strengthen(PresolveContext* context) {
}
}
// For detecting duplicate enforced constraint that can be made equivalent.
// For detecting near-duplicate constraint that can be made equivalent.
// hash -> (ct_index, modified ref).
absl::flat_hash_map<uint64_t, std::pair<int, int>> equiv_modified_constraints;
ConstraintProto copy;
std::string s;
absl::flat_hash_map<uint64_t, int> equiv_constraints;
// If there is only one blocking constraint, we can simplify the problem in
// a few situations.
@@ -739,6 +785,10 @@ bool DualBoundStrengthening::Strengthen(PresolveContext* context) {
const int ct_index = locking_ct_index_[var];
const ConstraintProto& ct = context->working_model->constraints(ct_index);
if (ct.constraint_case() == ConstraintProto::CONSTRAINT_NOT_SET) {
// TODO(user): Fix variable right away rather than waiting for next call.
continue;
}
if (ct.constraint_case() == ConstraintProto::kAtMostOne) {
context->UpdateRuleStats("TODO dual: tighten at most one");
continue;
@@ -815,39 +865,54 @@ bool DualBoundStrengthening::Strengthen(PresolveContext* context) {
// We can add an implication not_enforced => var to its bound ?
context->UpdateRuleStats("TODO dual: add implied bound");
} else if (!ct.enforcement_literal().empty()) {
// If this is a duplicate of another enforced constraint with an unique
// blocking enforcement literal, we can make the two Booleans
// equivalent!
//
// TODO(user): deal with other enforcement literals if any.
if (ct.enforcement_literal(0) == NegatedRef(ref)) {
copy = CopyConstraintForDuplicateDetection(ct);
s = copy.SerializeAsString();
const uint64_t hash = absl::Hash<std::string>()(s);
const auto [it, inserted] =
equiv_constraints.insert({hash, ct_index});
if (!inserted) {
// Already present!
const int other_c_with_same_hash = it->second;
const auto& other_ct =
context->working_model->constraints(other_c_with_same_hash);
copy = CopyConstraintForDuplicateDetection(other_ct);
if (s == copy.SerializeAsString()) {
const int a = ct.enforcement_literal(0);
const int b = other_ct.enforcement_literal(0);
if (!processed[PositiveRef(b)]) {
context->UpdateRuleStats(
"dual: equivalent enforcement for duplicate constraints");
processed[PositiveRef(a)] = true;
processed[PositiveRef(b)] = true;
context->StoreBooleanEqualityRelation(a, b);
continue;
}
}
// If we have two Booleans whose blocking constraints differ only in those
// Booleans, we can make the Booleans equivalent. This is because they
// will be forced to their bad value only if it is needed for that
// constraint.
//
// TODO(user): Generalize to non-Boolean. Also for Boolean, we might
// miss some possible reduction if replacing X by 1 - X makes a constraint
// a near-duplicate of another.
//
// TODO(user): We can generalize to non-linear constraint.
if (ct.constraint_case() == ConstraintProto::kLinear &&
context->CanBeUsedAsLiteral(ref)) {
copy = CopyLinearWithSpecialBoolean(ct, ref);
s = copy.SerializeAsString();
const uint64_t hash = absl::Hash<std::string>()(s);
const auto [it, inserted] =
equiv_modified_constraints.insert({hash, {ct_index, ref}});
if (!inserted) {
// Already present!
const auto [other_c_with_same_hash, other_ref] = it->second;
CHECK_NE(other_c_with_same_hash, ct_index);
const auto& other_ct =
context->working_model->constraints(other_c_with_same_hash);
copy = CopyLinearWithSpecialBoolean(other_ct, other_ref);
if (s == copy.SerializeAsString()) {
// We have a true equality. The two ref can be made equivalent.
if (!processed[PositiveRef(other_ref)]) {
context->UpdateRuleStats(
"dual: equivalent Boolean in near-duplicate constraints");
processed[PositiveRef(ref)] = true;
processed[PositiveRef(other_ref)] = true;
context->StoreBooleanEqualityRelation(ref, other_ref);
// We can delete one of the constraint since they are duplicate
// now.
++num_deleted_constraints_;
context->working_model->mutable_constraints(ct_index)->Clear();
context->UpdateConstraintVariableUsage(ct_index);
continue;
}
}
}
}
// Other potential cases?
if (!ct.enforcement_literal().empty()) {
// We can make enf equivalent to the constraint instead of just =>.
// This might also be useful for encoding if vars(0) is not a literal.
if (ct.constraint_case() == ConstraintProto::kLinear &&
@@ -908,6 +973,7 @@ bool DualBoundStrengthening::Strengthen(PresolveContext* context) {
context->UpdateRuleStats("dual: enforced equivalence");
}
VLOG(2) << "Num deleted constraints: " << num_deleted_constraints_;
return true;
}

View File

@@ -253,6 +253,9 @@ class DualBoundStrengthening {
return can_freely_decrease_until_[RefToIntegerVariable(ref)].value();
}
// Reset on each Strengthen() call.
int NumDeletedConstraints() const { return num_deleted_constraints_; }
private:
// We encode proto ref as IntegerVariable for indexing vectors.
static IntegerVariable RefToIntegerVariable(int ref) {
@@ -270,6 +273,8 @@ class DualBoundStrengthening {
// If num_locks_[var] == 1, this will be the unique constraint that block var
// in this direction. Note that it can be set to -1 if this wasn't recorded.
absl::StrongVector<IntegerVariable, int64_t> locking_ct_index_;
int num_deleted_constraints_ = 0;
};
// Detect the variable dominance relations within the given model. Note that