[CP-SAT] reduce memory consumption for large linear models; fix #3965

This commit is contained in:
Laurent Perron
2023-11-07 14:07:11 +01:00
parent 0198fcbdb9
commit 037a0a8f3e
4 changed files with 125 additions and 102 deletions

View File

@@ -2229,40 +2229,46 @@ bool CpModelPresolver::PresolveLinearEqualityWithModulo(ConstraintProto* ct) {
}
bool CpModelPresolver::PresolveLinearOfSizeOne(ConstraintProto* ct) {
DCHECK_EQ(ct->linear().vars().size(), 1);
CHECK_EQ(ct->linear().vars().size(), 1);
CHECK(RefIsPositive(ct->linear().vars(0)));
const int var = ct->linear().vars(0);
const Domain var_domain = context_->DomainOf(var);
const Domain rhs = ReadDomainFromProto(ct->linear())
.InverseMultiplicationBy(ct->linear().coeffs(0))
.IntersectionWith(var_domain);
if (rhs.IsEmpty()) {
context_->UpdateRuleStats("linear1: infeasible");
return MarkConstraintAsFalse(ct);
}
if (rhs == context_->DomainOf(var)) {
context_->UpdateRuleStats("linear1: always true");
return RemoveConstraint(ct);
}
// We can always canonicalize the constraint to a coefficient of 1.
// Note that this should never trigger as we usually divide by gcd already.
if (ct->linear().coeffs(0) != 1) {
context_->UpdateRuleStats("linear1: canonicalized");
ct->mutable_linear()->set_coeffs(0, 1);
FillDomainInProto(rhs, ct->mutable_linear());
}
// Size one constraint with no enforcement?
if (!HasEnforcementLiteral(*ct)) {
const int64_t coeff = RefIsPositive(ct->linear().vars(0))
? ct->linear().coeffs(0)
: -ct->linear().coeffs(0);
context_->UpdateRuleStats("linear1: without enforcement");
const int var = PositiveRef(ct->linear().vars(0));
const Domain rhs = ReadDomainFromProto(ct->linear());
if (!context_->IntersectDomainWith(var,
rhs.InverseMultiplicationBy(coeff))) {
return false;
}
if (!context_->IntersectDomainWith(var, rhs)) return false;
return RemoveConstraint(ct);
}
// This is just an implication, let's convert it right away.
if (context_->CanBeUsedAsLiteral(ct->linear().vars(0))) {
const Domain rhs = ReadDomainFromProto(ct->linear());
const bool zero_ok = rhs.Contains(0);
const bool one_ok = rhs.Contains(ct->linear().coeffs(0));
context_->UpdateRuleStats("linear1: is boolean implication");
if (!zero_ok && !one_ok) {
return MarkConstraintAsFalse(ct);
}
if (zero_ok && one_ok) {
return RemoveConstraint(ct);
}
const int ref = ct->linear().vars(0);
if (zero_ok) {
ct->mutable_bool_and()->add_literals(NegatedRef(ref));
if (context_->CanBeUsedAsLiteral(var)) {
DCHECK(rhs.IsFixed());
if (rhs.FixedValue() == 1) {
ct->mutable_bool_and()->add_literals(var);
} else {
ct->mutable_bool_and()->add_literals(ref);
CHECK_EQ(rhs.FixedValue(), 0);
ct->mutable_bool_and()->add_literals(NegatedRef(var));
}
// No var <-> constraint graph changes.
@@ -2270,20 +2276,67 @@ bool CpModelPresolver::PresolveLinearOfSizeOne(ConstraintProto* ct) {
return true;
}
// Detect encoding.
if (ct->enforcement_literal().size() == 1) {
// If we already have an encoding literal, this constraint is really
// an implication.
const int lit = ct->enforcement_literal(0);
if (rhs.IsFixed()) {
const int64_t value = rhs.FixedValue();
int encoding_lit;
if (context_->HasVarValueEncoding(var, value, &encoding_lit)) {
if (lit == encoding_lit) return false;
context_->AddImplication(lit, encoding_lit);
context_->UpdateNewConstraintsVariableUsage();
ct->Clear();
context_->UpdateRuleStats("linear1: transformed to implication");
return true;
} else {
if (context_->StoreLiteralImpliesVarEqValue(lit, var, value)) {
// The domain is not actually modified, but we want to rescan the
// constraints linked to this variable.
context_->modified_domains.Set(var);
}
context_->UpdateNewConstraintsVariableUsage();
}
return false;
}
const Domain complement = rhs.Complement().IntersectionWith(var_domain);
if (complement.IsFixed()) {
const int64_t value = complement.FixedValue();
int encoding_lit;
if (context_->HasVarValueEncoding(var, value, &encoding_lit)) {
if (NegatedRef(lit) == encoding_lit) return false;
context_->AddImplication(lit, NegatedRef(encoding_lit));
context_->UpdateNewConstraintsVariableUsage();
ct->Clear();
context_->UpdateRuleStats("linear1: transformed to implication");
return true;
} else {
if (context_->StoreLiteralImpliesVarNEqValue(lit, var, value)) {
// The domain is not actually modified, but we want to rescan the
// constraints linked to this variable.
context_->modified_domains.Set(var);
}
context_->UpdateNewConstraintsVariableUsage();
}
return false;
}
}
// If the constraint is literal => x in domain and x = abs(abs_arg), we can
// replace x by abs_arg and hopefully remove the variable x later.
//
// Tricky: Note that we do not do that for "encoding" constraints as this
// can be problematic if it is referenced by HasVarValueEncoding().
int abs_arg;
if (ct->linear().coeffs(0) == 1 &&
context_->GetAbsRelation(ct->linear().vars(0), &abs_arg) &&
PositiveRef(ct->linear().vars(0)) != abs_arg) {
if (context_->GetAbsRelation(var, &abs_arg) && var != abs_arg) {
DCHECK(RefIsPositive(abs_arg));
// TODO(user): Deal with coeff = -1, here or during canonicalization.
context_->UpdateRuleStats("linear1: remove abs from abs(x) in domain");
const Domain implied_abs_target_domain =
ReadDomainFromProto(ct->linear())
.IntersectionWith({0, std::numeric_limits<int64_t>::max()})
.IntersectionWith(context_->DomainOf(ct->linear().vars(0)));
rhs.IntersectionWith({0, std::numeric_limits<int64_t>::max()});
if (implied_abs_target_domain.IsEmpty()) {
return MarkConstraintAsFalse(ct);
}
@@ -2292,7 +2345,6 @@ bool CpModelPresolver::PresolveLinearOfSizeOne(ConstraintProto* ct) {
implied_abs_target_domain
.UnionWith(implied_abs_target_domain.Negation())
.IntersectionWith(context_->DomainOf(abs_arg));
if (new_abs_var_domain.IsEmpty()) {
return MarkConstraintAsFalse(ct);
}
@@ -2305,68 +2357,6 @@ bool CpModelPresolver::PresolveLinearOfSizeOne(ConstraintProto* ct) {
return true;
}
// Detect encoding.
if (ct->enforcement_literal_size() != 1) return false;
// If we already have an encoding literal, this constraint is really
// an implication.
const int lit = ct->enforcement_literal(0);
const int var = ct->linear().vars(0);
const Domain var_domain = context_->DomainOf(var);
const Domain rhs = ReadDomainFromProto(ct->linear())
.InverseMultiplicationBy(ct->linear().coeffs(0))
.IntersectionWith(var_domain);
if (rhs.IsEmpty()) {
context_->UpdateRuleStats("linear1: infeasible");
return MarkConstraintAsFalse(ct);
}
if (rhs == var_domain) {
context_->UpdateRuleStats("linear1: always true");
return RemoveConstraint(ct);
}
if (rhs.IsFixed()) {
const int64_t value = rhs.FixedValue();
int encoding_lit;
if (context_->HasVarValueEncoding(var, value, &encoding_lit)) {
if (lit == encoding_lit) return false;
context_->AddImplication(lit, encoding_lit);
context_->UpdateNewConstraintsVariableUsage();
ct->Clear();
context_->UpdateRuleStats("linear1: transformed to implication");
return true;
} else {
if (context_->StoreLiteralImpliesVarEqValue(lit, var, value)) {
// The domain is not actually modified, but we want to rescan the
// constraints linked to this variable.
context_->modified_domains.Set(var);
}
context_->UpdateNewConstraintsVariableUsage();
}
return false;
}
const Domain complement = rhs.Complement().IntersectionWith(var_domain);
if (complement.IsFixed()) {
const int64_t value = complement.FixedValue();
int encoding_lit;
if (context_->HasVarValueEncoding(var, value, &encoding_lit)) {
if (NegatedRef(lit) == encoding_lit) return false;
context_->AddImplication(lit, NegatedRef(encoding_lit));
context_->UpdateNewConstraintsVariableUsage();
ct->Clear();
context_->UpdateRuleStats("linear1: transformed to implication");
return true;
} else {
if (context_->StoreLiteralImpliesVarNEqValue(lit, var, value)) {
// The domain is not actually modified, but we want to rescan the
// constraints linked to this variable.
context_->modified_domains.Set(var);
}
context_->UpdateNewConstraintsVariableUsage();
}
}
return false;
}

View File

@@ -625,6 +625,7 @@ void LinearProgrammingConstraint::SetLevel(int level) {
// Get rid of all optimal constraint each time we go back to level zero.
if (level == 0) rev_optimal_constraints_size_ = 0;
optimal_constraints_.resize(rev_optimal_constraints_size_);
cumulative_optimal_constraint_sizes_.resize(rev_optimal_constraints_size_);
if (lp_solution_is_set_ && level < lp_solution_level_) {
lp_solution_is_set_ = false;
}
@@ -652,6 +653,26 @@ void LinearProgrammingConstraint::AddCutGenerator(CutGenerator generator) {
bool LinearProgrammingConstraint::IncrementalPropagate(
const std::vector<int>& watch_indices) {
// If we have a really deep branch, with a lot of LP explanation constraint,
// we could take a quadratic amount of memory: O(num_var) per number of
// propagation in that branch. To avoid that, once the memory starts to be
// over a few GB, we only propagate from time to time. This way we do not need
// to keep that many constraints around.
//
// Note that 100 million int32_t variables, with the int128 coefficients and
// extra propagation vector is already a few GB.
if (!cumulative_optimal_constraint_sizes_.empty()) {
const double current_size =
static_cast<double>(cumulative_optimal_constraint_sizes_.back());
const double low_limit = 1e7;
if (current_size > low_limit) {
// We only propagate if we use less than 100 times the number of current
// integer literal enqueued.
const double num_enqueues = static_cast<double>(integer_trail_->Index());
if ((current_size - low_limit) > 100 * num_enqueues) return true;
}
}
if (!lp_solution_is_set_) {
return Propagate();
}
@@ -2213,7 +2234,13 @@ bool LinearProgrammingConstraint::PropagateLpConstraint(
const int64_t stamp = integer_trail_->num_enqueues();
const bool no_conflict = cp_constraint->Propagate();
if (no_conflict && integer_trail_->num_enqueues() == stamp) return true;
const int64_t current_size =
cumulative_optimal_constraint_sizes_.empty()
? 0
: cumulative_optimal_constraint_sizes_.back();
optimal_constraints_.push_back(std::move(cp_constraint));
cumulative_optimal_constraint_sizes_.push_back(current_size + ct.vars.size());
rev_optimal_constraints_size_ = optimal_constraints_.size();
return no_conflict;
}

View File

@@ -531,6 +531,7 @@ class LinearProgrammingConstraint : public PropagatorInterface,
// fixing.
int rev_optimal_constraints_size_ = 0;
std::vector<std::unique_ptr<IntegerSumLE128>> optimal_constraints_;
std::vector<int64_t> cumulative_optimal_constraint_sizes_;
// Last OPTIMAL solution found by a call to the underlying LP solver.
// On IncrementalPropagate(), if the bound updates do not invalidate this

View File

@@ -1111,7 +1111,8 @@ bool PresolveContext::StoreAbsRelation(int target_ref, int ref) {
if (!insert_status.second) {
// Tricky: overwrite if the old value refers to a now unused variable.
const int candidate = insert_status.first->second.Get();
if (removed_variables_.contains(candidate)) {
if (removed_variables_.contains(candidate) ||
GetAffineRelation(candidate).representative != candidate) {
insert_status.first->second = SavedVariable(PositiveRef(ref));
return true;
}
@@ -1121,16 +1122,20 @@ bool PresolveContext::StoreAbsRelation(int target_ref, int ref) {
}
bool PresolveContext::GetAbsRelation(int target_ref, int* ref) {
// This is currently only called with representative and positive ref.
// It is important to keep it like this.
CHECK(RefIsPositive(target_ref));
CHECK_EQ(GetAffineRelation(target_ref).representative, target_ref);
auto it = abs_relations_.find(target_ref);
if (it == abs_relations_.end()) return false;
// Tricky: In some rare case the stored relation can refer to a deleted
// variable, so we need to ignore it.
//
// TODO(user): Incorporate this as part of SavedVariable/SavedLiteral so we
// make sure we never forget about this.
// variable, so we need to ignore it. We also ignore any relation involving
// a variable that has a different affine representative.
const int candidate = PositiveRef(it->second.Get());
if (removed_variables_.contains(candidate)) {
if (removed_variables_.contains(candidate) ||
GetAffineRelation(candidate).representative != candidate) {
abs_relations_.erase(it);
return false;
}