more dual presolve in CP-SAT

This commit is contained in:
Laurent Perron
2021-01-14 10:32:45 +01:00
parent 3482577a04
commit 692b590128
5 changed files with 175 additions and 49 deletions

View File

@@ -4908,7 +4908,8 @@ void CpModelPresolver::PresolveToFixPoint() {
if (context_->ModelIsUnsat()) return;
// Detect & exploit dominance between variables, or variables that can move
// freely in one direction.
// freely in one direction. Or variables that are just blocked by one
// constraint in one direction.
//
// TODO(user): We can support assumptions but we need to not cut them out of
// the feasible region.
@@ -4918,6 +4919,11 @@ void CpModelPresolver::PresolveToFixPoint() {
DualBoundStrengthening dual_bound_strengthening;
DetectDominanceRelations(*context_, &var_dom, &dual_bound_strengthening);
if (!dual_bound_strengthening.Strengthen(context_)) return;
// TODO(user): The Strengthen() function above might make some inequalities
// tight. Currently, because we only do that for implications, this will not
// change who dominates whom, but in general we should process the new
// constraint direction before calling this.
if (!ExploitDominanceRelations(var_dom, context_)) return;
}

View File

@@ -47,9 +47,6 @@ using glop::ColIndex;
using glop::Fractional;
using glop::RowIndex;
const double LinearProgrammingConstraint::kCpEpsilon = 1e-4;
const double LinearProgrammingConstraint::kLpEpsilon = 1e-6;
void ScatteredIntegerVector::ClearAndResize(int size) {
if (is_sparse_) {
for (const glop::ColIndex col : non_zeros_) {
@@ -955,12 +952,21 @@ bool LinearProgrammingConstraint::PostprocessAndAddCut(
extra_info);
}
// TODO(user): This can still be too slow on some problems like
// 30_70_45_05_100.mps.gz. Not this actual function, but the set of computations
// it triggers. We should add heuristics to abort earlier if a cut is not
// promising. Or only test a few positions and not all rows.
void LinearProgrammingConstraint::AddCGCuts() {
const RowIndex num_rows = lp_data_.num_constraints();
std::vector<std::pair<RowIndex, double>> lp_multipliers;
std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
for (RowIndex row(0); row < num_rows; ++row) {
ColIndex basis_col = simplex_.GetBasis(row);
const Fractional lp_value = GetVariableValueAtCpScale(basis_col);
// Only consider fractional basis elements. We ignore elements that are close
// to an integer to reduce the number of positions we try.
//
// TODO(user): We could just look at the diff with std::floor() in the hope
// that when we are just under an integer, the exact computation below will
// also be just under it.
@@ -972,29 +978,43 @@ void LinearProgrammingConstraint::AddCGCuts() {
if (time_limit_->LimitReached()) break;
const glop::ScatteredRow& lambda = simplex_.GetUnitRowLeftInverse(row);
glop::DenseColumn lp_multipliers(num_rows, 0.0);
// TODO(user): Avoid code duplication between the sparse/dense path.
double magnitude = 0.0;
int num_non_zeros = 0;
for (RowIndex row(0); row < num_rows; ++row) {
lp_multipliers[row] = lambda.values[glop::RowToColIndex(row)];
if (std::abs(lp_multipliers[row]) < 1e-12) {
lp_multipliers[row] = 0.0;
continue;
}
lp_multipliers.clear();
const glop::ScatteredRow& lambda = simplex_.GetUnitRowLeftInverse(row);
if (lambda.non_zeros.empty()) {
for (RowIndex row(0); row < num_rows; ++row) {
const double value = lambda.values[glop::RowToColIndex(row)];
if (std::abs(value) < kZeroTolerance) continue;
// There should be no BASIC status, but there could be imprecision
// in the GetUnitRowLeftInverse() code? Not sure, so better be safe.
const auto status = simplex_.GetConstraintStatus(row);
if (status == glop::ConstraintStatus::BASIC) {
VLOG(1) << "BASIC row not expected! " << lp_multipliers[row];
lp_multipliers[row] = 0.0;
}
// There should be no BASIC status, but there could be imprecision
// in the GetUnitRowLeftInverse() code? Not sure, so better be safe.
const auto status = simplex_.GetConstraintStatus(row);
if (status == glop::ConstraintStatus::BASIC) {
VLOG(1) << "BASIC row not expected! " << value;
continue;
}
magnitude = std::max(magnitude, std::abs(lp_multipliers[row]));
if (lp_multipliers[row] != 0.0) ++num_non_zeros;
magnitude = std::max(magnitude, std::abs(value));
lp_multipliers.push_back({row, value});
}
} else {
for (const ColIndex col : lambda.non_zeros) {
const RowIndex row = glop::ColToRowIndex(col);
const double value = lambda.values[col];
if (std::abs(value) < kZeroTolerance) continue;
const auto status = simplex_.GetConstraintStatus(row);
if (status == glop::ConstraintStatus::BASIC) {
VLOG(1) << "BASIC row not expected! " << value;
continue;
}
magnitude = std::max(magnitude, std::abs(value));
lp_multipliers.push_back({row, value});
}
}
if (num_non_zeros == 0) continue;
if (lp_multipliers.empty()) continue;
Fractional scaling;
for (int i = 0; i < 2; ++i) {
@@ -1003,14 +1023,14 @@ void LinearProgrammingConstraint::AddCGCuts() {
//
// TODO(user): Maybe add a heuristic to know beforehand which sign to
// use?
for (RowIndex row(0); row < num_rows; ++row) {
lp_multipliers[row] = -lp_multipliers[row];
for (std::pair<RowIndex, double>& p : lp_multipliers) {
p.second = -p.second;
}
}
// TODO(user): We use a lower value here otherwise we might run into
// overflow while computing the cut. This should be fixable.
const std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers =
integer_multipliers =
ScaleLpMultiplier(/*take_objective_into_account=*/false,
lp_multipliers, &scaling, /*max_pow=*/52);
AddCutFromConstraints("CG", integer_multipliers);
@@ -1750,20 +1770,20 @@ void LinearProgrammingConstraint::SetImpliedLowerBoundReason(
integer_trail_->RemoveLevelZeroBounds(&integer_reason_);
}
// TODO(user): Provide a sparse interface.
std::vector<std::pair<RowIndex, IntegerValue>>
LinearProgrammingConstraint::ScaleLpMultiplier(
bool take_objective_into_account,
const glop::DenseColumn& dense_lp_multipliers, Fractional* scaling,
int max_pow) const {
const std::vector<std::pair<RowIndex, double>>& lp_multipliers,
Fractional* scaling, int max_pow) const {
double max_sum = 0.0;
std::vector<std::pair<RowIndex, Fractional>> cp_multipliers;
for (RowIndex row(0); row < dense_lp_multipliers.size(); ++row) {
const Fractional lp_multi = dense_lp_multipliers[row];
tmp_cp_multipliers_.clear();
for (const std::pair<RowIndex, double>& p : lp_multipliers) {
const RowIndex row = p.first;
const Fractional lp_multi = p.second;
// We ignore small values since these are likely errors and will not
// contribute much to the new lp constraint anyway.
if (std::abs(lp_multi) < 1e-12) continue;
if (std::abs(lp_multi) < kZeroTolerance) continue;
// Remove trivial bad cases.
//
@@ -1779,7 +1799,7 @@ LinearProgrammingConstraint::ScaleLpMultiplier(
}
const Fractional cp_multi = scaler_.UnscaleDualValue(row, lp_multi);
cp_multipliers.push_back({row, cp_multi});
tmp_cp_multipliers_.push_back({row, cp_multi});
max_sum += ToDouble(infinity_norms_[row]) * std::abs(cp_multi);
}
@@ -1809,7 +1829,7 @@ LinearProgrammingConstraint::ScaleLpMultiplier(
// Scale the multipliers by *scaling.
//
// TODO(user): Maybe use int128 to avoid overflow?
for (const auto entry : cp_multipliers) {
for (const auto entry : tmp_cp_multipliers_) {
const IntegerValue coeff(std::round(entry.second * (*scaling)));
if (coeff != 0) integer_multipliers.push_back({entry.first, coeff});
}
@@ -2015,9 +2035,11 @@ bool LinearProgrammingConstraint::ExactLpReasonning() {
//
// TODO(user): Provide and use a sparse API in Glop to get the duals.
const RowIndex num_rows = simplex_.GetProblemNumRows();
glop::DenseColumn lp_multipliers(num_rows);
std::vector<std::pair<RowIndex, double>> lp_multipliers;
for (RowIndex row(0); row < num_rows; ++row) {
lp_multipliers[row] = -simplex_.GetDualValue(row);
const double value = -simplex_.GetDualValue(row);
if (std::abs(value) < kZeroTolerance) continue;
lp_multipliers.push_back({row, value});
}
Fractional scaling;
@@ -2073,9 +2095,16 @@ bool LinearProgrammingConstraint::ExactLpReasonning() {
bool LinearProgrammingConstraint::FillExactDualRayReason() {
Fractional scaling;
const glop::DenseColumn ray = simplex_.GetDualRay();
std::vector<std::pair<RowIndex, double>> lp_multipliers;
for (RowIndex row(0); row < ray.size(); ++row) {
const double value = ray[row];
if (std::abs(value) < kZeroTolerance) continue;
lp_multipliers.push_back({row, value});
}
std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers =
ScaleLpMultiplier(/*take_objective_into_account=*/false,
simplex_.GetDualRay(), &scaling);
ScaleLpMultiplier(/*take_objective_into_account=*/false, lp_multipliers,
&scaling);
IntegerValue new_constraint_ub;
if (!ComputeNewLinearConstraint(integer_multipliers, &tmp_scattered_vector_,

View File

@@ -289,8 +289,8 @@ class LinearProgrammingConstraint : public PropagatorInterface,
// will still be exact as it will work for any set of multipliers.
std::vector<std::pair<glop::RowIndex, IntegerValue>> ScaleLpMultiplier(
bool take_objective_into_account,
const glop::DenseColumn& dense_lp_multipliers, glop::Fractional* scaling,
int max_pow = 62) const;
const std::vector<std::pair<glop::RowIndex, double>>& lp_multipliers,
glop::Fractional* scaling, int max_pow = 62) const;
// Computes from an integer linear combination of the integer rows of the LP a
// new constraint of the form "sum terms <= upper_bound". All computation are
@@ -372,10 +372,14 @@ class LinearProgrammingConstraint : public PropagatorInterface,
// This epsilon is related to the precision of the value/reduced_cost returned
// by the LP once they have been scaled back into the CP domain. So for large
// domain or cost coefficient, we may have some issues.
static const double kCpEpsilon;
static constexpr double kCpEpsilon = 1e-4;
// Same but at the LP scale.
static const double kLpEpsilon;
static constexpr double kLpEpsilon = 1e-6;
// Anything coming from the LP with a magnitude below that will be assumed to
// be zero.
static constexpr double kZeroTolerance = 1e-12;
// Class responsible for managing all possible constraints that may be part
// of the LP.
@@ -416,6 +420,9 @@ class LinearProgrammingConstraint : public PropagatorInterface,
std::vector<glop::RowIndex> tmp_slack_rows_;
std::vector<IntegerValue> tmp_slack_bounds_;
// Used by ScaleLpMultiplier().
mutable std::vector<std::pair<glop::RowIndex, double>> tmp_cp_multipliers_;
// Structures used for mirroring IntegerVariables inside the underlying LP
// solver: an integer variable var is mirrored by mirror_lp_variable_[var].
// Note that these indices are dense in [0, mirror_lp_variable_.size()] so

View File

@@ -514,17 +514,24 @@ std::string VarDomination::DominationDebugString(IntegerVariable var) const {
return result;
}
void DualBoundStrengthening::CannotDecrease(absl::Span<const int> refs) {
// TODO(user): No need to set locking_ct_index_[var] if num_locks_[var] > 1
void DualBoundStrengthening::CannotDecrease(absl::Span<const int> refs,
int ct_index) {
for (const int ref : refs) {
const IntegerVariable var = RefToIntegerVariable(ref);
can_freely_decrease_until_[var] = kMaxIntegerValue;
num_locks_[var]++;
locking_ct_index_[var] = ct_index;
}
}
void DualBoundStrengthening::CannotIncrease(absl::Span<const int> refs) {
void DualBoundStrengthening::CannotIncrease(absl::Span<const int> refs,
int ct_index) {
for (const int ref : refs) {
const IntegerVariable var = RefToIntegerVariable(ref);
can_freely_decrease_until_[NegationOf(var)] = kMaxIntegerValue;
num_locks_[NegationOf(var)]++;
locking_ct_index_[NegationOf(var)] = ct_index;
}
}
@@ -533,6 +540,8 @@ void DualBoundStrengthening::CannotMove(absl::Span<const int> refs) {
const IntegerVariable var = RefToIntegerVariable(ref);
can_freely_decrease_until_[var] = kMaxIntegerValue;
can_freely_decrease_until_[NegationOf(var)] = kMaxIntegerValue;
num_locks_[var]++;
num_locks_[NegationOf(var)]++;
}
}
@@ -558,6 +567,7 @@ void DualBoundStrengthening::ProcessLinearConstraint(
// lb side.
if (min_activity < lb_limit) {
num_locks_[var]++;
if (min_activity + term_diff < lb_limit) {
can_freely_decrease_until_[var] = kMaxIntegerValue;
} else {
@@ -572,12 +582,14 @@ void DualBoundStrengthening::ProcessLinearConstraint(
if (is_objective) {
// We never want to increase the objective value.
num_locks_[NegationOf(var)]++;
can_freely_decrease_until_[NegationOf(var)] = kMaxIntegerValue;
continue;
}
// ub side.
if (max_activity > ub_limit) {
num_locks_[NegationOf(var)]++;
if (max_activity - term_diff > ub_limit) {
can_freely_decrease_until_[NegationOf(var)] = kMaxIntegerValue;
} else {
@@ -652,6 +664,62 @@ bool DualBoundStrengthening::Strengthen(PresolveContext* context) {
CHECK(context->IntersectDomainWith(var, Domain(new_lb, new_ub)));
}
}
// If (a => b) is the only constraint blocking a literal a in the up
// direction, then we can set a == b !
//
// TODO(user): We can deal with more general situations. For instance an at
// most one that is the only blocking constraint can become an exactly one.
std::vector<bool> processed(num_vars, false);
for (int positive_ref = 0; positive_ref < num_vars; ++positive_ref) {
if (processed[positive_ref]) continue;
if (context->IsFixed(positive_ref)) continue;
const IntegerVariable var = RefToIntegerVariable(positive_ref);
int ct_index = -1;
if (num_locks_[var] == 1 && locking_ct_index_[var] != -1) {
ct_index = locking_ct_index_[var];
} else if (num_locks_[NegationOf(var)] == 1 &&
locking_ct_index_[NegationOf(var)] != -1) {
ct_index = locking_ct_index_[NegationOf(var)];
} else {
continue;
}
const ConstraintProto& ct = context->working_model->constraints(ct_index);
if (ct.constraint_case() != ConstraintProto::kBoolAnd) continue;
if (ct.enforcement_literal().size() != 1) continue;
// Recover a => b where a has a unique up_lock (i.e. this constraint).
// Note that if many implications are encoded in the same bool_and, we have
// to be careful that a appears in just one of them.
int a = ct.enforcement_literal(0);
int b = 1;
if (PositiveRef(a) == positive_ref &&
num_locks_[RefToIntegerVariable(NegatedRef(a))] == 1) {
// Here, we can only add the equivalence if the literal is the only one
// on the lhs, otherwise there are actually more locks.
if (ct.bool_and().literals().size() != 1) continue;
b = ct.bool_and().literals(0);
} else {
bool found = false;
b = NegatedRef(ct.enforcement_literal(0));
for (const int lhs : ct.bool_and().literals()) {
if (PositiveRef(lhs) == positive_ref &&
num_locks_[RefToIntegerVariable(lhs)] == 1) {
found = true;
a = NegatedRef(lhs);
break;
}
}
CHECK(found);
}
CHECK_EQ(num_locks_[RefToIntegerVariable(NegatedRef(a))], 1);
processed[PositiveRef(a)] = true;
processed[PositiveRef(b)] = true;
context->StoreBooleanEqualityRelation(a, b);
context->UpdateRuleStats("dual: enforced equivalence");
}
return true;
}
@@ -724,7 +792,7 @@ void DetectDominanceRelations(
for (int c = 0; c < num_constraints; ++c) {
const ConstraintProto& ct = cp_model.constraints(c);
if (phase == 0) {
dual_bound_strengthening->CannotIncrease(ct.enforcement_literal());
dual_bound_strengthening->CannotIncrease(ct.enforcement_literal(), c);
}
switch (ct.constraint_case()) {
case ConstraintProto::kBoolOr:
@@ -737,7 +805,8 @@ void DetectDominanceRelations(
break;
case ConstraintProto::kBoolAnd:
if (phase == 0) {
dual_bound_strengthening->CannotDecrease(ct.bool_and().literals());
dual_bound_strengthening->CannotDecrease(ct.bool_and().literals(),
c);
}
// We process it like n clauses.

View File

@@ -213,11 +213,13 @@ class DualBoundStrengthening {
// This must be called before processing the constraints.
void Reset(int num_variables) {
can_freely_decrease_until_.assign(2 * num_variables, kMinIntegerValue);
num_locks_.assign(2 * num_variables, 0);
locking_ct_index_.assign(2 * num_variables, -1);
}
// All constraints should be mapped to one or more calls to these functions.
void CannotDecrease(absl::Span<const int> refs);
void CannotIncrease(absl::Span<const int> refs);
void CannotDecrease(absl::Span<const int> refs, int ct_index = -1);
void CannotIncrease(absl::Span<const int> refs, int ct_index = -1);
void CannotMove(absl::Span<const int> refs);
// Most of the logic here deals with linear constraints.
@@ -229,6 +231,11 @@ class DualBoundStrengthening {
// Once ALL constraints have been processed, call this to fix variables or
// reduce their domain if possible.
//
// Note that this also tightens some constraints that are the only ones
// blocking in one direction. Currently we only do that for implications, so
// that if we have two Booleans such that a + b <= 1, we transform that to
// a + b = 1 and we remove one variable since we now have an equivalence
// relation.
bool Strengthen(PresolveContext* context);
// The given ref can always freely decrease until the returned value.
@@ -246,6 +253,14 @@ class DualBoundStrengthening {
// Starts with kMaxIntegerValue, and decrease as constraints are processed.
absl::StrongVector<IntegerVariable, IntegerValue> can_freely_decrease_until_;
// How many times can_freely_decrease_until_[var] was set by a constraint.
// If only one constraint is blocking, we can do more presolve.
absl::StrongVector<IntegerVariable, int64> num_locks_;
// If num_locks_[var] == 1, this will be the unique constraint that blocks var
// in this direction. Note that it can be set to -1 if this wasn't recorded.
absl::StrongVector<IntegerVariable, int64> locking_ct_index_;
};
// Detect the variable dominance relations within the given model. Note that