[CP-SAT] speedup on constraint_violation; cleanup clause invariants; fix explanation of division

This commit is contained in:
Laurent Perron
2023-10-15 18:08:33 +02:00
parent 6f08d473d8
commit 18b5b80010
12 changed files with 420 additions and 69 deletions

View File

@@ -458,7 +458,7 @@ SatClause* LiteralWatchers::InprocessingAddClause(
void LiteralWatchers::CleanUpWatchers() {
SCOPED_TIME_STAT(&stats_);
for (LiteralIndex index : needs_cleaning_.PositionsSetAtLeastOnce()) {
for (const LiteralIndex index : needs_cleaning_.PositionsSetAtLeastOnce()) {
DCHECK(needs_cleaning_[index]);
RemoveIf(&(watchers_on_false_[index]), [](const Watcher& watcher) {
return !watcher.clause->IsAttached();
@@ -521,6 +521,8 @@ bool BinaryImplicationGraph::AddBinaryClause(Literal a, Literal b) {
drat_proof_handler_->AddClause({a, b});
}
DCHECK(!is_removed_[a]);
DCHECK(!is_removed_[b]);
estimated_sizes_[a.NegatedIndex()]++;
estimated_sizes_[b.NegatedIndex()]++;
implications_[a.NegatedIndex()].push_back(b);
@@ -966,7 +968,7 @@ void BinaryImplicationGraph::MinimizeConflictExperimental(
SCOPED_TIME_STAT(&stats_);
is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
is_simplified_.ClearAndResize(LiteralIndex(implications_.size()));
for (Literal lit : *conflict) {
for (const Literal lit : *conflict) {
is_marked_.Set(lit);
}
@@ -987,7 +989,7 @@ void BinaryImplicationGraph::MinimizeConflictExperimental(
const Literal lit = (*conflict)[i];
const int lit_level = trail.Info(lit.Variable()).level;
bool keep_literal = true;
for (Literal implied : implications_[lit]) {
for (const Literal implied : implications_[lit]) {
if (is_marked_[implied]) {
DCHECK_LE(lit_level, trail.Info(implied.Variable()).level);
if (lit_level == trail.Info(implied.Variable()).level &&
@@ -1044,7 +1046,13 @@ void BinaryImplicationGraph::RemoveFixedVariables() {
// limit. I think it will be good to maintain that invariant though,
// otherwise fixed literals might never be removed from these lists...
for (const Literal lit : implications_[true_literal.NegatedIndex()]) {
is_marked_.Set(lit.NegatedIndex());
if (lit.NegatedIndex() < representative_of_.size() &&
representative_of_[lit.Negated()] != kNoLiteralIndex) {
// We mark its representative instead.
is_marked_.Set(representative_of_[lit.Negated()]);
} else {
is_marked_.Set(lit.NegatedIndex());
}
}
gtl::STLClearObject(&(implications_[true_literal]));
gtl::STLClearObject(&(implications_[true_literal.NegatedIndex()]));
@@ -1188,6 +1196,7 @@ bool BinaryImplicationGraph::DetectEquivalences(bool log_info) {
if (!Propagate(trail_)) return false;
RemoveFixedVariables();
const VariablesAssignment& assignment = trail_->Assignment();
DCHECK(InvariantsAreOk());
// TODO(user): We could just do it directly though.
int num_fixed_during_scc = 0;
@@ -1346,6 +1355,10 @@ bool BinaryImplicationGraph::DetectEquivalences(bool log_info) {
}
time_limit_->AdvanceDeterministicTime(dtime);
if (num_fixed_during_scc > 0) {
RemoveFixedVariables();
}
DCHECK(InvariantsAreOk());
LOG_IF(INFO, log_info) << "SCC. " << num_equivalences
<< " redundant equivalent literals. "
<< num_fixed_during_scc << " fixed. "
@@ -1372,6 +1385,7 @@ bool BinaryImplicationGraph::ComputeTransitiveReduction(bool log_info) {
// with any of that here.
if (!Propagate(trail_)) return false;
RemoveFixedVariables();
DCHECK(InvariantsAreOk());
log_info |= VLOG_IS_ON(1);
WallTimer wall_timer;
@@ -1380,6 +1394,7 @@ bool BinaryImplicationGraph::ComputeTransitiveReduction(bool log_info) {
int64_t num_fixed = 0;
int64_t num_new_redundant_implications = 0;
bool aborted = false;
tmp_removed_.clear();
work_done_in_mark_descendants_ = 0;
int marked_index = 0;
@@ -1396,9 +1411,7 @@ bool BinaryImplicationGraph::ComputeTransitiveReduction(bool log_info) {
//
// TODO(user): Can we exploit the fact that the implication graph is a
// skew-symmetric graph (isomorphic to its transposed) so that we do less
// work? Also it would be nice to keep the property that even if we abort
// during the algorithm, if a => b, then not(b) => not(a) is also present in
// the other direct implication list.
// work?
const LiteralIndex size(implications_.size());
LiteralIndex previous = kNoLiteralIndex;
for (const LiteralIndex root : reverse_topological_order_) {
@@ -1457,6 +1470,21 @@ bool BinaryImplicationGraph::ComputeTransitiveReduction(bool log_info) {
<< "DetectEquivalences() should have removed cycles!";
is_marked_.Set(root);
// Also mark all the ones reachable through the root AMOs.
if (root < at_most_ones_.size()) {
for (const int start : at_most_ones_[root]) {
for (int i = start;; ++i) {
const Literal l = at_most_one_buffer_[i];
if (l.Index() == kNoLiteralIndex) break;
if (l.Index() == root) continue;
if (!is_marked_[l.Negated()] && !is_redundant_[l.Negated()]) {
is_marked_.SetUnsafe(l.Negated());
MarkDescendants(l.Negated());
}
}
}
}
// Failed literal probing. If both x and not(x) are marked then root must be
// false. Note that because we process "roots" in reverse topological order,
// we will fix the LCA of x and not(x) first.
@@ -1489,6 +1517,7 @@ bool BinaryImplicationGraph::ComputeTransitiveReduction(bool log_info) {
if (!is_marked_[l]) {
direct_implications[new_size++] = l;
} else {
tmp_removed_.push_back({Literal(root), l});
CHECK(!is_redundant_[l]);
}
}
@@ -1507,6 +1536,31 @@ bool BinaryImplicationGraph::ComputeTransitiveReduction(bool log_info) {
is_marked_.ClearAndResize(size);
// If we aborted early, we might no longer have both a=>b and not(b)=>not(a).
// This is not desirable has some algo relies on this invariant. That code fix
// this.
if (num_fixed > 0) {
RemoveFixedVariables();
}
if (aborted) {
absl::flat_hash_set<std::pair<LiteralIndex, LiteralIndex>> removed;
for (const auto [a, b] : tmp_removed_) {
removed.insert({a.Index(), b.Index()});
}
for (LiteralIndex i(0); i < implications_.size(); ++i) {
int new_size = 0;
const LiteralIndex negated_i = Literal(i).NegatedIndex();
auto& implication = implications_[i];
for (const Literal l : implication) {
if (removed.contains({l.NegatedIndex(), negated_i})) continue;
implication[new_size++] = l;
}
implication.resize(new_size);
}
}
DCHECK(InvariantsAreOk());
gtl::STLClearObject(&tmp_removed_);
const double dtime = 1e-8 * work_done_in_mark_descendants_;
time_limit_->AdvanceDeterministicTime(dtime);
num_redundant_implications_ += num_new_redundant_implications;
@@ -2122,13 +2176,16 @@ void BinaryImplicationGraph::RemoveBooleanVariable(
direct_implications_of_negated_literal_ =
DirectImplications(literal.Negated());
for (const Literal b : DirectImplications(literal)) {
if (is_removed_[b]) continue;
estimated_sizes_[b.NegatedIndex()]--;
for (const Literal a_negated : direct_implications_of_negated_literal_) {
if (a_negated.Negated() == b) continue;
if (is_removed_[a_negated]) continue;
AddImplication(a_negated.Negated(), b);
}
}
for (const Literal a_negated : direct_implications_of_negated_literal_) {
if (is_removed_[a_negated]) continue;
estimated_sizes_[a_negated.NegatedIndex()]--;
}
@@ -2149,19 +2206,25 @@ void BinaryImplicationGraph::RemoveBooleanVariable(
// We need to remove any occurrence of var in our implication lists, this will
// be delayed to the CleanupAllRemovedVariables() call.
for (LiteralIndex index : {literal.Index(), literal.NegatedIndex()}) {
for (const LiteralIndex index : {literal.Index(), literal.NegatedIndex()}) {
is_removed_[index] = true;
implications_[index].clear();
if (!is_redundant_[index]) {
++num_redundant_literals_;
is_redundant_.Set(index);
}
implications_[index].clear();
}
}
void BinaryImplicationGraph::CleanupAllRemovedVariables() {
for (auto& implication : implications_) {
for (LiteralIndex a(0); a < implications_.size(); ++a) {
if (is_removed_[a]) {
DCHECK(implications_[a].empty());
continue;
}
int new_size = 0;
auto& implication = implications_[a];
for (const Literal l : implication) {
if (!is_removed_[l]) implication[new_size++] = l;
}
@@ -2171,6 +2234,94 @@ void BinaryImplicationGraph::CleanupAllRemovedVariables() {
// Clean-up at most ones.
at_most_ones_.clear();
CleanUpAndAddAtMostOnes(/*base_index=*/0);
DCHECK(InvariantsAreOk());
}
bool BinaryImplicationGraph::InvariantsAreOk() {
// We check that if a => b then not(b) => not(a).
absl::flat_hash_set<std::pair<LiteralIndex, LiteralIndex>> seen;
int num_redundant = 0;
int num_fixed = 0;
for (LiteralIndex a_index(0); a_index < implications_.size(); ++a_index) {
if (trail_->Assignment().LiteralIsAssigned(Literal(a_index))) {
++num_fixed;
if (!implications_[a_index].empty()) {
LOG(ERROR) << "Fixed literal has non-cleared implications";
return false;
}
continue;
}
if (is_redundant_[a_index]) {
++num_redundant;
// TODO(user): nothing really prevent other parts of the solver to re-add
// implication containing redundant literal. It should be up to us to
// filter them and replace by the representative...
if (false && implications_[a_index].size() != 1) {
LOG(ERROR)
<< "Redundant literal should only point to its representative "
<< Literal(a_index) << " => " << implications_[a_index];
return false;
}
}
for (const Literal b : implications_[a_index]) {
seen.insert({a_index, b.Index()});
}
}
// Check that reverse topo order is correct.
absl::StrongVector<LiteralIndex, int> lit_to_order;
if (is_dag_) {
lit_to_order.assign(implications_.size(), -1);
for (int i = 0; i < reverse_topological_order_.size(); ++i) {
lit_to_order[reverse_topological_order_[i]] = i;
}
}
VLOG(2) << "num_redundant " << num_redundant;
VLOG(2) << "num_fixed " << num_fixed;
for (LiteralIndex a_index(0); a_index < implications_.size(); ++a_index) {
const LiteralIndex not_a_index = Literal(a_index).NegatedIndex();
for (const Literal b : implications_[a_index]) {
if (!seen.contains({b.NegatedIndex(), not_a_index})) {
LOG(ERROR) << "We have " << Literal(a_index) << " => " << b
<< " but not the reverse implication!";
LOG(ERROR) << is_redundant_[a_index] << " "
<< trail_->Assignment().LiteralIsAssigned(Literal(a_index));
LOG(ERROR) << is_redundant_[b] << " "
<< trail_->Assignment().LiteralIsAssigned(b);
return false;
}
// Test that if we have a dag, our topo order is correct.
if (is_dag_ && !is_redundant_[b] && !is_redundant_[a_index]) {
CHECK_NE(lit_to_order[b], -1);
CHECK_LE(lit_to_order[b], lit_to_order[a_index]);
}
}
}
// Check the at-most ones.
absl::flat_hash_set<std::pair<LiteralIndex, int>> lit_to_start;
for (LiteralIndex i(0); i < at_most_ones_.size(); ++i) {
for (const int start : at_most_ones_[i]) {
lit_to_start.insert({i, start});
}
}
int index = 0;
int start = 0;
for (int i = 0; i < at_most_one_buffer_.size(); ++i) {
if (at_most_one_buffer_[i].Index() == kNoLiteralIndex) {
++index;
start = i + 1;
continue;
}
if (!lit_to_start.contains({at_most_one_buffer_[i], start})) {
return false;
}
}
return true;
}
// ----- SatClause -----

View File

@@ -713,13 +713,13 @@ class BinaryImplicationGraph : public SatPropagator {
return estimated_sizes_[literal];
}
// Variable elimination by replacing everything of the form a => var => b by a
// => b. We ignore any a => a so the number of new implications is not always
// just the product of the two direct implication list of var and not(var).
// However, if a => var => a, then a and var are equivalent, so this case will
// be removed if one run DetectEquivalences() before this. Similarly, if a =>
// var => not(a) then a must be false and this is detected and dealt with by
// FindFailedLiteralAroundVar().
// Variable elimination by replacing everything of the form a => var => b by
// a => b. We ignore any a => a so the number of new implications is not
// always just the product of the two direct implication list of var and
// not(var). However, if a => var => a, then a and var are equivalent, so this
// case will be removed if one runs DetectEquivalences() before this.
// Similarly, if a => var => not(a) then a must be false and this is detected
// and dealt with by FindFailedLiteralAroundVar().
bool FindFailedLiteralAroundVar(BooleanVariable var, bool* is_unsat);
int64_t NumImplicationOnVariableRemoval(BooleanVariable var);
void RemoveBooleanVariable(
@@ -774,6 +774,9 @@ class BinaryImplicationGraph : public SatPropagator {
// If the final AMO size is smaller than "expansion_size" we fully expand it.
bool CleanUpAndAddAtMostOnes(int base_index, int expansion_size = 10);
// To be used in DCHECKs().
bool InvariantsAreOk();
mutable StatsGroup stats_;
TimeLimit* time_limit_;
ModelRandomGenerator* random_;
@@ -835,6 +838,11 @@ class BinaryImplicationGraph : public SatPropagator {
int64_t work_done_in_mark_descendants_ = 0;
std::vector<Literal> bfs_stack_;
// Used by ComputeTransitiveReduction() in case we abort early to maintain
// the invariant checked by InvariantsAreOk(). Some of our algorithms
// rely on this always being true.
std::vector<std::pair<Literal, Literal>> tmp_removed_;
// Filled by DetectEquivalences().
bool is_dag_ = false;
std::vector<LiteralIndex> reverse_topological_order_;

View File

@@ -404,7 +404,7 @@ void LinearIncrementalEvaluator::UpdateScoreOnNewlyUnenforced(
}
}
// We just need to modifie the old/new transition that decrease the number of
// We just need to modify the old/new transition that decrease the number of
// enforcement literal at false.
void LinearIncrementalEvaluator::UpdateScoreOfEnforcementIncrease(
int c, double score_change, absl::Span<const int64_t> jump_deltas,
@@ -1092,6 +1092,35 @@ int64_t ComputeOverloadArea(
return overload;
}
int64_t ComputeOverlap(const ConstraintProto& interval1,
const ConstraintProto& interval2,
absl::Span<const int64_t> solution) {
for (const int lit : interval1.enforcement_literal()) {
if (!LiteralValue(lit, solution)) return 0;
}
for (const int lit : interval2.enforcement_literal()) {
if (!LiteralValue(lit, solution)) return 0;
}
const int64_t start1 = ExprValue(interval1.interval().start(), solution);
const int64_t size1 = ExprValue(interval1.interval().size(), solution);
const int64_t end1 =
std::min(start1 + size1, ExprValue(interval1.interval().end(), solution));
const int64_t start2 = ExprValue(interval2.interval().start(), solution);
const int64_t size2 = ExprValue(interval2.interval().size(), solution);
const int64_t end2 =
std::min(start2 + size2, ExprValue(interval2.interval().end(), solution));
if (start1 >= end2 || start2 >= end1) return 0; // Disjoint.
// We force a min cost of 1 to cover the case where an interval of size 0 is in
// the middle of another interval.
return std::max(std::min(std::min(end2 - start2, end1 - start1),
std::min(end2 - start1, end1 - start2)),
int64_t{1});
}
} // namespace
CompiledNoOverlapConstraint::CompiledNoOverlapConstraint(
@@ -1100,6 +1129,12 @@ CompiledNoOverlapConstraint::CompiledNoOverlapConstraint(
int64_t CompiledNoOverlapConstraint::ComputeViolation(
absl::Span<const int64_t> solution) {
DCHECK_GE(ct_proto().no_overlap().intervals_size(), 2);
if (ct_proto().no_overlap().intervals_size() == 2) {
return ComputeOverlap(
cp_model_.constraints(ct_proto().no_overlap().intervals(0)),
cp_model_.constraints(ct_proto().no_overlap().intervals(1)), solution);
}
return ComputeOverloadArea(ct_proto().no_overlap().intervals(), {}, cp_model_,
solution, 1, events_);
}
@@ -1118,6 +1153,42 @@ int64_t CompiledCumulativeConstraint::ComputeViolation(
ExprValue(ct_proto().cumulative().capacity(), solution), events_);
}
// ----- CompiledCumulativeConstraint -----
CompiledNoOverlap2dConstraint::CompiledNoOverlap2dConstraint(
const ConstraintProto& ct_proto, const CpModelProto& cp_model)
: CompiledConstraint(ct_proto), cp_model_(cp_model) {}
int64_t CompiledNoOverlap2dConstraint::ComputeOverlapArea(
absl::Span<const int64_t> solution, int i, int j) const {
const int64_t x_overlap = ComputeOverlap(
cp_model_.constraints(ct_proto().no_overlap_2d().x_intervals(i)),
cp_model_.constraints(ct_proto().no_overlap_2d().x_intervals(j)),
solution);
if (x_overlap > 0) {
return x_overlap *
ComputeOverlap(
cp_model_.constraints(ct_proto().no_overlap_2d().y_intervals(i)),
cp_model_.constraints(ct_proto().no_overlap_2d().y_intervals(j)),
solution);
} else {
return 0;
}
}
int64_t CompiledNoOverlap2dConstraint::ComputeViolation(
absl::Span<const int64_t> solution) {
DCHECK_GE(ct_proto().no_overlap_2d().x_intervals_size(), 2);
const int size = ct_proto().no_overlap_2d().x_intervals_size();
int64_t violation = 0;
for (int i = 0; i + 1 < size; ++i) {
for (int j = i + 1; j < size; ++j) {
violation += ComputeOverlapArea(solution, i, j);
}
}
return violation;
}
// ----- CompiledCircuitConstraint -----
// The violation of a circuit has three parts:
@@ -1281,7 +1352,9 @@ void AddCircuitFlowConstraints(LinearIncrementalEvaluator& linear_evaluator,
// ----- LsEvaluator -----
LsEvaluator::LsEvaluator(const CpModelProto& cp_model) : cp_model_(cp_model) {
LsEvaluator::LsEvaluator(const CpModelProto& cp_model,
const SatParameters& params)
: cp_model_(cp_model), params_(params) {
var_to_constraints_.resize(cp_model_.variables_size());
jump_value_optimal_.resize(cp_model_.variables_size(), true);
num_violated_constraint_per_var_.assign(cp_model_.variables_size(), 0);
@@ -1294,9 +1367,10 @@ LsEvaluator::LsEvaluator(const CpModelProto& cp_model) : cp_model_(cp_model) {
}
LsEvaluator::LsEvaluator(
const CpModelProto& cp_model, const std::vector<bool>& ignored_constraints,
const CpModelProto& cp_model, const SatParameters& params,
const std::vector<bool>& ignored_constraints,
const std::vector<ConstraintProto>& additional_constraints)
: cp_model_(cp_model) {
: cp_model_(cp_model), params_(params) {
var_to_constraints_.resize(cp_model_.variables_size());
jump_value_optimal_.resize(cp_model_.variables_size(), true);
num_violated_constraint_per_var_.assign(cp_model_.variables_size(), 0);
@@ -1441,9 +1515,37 @@ void LsEvaluator::CompileOneConstraint(const ConstraintProto& ct) {
break;
}
case ConstraintProto::ConstraintCase::kNoOverlap: {
CompiledNoOverlapConstraint* no_overlap =
new CompiledNoOverlapConstraint(ct, cp_model_);
constraints_.emplace_back(no_overlap);
if (ct.no_overlap().intervals_size() <= 1) break;
if (ct.no_overlap().intervals_size() >
params_.feasibility_jump_max_expanded_constraint_size()) {
CompiledNoOverlapConstraint* no_overlap =
new CompiledNoOverlapConstraint(ct, cp_model_);
constraints_.emplace_back(no_overlap);
} else {
// We expand the no_overlap constraints into a quadratic number of
// disjunctions.
for (int i = 0; i + 1 < ct.no_overlap().intervals_size(); ++i) {
const IntervalConstraintProto& interval_i =
cp_model_.constraints(ct.no_overlap().intervals(i)).interval();
const int64_t min_start_i = ExprMin(interval_i.start(), cp_model_);
const int64_t max_end_i = ExprMax(interval_i.end(), cp_model_);
for (int j = i + 1; j < ct.no_overlap().intervals_size(); ++j) {
const IntervalConstraintProto& interval_j =
cp_model_.constraints(ct.no_overlap().intervals(j)).interval();
const int64_t min_start_j = ExprMin(interval_j.start(), cp_model_);
const int64_t max_end_j = ExprMax(interval_j.end(), cp_model_);
if (min_start_i >= max_end_j || min_start_j >= max_end_i) continue;
ConstraintProto* disj = expanded_constraints_.add_constraints();
disj->mutable_no_overlap()->add_intervals(
ct.no_overlap().intervals(i));
disj->mutable_no_overlap()->add_intervals(
ct.no_overlap().intervals(j));
CompiledNoOverlapConstraint* no_overlap =
new CompiledNoOverlapConstraint(*disj, cp_model_);
constraints_.emplace_back(no_overlap);
}
}
}
break;
}
case ConstraintProto::ConstraintCase::kCumulative: {
@@ -1451,6 +1553,54 @@ void LsEvaluator::CompileOneConstraint(const ConstraintProto& ct) {
new CompiledCumulativeConstraint(ct, cp_model_));
break;
}
case ConstraintProto::ConstraintCase::kNoOverlap2D: {
const auto& x_intervals = ct.no_overlap_2d().x_intervals();
const auto& y_intervals = ct.no_overlap_2d().y_intervals();
if (x_intervals.size() <= 1) break;
if (x_intervals.size() >
params_.feasibility_jump_max_expanded_constraint_size()) {
CompiledNoOverlap2dConstraint* no_overlap_2d =
new CompiledNoOverlap2dConstraint(ct, cp_model_);
constraints_.emplace_back(no_overlap_2d);
break;
}
for (int i = 0; i + 1 < x_intervals.size(); ++i) {
const IntervalConstraintProto& x_interval_i =
cp_model_.constraints(x_intervals[i]).interval();
const int64_t x_min_start_i = ExprMin(x_interval_i.start(), cp_model_);
const int64_t x_max_end_i = ExprMax(x_interval_i.end(), cp_model_);
const IntervalConstraintProto& y_interval_i =
cp_model_.constraints(y_intervals[i]).interval();
const int64_t y_min_start_i = ExprMin(y_interval_i.start(), cp_model_);
const int64_t y_max_end_i = ExprMax(y_interval_i.end(), cp_model_);
for (int j = i + 1; j < x_intervals.size(); ++j) {
const IntervalConstraintProto& x_interval_j =
cp_model_.constraints(x_intervals[j]).interval();
const int64_t x_min_start_j =
ExprMin(x_interval_j.start(), cp_model_);
const int64_t x_max_end_j = ExprMax(x_interval_j.end(), cp_model_);
const IntervalConstraintProto& y_interval_j =
cp_model_.constraints(y_intervals[j]).interval();
const int64_t y_min_start_j =
ExprMin(y_interval_j.start(), cp_model_);
const int64_t y_max_end_j = ExprMax(y_interval_j.end(), cp_model_);
if (x_min_start_i >= x_max_end_j || x_min_start_j >= x_max_end_i ||
y_min_start_i >= y_max_end_j || y_min_start_j >= y_max_end_i) {
continue;
}
ConstraintProto* diffn = expanded_constraints_.add_constraints();
diffn->mutable_no_overlap_2d()->add_x_intervals(x_intervals[i]);
diffn->mutable_no_overlap_2d()->add_x_intervals(x_intervals[j]);
diffn->mutable_no_overlap_2d()->add_y_intervals(y_intervals[i]);
diffn->mutable_no_overlap_2d()->add_y_intervals(y_intervals[j]);
CompiledNoOverlap2dConstraint* no_overlap_2d =
new CompiledNoOverlap2dConstraint(*diffn, cp_model_);
constraints_.emplace_back(no_overlap_2d);
}
}
break;
}
case ConstraintProto::ConstraintCase::kCircuit:
case ConstraintProto::ConstraintCase::kRoutes:
constraints_.emplace_back(new CompiledCircuitConstraint(ct));

View File

@@ -14,16 +14,15 @@
#ifndef OR_TOOLS_SAT_CONSTRAINT_VIOLATION_H_
#define OR_TOOLS_SAT_CONSTRAINT_VIOLATION_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "ortools/sat/cp_model.pb.h"
#include "ortools/sat/sat_parameters.pb.h"
#include "ortools/util/sorted_interval_list.h"
namespace operations_research {
@@ -288,8 +287,8 @@ class CompiledConstraint {
class LsEvaluator {
public:
// The cp_model must outlive this class.
explicit LsEvaluator(const CpModelProto& cp_model);
LsEvaluator(const CpModelProto& cp_model,
LsEvaluator(const CpModelProto& cp_model, const SatParameters& params);
LsEvaluator(const CpModelProto& cp_model, const SatParameters& params,
const std::vector<bool>& ignored_constraints,
const std::vector<ConstraintProto>& additional_constraints);
@@ -420,6 +419,8 @@ class LsEvaluator {
void UpdateViolatedList(int c);
const CpModelProto& cp_model_;
const SatParameters& params_;
CpModelProto expanded_constraints_;
LinearIncrementalEvaluator linear_evaluator_;
std::vector<std::unique_ptr<CompiledConstraint>> constraints_;
std::vector<std::vector<int>> var_to_constraints_;
@@ -530,6 +531,21 @@ class CompiledCumulativeConstraint : public CompiledConstraint {
std::vector<std::pair<int64_t, int64_t>> events_;
};
// The violation of a no_overlap_2d is the sum of overlapping areas of pairs
// of boxes.
class CompiledNoOverlap2dConstraint : public CompiledConstraint {
public:
explicit CompiledNoOverlap2dConstraint(const ConstraintProto& ct_proto,
const CpModelProto& cp_model);
~CompiledNoOverlap2dConstraint() override = default;
int64_t ComputeViolation(absl::Span<const int64_t> solution) override;
private:
int64_t ComputeOverlapArea(absl::Span<const int64_t> solution, int i,
int j) const;
const CpModelProto& cp_model_;
};
} // namespace sat
} // namespace operations_research

View File

@@ -356,8 +356,7 @@ message ConstraintProto {
// then you should use instead of t = a / b, the int_prod constraint
// a = b * t.
//
// Note that this forces exprs[1] to never take the value 0.
// Important: currently we do not support exprs[1] spanning across 0.
// If 0 belongs to the domain of exprs[1], then the model is deemed invalid.
LinearArgumentProto int_div = 7;
// The int_mod constraint forces the target to equal exprs[0] % exprs[1].

View File

@@ -3516,11 +3516,8 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
params, helper, &shared));
}
const bool feasibility_jump_possible =
!params.interleave_search() &&
helper->TypeToConstraints(ConstraintProto::kNoOverlap2D).empty();
const LinearModel* linear_model = global_model->Get<LinearModel>();
if (params.num_violation_ls() > 0 && feasibility_jump_possible &&
if (params.num_violation_ls() > 0 && !params.interleave_search() &&
model_proto.has_objective()) {
const int num_violation_ls = params.test_feasibility_jump()
? params.num_workers()
@@ -3568,10 +3565,10 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
// schedule more than the available number of threads. They will just be
// interleaved. We will get a higher diversity, but use more memory.
const int num_feasibility_jump =
feasibility_jump_possible
? (params.test_feasibility_jump() ? num_available
: (num_available + 1) / 2)
: 0;
params.interleave_search()
? 0
: (params.test_feasibility_jump() ? num_available
: (num_available + 1) / 2);
const int num_first_solution_subsolvers =
num_available - num_feasibility_jump;

View File

@@ -122,11 +122,13 @@ void FeasibilityJumpSolver::Initialize() {
// For now we just disable or enable it.
// But in the future we might have more variation.
if (params_.feasibility_jump_linearization_level() == 0) {
evaluator_ = std::make_unique<LsEvaluator>(linear_model_->model_proto());
evaluator_ =
std::make_unique<LsEvaluator>(linear_model_->model_proto(), params_);
} else {
evaluator_ = std::make_unique<LsEvaluator>(
linear_model_->model_proto(), linear_model_->ignored_constraints(),
linear_model_->additional_constraints());
evaluator_ =
std::make_unique<LsEvaluator>(linear_model_->model_proto(), params_,
linear_model_->ignored_constraints(),
linear_model_->additional_constraints());
}
const int num_variables = linear_model_->model_proto().variables().size();

View File

@@ -1191,7 +1191,33 @@ DivisionPropagator::DivisionPropagator(AffineExpression num,
negated_div_(div.Negated()),
integer_trail_(integer_trail) {}
// TODO(user): We can propagate more, especially in the case where denom
// spans across 0.
// TODO(user): We can propagate a bit more if min_div = 0:
// (min_num > -min_denom).
bool DivisionPropagator::Propagate() {
// Direct propagation if num_ and denom_ are fixed.
if (integer_trail_->IsFixed(num_) && integer_trail_->IsFixed(denom_)) {
const IntegerValue num_value = integer_trail_->FixedValue(num_);
const IntegerValue denom_value = integer_trail_->FixedValue(denom_);
const IntegerValue div_value = num_value / denom_value;
if (!integer_trail_->SafeEnqueue(
div_.LowerOrEqual(div_value),
{num_.LowerOrEqual(num_value), num_.GreaterOrEqual(num_value),
denom_.LowerOrEqual(denom_value),
denom_.GreaterOrEqual(denom_value)})) {
return false;
}
if (!integer_trail_->SafeEnqueue(
div_.GreaterOrEqual(div_value),
{num_.LowerOrEqual(num_value), num_.GreaterOrEqual(num_value),
denom_.LowerOrEqual(denom_value),
denom_.GreaterOrEqual(denom_value)})) {
return false;
}
return true;
}
if (integer_trail_->LowerBound(denom_) < 0 &&
integer_trail_->UpperBound(denom_) > 0) {
return true;
@@ -1226,8 +1252,8 @@ bool DivisionPropagator::Propagate() {
return PropagatePositiveDomains(num, denom, div_);
}
if (integer_trail_->UpperBound(num) <= 0 &&
integer_trail_->UpperBound(div_) <= 0) {
if (integer_trail_->LowerBound(negated_num) >= 0 &&
integer_trail_->LowerBound(negated_div_) >= 0) {
return PropagatePositiveDomains(negated_num, denom, negated_div_);
}
@@ -1293,8 +1319,7 @@ bool DivisionPropagator::PropagateUpperBounds(AffineExpression num,
if (max_div > new_max_div) {
if (!integer_trail_->SafeEnqueue(
div.LowerOrEqual(new_max_div),
{integer_trail_->UpperBoundAsLiteral(num),
integer_trail_->LowerBoundAsLiteral(denom)})) {
{num.LowerOrEqual(max_num), denom.GreaterOrEqual(min_denom)})) {
return false;
}
}
@@ -1307,9 +1332,8 @@ bool DivisionPropagator::PropagateUpperBounds(AffineExpression num,
if (max_num > new_max_num) {
if (!integer_trail_->SafeEnqueue(
num.LowerOrEqual(new_max_num),
{integer_trail_->UpperBoundAsLiteral(denom),
denom.GreaterOrEqual(1),
integer_trail_->UpperBoundAsLiteral(div)})) {
{denom.LowerOrEqual(max_denom), denom.GreaterOrEqual(1),
div.LowerOrEqual(max_div)})) {
return false;
}
}
@@ -1331,8 +1355,7 @@ bool DivisionPropagator::PropagatePositiveDomains(AffineExpression num,
if (min_div < new_min_div) {
if (!integer_trail_->SafeEnqueue(
div.GreaterOrEqual(new_min_div),
{integer_trail_->LowerBoundAsLiteral(num),
integer_trail_->UpperBoundAsLiteral(denom),
{num.GreaterOrEqual(min_num), denom.LowerOrEqual(max_denom),
denom.GreaterOrEqual(1)})) {
return false;
}
@@ -1345,8 +1368,7 @@ bool DivisionPropagator::PropagatePositiveDomains(AffineExpression num,
if (min_num < new_min_num) {
if (!integer_trail_->SafeEnqueue(
num.GreaterOrEqual(new_min_num),
{integer_trail_->LowerBoundAsLiteral(denom),
integer_trail_->LowerBoundAsLiteral(div)})) {
{denom.GreaterOrEqual(min_denom), div.GreaterOrEqual(min_div)})) {
return false;
}
}
@@ -1360,22 +1382,21 @@ bool DivisionPropagator::PropagatePositiveDomains(AffineExpression num,
if (max_denom > new_max_denom) {
if (!integer_trail_->SafeEnqueue(
denom.LowerOrEqual(new_max_denom),
{integer_trail_->UpperBoundAsLiteral(num), num.GreaterOrEqual(0),
integer_trail_->LowerBoundAsLiteral(div),
denom.GreaterOrEqual(1)})) {
{num.LowerOrEqual(max_num), num.GreaterOrEqual(0),
div.GreaterOrEqual(min_div), denom.GreaterOrEqual(1)})) {
return false;
}
}
}
// denom >= CeilRatio(num + 1, max_div+1)
// >= CeilRatio(min_num + 1, max_div +).
// denom >= CeilRatio(num + 1, max_div + 1)
// >= CeilRatio(min_num + 1, max_div + 1).
const IntegerValue new_min_denom = CeilRatio(min_num + 1, max_div + 1);
if (min_denom < new_min_denom) {
if (!integer_trail_->SafeEnqueue(denom.GreaterOrEqual(new_min_denom),
{integer_trail_->LowerBoundAsLiteral(num),
integer_trail_->UpperBoundAsLiteral(div),
div.GreaterOrEqual(0)})) {
if (!integer_trail_->SafeEnqueue(
denom.GreaterOrEqual(new_min_denom),
{num.GreaterOrEqual(min_num), div.LowerOrEqual(max_div),
div.GreaterOrEqual(0), denom.GreaterOrEqual(1)})) {
return false;
}
}

View File

@@ -23,7 +23,7 @@ option csharp_namespace = "Google.OrTools.Sat";
// Contains the definitions for all the sat algorithm parameters and their
// default values.
//
// NEXT TAG: 264
// NEXT TAG: 265
message SatParameters {
// In some context, like in a portfolio of search, it makes sense to name a
// given parameters set for logging purpose.
@@ -1113,6 +1113,11 @@ message SatParameters {
// current solution. This parameter selects the first option.
optional bool feasibility_jump_enable_restarts = 250 [default = true];
// Maximum size of no_overlap or no_overlap_2d constraint for a quadratic
// expansion.
optional int32 feasibility_jump_max_expanded_constraint_size = 264
[default = 100];
// This will create incomplete subsolvers (that are not LNS subsolvers)
// that use the feasibility jump code to find improving solution, treating
// the objective improvement as a hard constraint.

View File

@@ -30,6 +30,7 @@
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "ortools/base/logging.h"
#include "ortools/base/stl_util.h"
@@ -274,7 +275,7 @@ bool CutIsEfficient(
// fixed_capacity * (makespan - makespan_min)
// as the available energy.
void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity(
const std::string& cut_name,
absl::string_view cut_name,
const absl::StrongVector<IntegerVariable, double>& lp_values,
std::vector<EnergyEvent> events, IntegerValue capacity,
AffineExpression makespan, TimeLimit* time_limit, Model* model,
@@ -466,7 +467,7 @@ void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity(
}
if (cut_generated) {
std::string full_name = cut_name;
std::string full_name(cut_name);
if (add_opt_to_name) full_name.append("_optional");
if (add_quadratic_to_name) full_name.append("_quadratic");
if (add_lifted_to_name) full_name.append("_lifted");

View File

@@ -630,15 +630,15 @@ class Bitset64 {
// Cryptic function! This is just an optimized version of a given piece of
// code and has probably little general use.
static uint64_t ConditionalXorOfTwoBits(IndexType i, uint64_t use1,
const Bitset64<IndexType>& set1,
Bitset64<IndexType>::ConstView set1,
uint64_t use2,
const Bitset64<IndexType>& set2) {
Bitset64<IndexType>::ConstView set2) {
DCHECK(use1 == 0 || use1 == 1);
DCHECK(use2 == 0 || use2 == 1);
const int bucket = BitOffset64(Value(i));
const int pos = BitPos64(Value(i));
return ((use1 << pos) & set1.data_[bucket]) ^
((use2 << pos) & set2.data_[bucket]);
return ((use1 << pos) & set1.data()[bucket]) ^
((use2 << pos) & set2.data()[bucket]);
}
// Returns a 0/1 string representing the bitset.

View File

@@ -14,6 +14,7 @@
#ifndef OR_TOOLS_UTIL_SORTED_INTERVAL_LIST_H_
#define OR_TOOLS_UTIL_SORTED_INTERVAL_LIST_H_
#include <cstdint>
#include <iterator>
#include <ostream>
#include <set>