[CP-SAT] new example; regroup some linear code

This commit is contained in:
Laurent Perron
2023-09-23 10:16:08 +02:00
parent f9540e8a6b
commit a7b26f6d8d
15 changed files with 472 additions and 151 deletions

View File

@@ -126,6 +126,11 @@ ABSL_FLAG(bool, cp_model_dump_models, false,
"format to 'FLAGS_cp_model_dump_prefix'{model|presolved_model|"
"mapping_model}.pb.txt.");
ABSL_FLAG(bool, cp_model_export_model, false,
"DEBUG ONLY. When set to true, SolveCpModel() will dump the model "
"proto of the original model format to "
"'FLAGS_cp_model_dump_prefix'{name}.pb.txt.");
ABSL_FLAG(bool, cp_model_dump_text_proto, true,
"DEBUG ONLY, dump models in text proto instead of binary proto.");
@@ -1000,19 +1005,17 @@ IntegerVariable AddLPConstraints(bool objective_need_to_be_tight,
// Dispatch every constraint to its LinearProgrammingConstraint.
std::vector<LinearProgrammingConstraint*> lp_constraints(num_components,
nullptr);
std::vector<std::vector<LinearConstraint>> component_to_constraints(
num_components);
for (int i = 0; i < num_lp_constraints; i++) {
const int c = index_to_component[get_constraint_index(i)];
if (component_sizes[c] <= 1) continue;
component_to_constraints[c].push_back(relaxation.linear_constraints[i]);
if (lp_constraints[c] == nullptr) {
lp_constraints[c] =
new LinearProgrammingConstraint(m, component_to_var[c]);
m->TakeOwnership(lp_constraints[c]);
}
// Load the constraint.
lp_constraints[c]->AddLinearConstraint(relaxation.linear_constraints[i]);
lp_constraints[c]->AddLinearConstraint(
std::move(relaxation.linear_constraints[i]));
}
// Dispatch every cut generator to its LinearProgrammingConstraint.
@@ -3978,6 +3981,10 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
if (absl::GetFlag(FLAGS_cp_model_dump_models)) {
DumpModelProto(model_proto, "model");
}
if (absl::GetFlag(FLAGS_cp_model_export_model)) {
DumpModelProto(model_proto,
model_proto.name().empty() ? "model" : model_proto.name());
}
#endif // __PORTABLE_PLATFORM__
#if !defined(__PORTABLE_PLATFORM__)

View File

@@ -26,66 +26,11 @@
#include <vector>
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "ortools/base/logging.h"
namespace operations_research {
namespace sat {
// Small utility class to store a vector<vector<>> where one can only append
// new vectors and never change previously added ones.
//
// Note that we implement a really small subset of the vector<vector<>> API.
template <typename T>
class CompactVectorVector {
 public:
  // Appends the content of `data` as a new inner vector.
  // Returns the previous size(), i.e. the index of the newly added vector,
  // which is convenient for how we use it.
  int Add(absl::Span<const T> data) {
    const int new_index = size();
    starts_.push_back(buffer_.size());
    sizes_.push_back(data.size());
    buffer_.insert(buffer_.end(), data.begin(), data.end());
    return new_index;
  }

  // Same as Add() but for sat::Literal or any type from which we can get
  // indices via .Index().value().
  template <typename L>
  int AddLiterals(const std::vector<L>& data) {
    const int new_index = size();
    starts_.push_back(buffer_.size());
    sizes_.push_back(data.size());
    for (const L lit : data) {
      buffer_.push_back(lit.Index().value());
    }
    return new_index;
  }

  // Returns the content of the inner vector at `index`.
  // Warning: the span is only valid until the next clear() or Add() call.
  absl::Span<const T> operator[](int index) const {
    DCHECK_GE(index, 0);
    DCHECK_LT(index, starts_.size());
    DCHECK_LT(index, sizes_.size());
    const size_t length = static_cast<size_t>(sizes_[index]);
    if (length == 0) return {};
    return {&buffer_[starts_[index]], length};
  }

  // Removes all inner vectors.
  void clear() {
    starts_.clear();
    sizes_.clear();
    buffer_.clear();
  }

  // Number of inner vectors added so far.
  size_t size() const { return starts_.size(); }

 private:
  // Inner vector i lives in buffer_[starts_[i], starts_[i] + sizes_[i]).
  std::vector<int> starts_;
  std::vector<int> sizes_;
  std::vector<T> buffer_;
};
// A helper class to process many sets of integers in [0, n] and detect all the
// sets included in each other. This is a common operation in presolve, and
// while it can be slow, the algorithm used here is pretty efficient in
// practice.
@@ -99,10 +44,12 @@ class CompactVectorVector {
// The number n will be detected automatically but we allocate various vector
// of size n, so avoid having large integer values in your sets.
//
// All set contents will be accessed via storage_[index]. And of course Storage
// can be the CompactVectorVector defined above. But it can also be something
// that return a class that support .size() and integer range iteration over the
// element in the set on the fly.
// All set contents will be accessed via storage_[index].
// This can be used with a vector<vector<>> or our CompactVectorVector that we
// use in a few places. But it can also be anything that supports:
// - storage_.size()
// - range iteration over storage_[index]
// - storage_[index].size()
template <class Storage>
class InclusionDetector {
public:

View File

@@ -484,5 +484,27 @@ IntegerValue GetCoefficientOfPositiveVar(const IntegerVariable var,
return IntegerValue(0);
}
bool PossibleOverflow(const IntegerTrail& integer_trail,
const LinearConstraint& constraint) {
IntegerValue min_activity(0);
IntegerValue max_activity(0);
const int size = constraint.vars.size();
for (int i = 0; i < size; ++i) {
const IntegerVariable var = constraint.vars[i];
const IntegerValue coeff = constraint.coeffs[i];
CHECK_NE(coeff, 0);
const IntegerValue lb = integer_trail.LevelZeroLowerBound(var);
const IntegerValue ub = integer_trail.LevelZeroUpperBound(var);
if (coeff > 0) {
if (!AddProductTo(lb, coeff, &min_activity)) return true;
if (!AddProductTo(ub, coeff, &max_activity)) return true;
} else {
if (!AddProductTo(ub, coeff, &min_activity)) return true;
if (!AddProductTo(lb, coeff, &max_activity)) return true;
}
}
return AtMinOrMaxInt64(CapSub(max_activity.value(), min_activity.value()));
}
} // namespace sat
} // namespace operations_research

View File

@@ -259,6 +259,15 @@ double ComputeActivity(
const LinearConstraint& constraint,
const absl::StrongVector<IntegerVariable, double>& values);
// Tests for possible overflow in the given linear constraint used for the
// linear relaxation. This is a bit relaxed compared to what we require for
// generic linear constraint that are used in our CP propagators.
//
// If this check passes, our constraint should be safe to use in our
// simplification code, our cut computation, etc...
// code, our cut computation, etc...
bool PossibleOverflow(const IntegerTrail& integer_trail,
const LinearConstraint& constraint);
// Returns sqrt(sum square(coeff)).
double ComputeL2Norm(const LinearConstraint& constraint);

View File

@@ -46,28 +46,6 @@
namespace operations_research {
namespace sat {
// Returns true if the level-zero activity of `constraint` may overflow.
// The min/max activities are accumulated with overflow-checked arithmetic
// (AddProductTo returns false on overflow), and the final span
// max_activity - min_activity is checked against the int64 limits.
bool PossibleOverflow(const IntegerTrail& integer_trail,
const LinearConstraint& constraint) {
IntegerValue min_activity(0);
IntegerValue max_activity(0);
const int size = constraint.vars.size();
for (int i = 0; i < size; ++i) {
const IntegerVariable var = constraint.vars[i];
const IntegerValue coeff = constraint.coeffs[i];
// Zero coefficients are expected to have been removed upstream.
CHECK_NE(coeff, 0);
const IntegerValue lb = integer_trail.LevelZeroLowerBound(var);
const IntegerValue ub = integer_trail.LevelZeroUpperBound(var);
// Which variable bound contributes to the min/max activity depends on the
// sign of the coefficient.
if (coeff > 0) {
if (!AddProductTo(lb, coeff, &min_activity)) return true;
if (!AddProductTo(ub, coeff, &max_activity)) return true;
} else {
if (!AddProductTo(ub, coeff, &min_activity)) return true;
if (!AddProductTo(lb, coeff, &max_activity)) return true;
}
}
// Even if both activities fit in an int64, their difference might not.
return AtMinOrMaxInt64(CapSub(max_activity.value(), min_activity.value()));
}
namespace {
const LinearConstraintManager::ConstraintIndex kInvalidConstraintIndex(-1);
@@ -190,7 +168,8 @@ bool LinearConstraintManager::MaybeRemoveSomeInactiveConstraints(
// we regenerate identical cuts for some reason.
LinearConstraintManager::ConstraintIndex LinearConstraintManager::Add(
LinearConstraint ct, bool* added) {
CHECK(!ct.vars.empty());
DCHECK(!ct.vars.empty());
DCHECK(!PossibleOverflow(integer_trail_, ct)) << ct.DebugString();
DCHECK(NoDuplicateVariable(ct));
SimplifyConstraint(&ct);
DivideByGCD(&ct);

View File

@@ -41,11 +41,6 @@
namespace operations_research {
namespace sat {
// Tests for possible overflow in the simplification of the given linear
// constraint.
bool PossibleOverflow(const IntegerTrail& integer_trail,
const LinearConstraint& constraint);
// Stores for each IntegerVariable its temporary LP solution.
//
// This is shared between all LinearProgrammingConstraint because in the corner

View File

@@ -276,10 +276,9 @@ LinearProgrammingConstraint::LinearProgrammingConstraint(
}
}
void LinearProgrammingConstraint::AddLinearConstraint(
const LinearConstraint& ct) {
void LinearProgrammingConstraint::AddLinearConstraint(LinearConstraint ct) {
DCHECK(!lp_constraint_is_registered_);
constraint_manager_.Add(ct);
constraint_manager_.Add(std::move(ct));
}
glop::ColIndex LinearProgrammingConstraint::GetMirrorVariable(

View File

@@ -146,7 +146,7 @@ class LinearProgrammingConstraint : public PropagatorInterface,
absl::Span<const IntegerVariable> vars);
// Add a new linear constraint to this LP.
void AddLinearConstraint(const LinearConstraint& ct);
void AddLinearConstraint(LinearConstraint ct);
// Set the coefficient of the variable in the objective. Calling it twice will
// overwrite the previous value.

View File

@@ -43,6 +43,7 @@
#include "ortools/sat/intervals.h"
#include "ortools/sat/linear_constraint.h"
#include "ortools/sat/model.h"
#include "ortools/sat/precedences.h"
#include "ortools/sat/presolve_util.h"
#include "ortools/sat/routing_cuts.h"
#include "ortools/sat/sat_base.h"
@@ -204,10 +205,17 @@ void AppendRelaxationForEqualityEncoding(IntegerVariable var,
}
}
relaxation->linear_constraints.push_back(at_least_one.Build());
relaxation->linear_constraints.push_back(encoding_ct.Build());
relaxation->at_most_ones.push_back(at_most_one_ct);
++*num_tight;
// It is possible that the linear1 encoding respect our overflow
// precondition but not the Var = sum bool * value one. In this case, we
// just don't encode it this way. Hopefully, most normal model will not run
// into this.
const LinearConstraint lc = encoding_ct.Build();
if (!PossibleOverflow(*integer_trail, lc)) {
relaxation->linear_constraints.push_back(at_least_one.Build());
relaxation->linear_constraints.push_back(std::move(lc));
relaxation->at_most_ones.push_back(at_most_one_ct);
++*num_tight;
}
return;
}
@@ -221,13 +229,18 @@ void AppendRelaxationForEqualityEncoding(IntegerVariable var,
CHECK(encoding_ct.AddLiteralTerm(value_literal.literal,
rhs - value_literal.value));
}
relaxation->at_most_ones.push_back(at_most_one_ct);
relaxation->linear_constraints.push_back(encoding_ct.Build());
++*num_tight;
const LinearConstraint lc = encoding_ct.Build();
if (!PossibleOverflow(*integer_trail, lc)) {
relaxation->at_most_ones.push_back(at_most_one_ct);
relaxation->linear_constraints.push_back(std::move(lc));
++*num_tight;
}
return;
}
// min + sum l_i * (value_i - min) <= var.
// Note that this might overflow in corner cases, so we need to prevent that.
const IntegerValue d_min = min_not_encoded;
LinearConstraintBuilder lower_bound_ct(&model, d_min, kMaxIntegerValue);
lower_bound_ct.AddTerm(var, IntegerValue(1));
@@ -235,8 +248,14 @@ void AppendRelaxationForEqualityEncoding(IntegerVariable var,
CHECK(lower_bound_ct.AddLiteralTerm(value_literal.literal,
d_min - value_literal.value));
}
const LinearConstraint built_lb_ct = lower_bound_ct.Build();
if (!PossibleOverflow(*integer_trail, built_lb_ct)) {
relaxation->linear_constraints.push_back(std::move(built_lb_ct));
++*num_loose;
}
// var <= max + sum l_i * (value_i - max).
// Note that this might overflow in corner cases, so we need to prevent that.
const IntegerValue d_max = max_not_encoded;
LinearConstraintBuilder upper_bound_ct(&model, kMinIntegerValue, d_max);
upper_bound_ct.AddTerm(var, IntegerValue(1));
@@ -244,12 +263,14 @@ void AppendRelaxationForEqualityEncoding(IntegerVariable var,
CHECK(upper_bound_ct.AddLiteralTerm(value_literal.literal,
d_max - value_literal.value));
}
const LinearConstraint built_ub_ct = upper_bound_ct.Build();
if (!PossibleOverflow(*integer_trail, built_ub_ct)) {
relaxation->linear_constraints.push_back(std::move(built_ub_ct));
++*num_loose;
}
// Note that empty/trivial constraints will be filtered later.
relaxation->at_most_ones.push_back(at_most_one_ct);
relaxation->linear_constraints.push_back(lower_bound_ct.Build());
relaxation->linear_constraints.push_back(upper_bound_ct.Build());
++*num_loose;
}
void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var,
@@ -266,9 +287,8 @@ void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var,
// And also add the implications between used literals.
{
IntegerValue prev_used_bound = integer_trail->LowerBound(var);
LinearConstraintBuilder lb_constraint(&model, prev_used_bound,
kMaxIntegerValue);
lb_constraint.AddTerm(var, IntegerValue(1));
LinearConstraintBuilder builder(&model, prev_used_bound, kMaxIntegerValue);
builder.AddTerm(var, IntegerValue(1));
LiteralIndex prev_literal_index = kNoLiteralIndex;
for (const auto entry : greater_than_encoding) {
if (entry.value <= prev_used_bound) continue;
@@ -277,7 +297,7 @@ void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var,
const IntegerValue diff = prev_used_bound - entry.value;
// Skip the entry if the literal doesn't have a view.
if (!lb_constraint.AddLiteralTerm(entry.literal, diff)) continue;
if (!builder.AddLiteralTerm(entry.literal, diff)) continue;
if (prev_literal_index != kNoLiteralIndex) {
// Add var <= prev_var, which is the same as var + not(prev_var) <= 1
relaxation->at_most_ones.push_back(
@@ -286,26 +306,29 @@ void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var,
prev_used_bound = entry.value;
prev_literal_index = literal_index;
}
relaxation->linear_constraints.push_back(lb_constraint.Build());
// Note that by construction, this shouldn't be able to overflow.
relaxation->linear_constraints.push_back(builder.Build());
}
// Do the same for the var <= side by using NegationOfVar().
// Note that we do not need to add the implications between literals again.
{
IntegerValue prev_used_bound = integer_trail->LowerBound(NegationOf(var));
LinearConstraintBuilder lb_constraint(&model, prev_used_bound,
kMaxIntegerValue);
lb_constraint.AddTerm(var, IntegerValue(-1));
LinearConstraintBuilder builder(&model, prev_used_bound, kMaxIntegerValue);
builder.AddTerm(var, IntegerValue(-1));
for (const auto entry :
encoder->PartialGreaterThanEncoding(NegationOf(var))) {
if (entry.value <= prev_used_bound) continue;
const IntegerValue diff = prev_used_bound - entry.value;
// Skip the entry if the literal doesn't have a view.
if (!lb_constraint.AddLiteralTerm(entry.literal, diff)) continue;
if (!builder.AddLiteralTerm(entry.literal, diff)) continue;
prev_used_bound = entry.value;
}
relaxation->linear_constraints.push_back(lb_constraint.Build());
// Note that by construction, this shouldn't be able to overflow.
relaxation->linear_constraints.push_back(builder.Build());
}
}
@@ -1210,6 +1233,7 @@ void TryToLinearizeConstraint(const CpModelProto& /*model_proto*/,
CHECK_EQ(model->GetOrCreate<SatSolver>()->CurrentDecisionLevel(), 0);
DCHECK_GT(linearization_level, 0);
const int old_index = relaxation->linear_constraints.size();
switch (ct.constraint_case()) {
case ConstraintProto::ConstraintCase::kBoolOr: {
if (linearization_level > 1) {
@@ -1310,6 +1334,18 @@ void TryToLinearizeConstraint(const CpModelProto& /*model_proto*/,
default: {
}
}
if (DEBUG_MODE) {
const auto& integer_trail = *model->GetOrCreate<IntegerTrail>();
for (int i = old_index; i < relaxation->linear_constraints.size(); ++i) {
const bool issue =
PossibleOverflow(integer_trail, relaxation->linear_constraints[i]);
if (issue) {
LOG(INFO) << "Possible overflow in linearization of: "
<< ct.ShortDebugString();
}
}
}
}
// Cut generators.
@@ -1657,10 +1693,10 @@ void AddLinMaxCutGenerator(const ConstraintProto& ct, Model* m,
// it is harder to detect that if all literal are false then none of the implied
// value can be taken.
void AppendElementEncodingRelaxation(Model* m, LinearRelaxation* relaxation) {
auto* integer_trail = m->GetOrCreate<IntegerTrail>();
auto* element_encodings = m->GetOrCreate<ElementEncodings>();
int num_exactly_one_elements = 0;
for (const IntegerVariable var :
element_encodings->GetElementEncodedVariables()) {
for (const auto& [index, literal_value_list] :
@@ -1670,19 +1706,22 @@ void AppendElementEncodingRelaxation(Model* m, LinearRelaxation* relaxation) {
min_value = std::min(min_value, literal_value.value);
}
LinearConstraintBuilder linear_encoding(m, -min_value, -min_value);
linear_encoding.AddTerm(var, IntegerValue(-1));
LinearConstraintBuilder builder(m, -min_value, -min_value);
builder.AddTerm(var, IntegerValue(-1));
for (const auto& [value, literal] : literal_value_list) {
const IntegerValue delta_min = value - min_value;
if (delta_min != 0) {
// If the term has no view, we abort.
if (!linear_encoding.AddLiteralTerm(literal, delta_min)) {
if (!builder.AddLiteralTerm(literal, delta_min)) {
return;
}
}
}
++num_exactly_one_elements;
relaxation->linear_constraints.push_back(linear_encoding.Build());
const LinearConstraint lc = builder.Build();
if (!PossibleOverflow(*integer_trail, lc)) {
relaxation->linear_constraints.push_back(std::move(lc));
}
}
}
@@ -1728,7 +1767,7 @@ LinearRelaxation ComputeLinearRelaxation(const CpModelProto& model_proto,
var, *m, &relaxation, &num_tight_equality_encoding_relaxations,
&num_loose_equality_encoding_relaxations);
// The we try to linearize the inequality encoding. Note that on some
// Then we try to linearize the inequality encoding. Note that on some
// problem like pizza27i.mps.gz, adding both equality and inequality
// encoding is a must.
//

View File

@@ -452,28 +452,9 @@ ActivityBoundHelper::PartitionLiteralsIntoAmo(absl::Span<const int> literals) {
}
// We have the partition, lets construct the spans now.
part_starts_.assign(num_parts, 0);
part_sizes_.assign(num_parts, 0);
part_ends_.assign(num_parts, 0);
for (int i = 0; i < num_literals; ++i) {
DCHECK_GE(partition_[i], 0);
DCHECK_LT(partition_[i], num_parts);
part_sizes_[partition_[i]]++;
}
for (int p = 1; p < num_parts; ++p) {
part_starts_[p] = part_ends_[p] = part_sizes_[p - 1] + part_starts_[p - 1];
}
reordered_literals_.resize(num_literals);
for (int i = 0; i < num_literals; ++i) {
const int p = partition_[i];
reordered_literals_[part_ends_[p]++] = literals[i];
}
std::vector<absl::Span<const int>> result;
for (int p = 0; p < num_parts; ++p) {
result.push_back(
absl::MakeSpan(&reordered_literals_[part_starts_[p]], part_sizes_[p]));
}
return result;
part_to_literals_.ResetFromFlatMapping(partition_, literals);
DCHECK_EQ(part_to_literals_.size(), num_parts);
return part_to_literals_.AsVectorOfSpan();
}
bool ActivityBoundHelper::IsAmo(absl::Span<const int> literals) {

View File

@@ -31,6 +31,7 @@
#include "ortools/base/types.h"
#include "ortools/sat/cp_model.pb.h"
#include "ortools/sat/cp_model_utils.h"
#include "ortools/sat/util.h"
#include "ortools/util/bitset.h"
#include "ortools/util/logging.h"
#include "ortools/util/sorted_interval_list.h"
@@ -211,7 +212,7 @@ class ActivityBoundHelper {
bool PresolveEnforcement(absl::Span<const int> refs, ConstraintProto* ct,
absl::flat_hash_set<int>* literals_at_true);
// partition the list of literals into disjoint at most ones. The returned
// Partition the list of literals into disjoint at most ones. The returned
// spans are only valid until another function from this class is used.
std::vector<absl::Span<const int>> PartitionLiteralsIntoAmo(
absl::Span<const int> literals);
@@ -253,10 +254,7 @@ class ActivityBoundHelper {
std::vector<int64_t> second_max_by_partition_;
// Used by PartitionLiteralsIntoAmo().
std::vector<int> part_starts_;
std::vector<int> part_ends_;
std::vector<int> part_sizes_;
std::vector<int> reordered_literals_;
CompactVectorVector<int, int> part_to_literals_;
absl::flat_hash_set<int> triggered_amo_;
};

View File

@@ -63,6 +63,8 @@ code_sample_py(name = "overlapping_intervals_sample_sat")
code_sample_cc_py(name = "rabbits_and_pheasants_sat")
code_sample_py(name = "ranking_circuit_sample_sat")
code_sample_cc_py(name = "ranking_sample_sat")
code_sample_cc_py(name = "reified_sample_sat")

View File

@@ -0,0 +1,179 @@
#!/usr/bin/env python3
# Copyright 2010-2022 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code sample to demonstrate how to rank intervals using a circuit."""
from typing import List, Sequence
from ortools.sat.python import cp_model
def rank_tasks_with_circuit(
    model: cp_model.CpModel,
    starts: Sequence[cp_model.IntVar],
    durations: Sequence[int],
    presences: Sequence[cp_model.IntVar],
    ranks: Sequence[cp_model.IntVar],
):
    """Ranks tasks using a circuit constraint.

    This method assumes that all starts are disjoint, meaning that all tasks
    have a strictly positive duration, and they appear in the same NoOverlap
    constraint.

    To implement this ranking, we build a dense graph with num_tasks + 1 nodes.
    The extra node (with id 0) decides which task is first with its only
    outgoing arc, and which task is last with its only incoming arc. Each task
    i is associated with node id i + 1, and an arc between i + 1 and j + 1
    indicates that j is the immediate successor of i.

    The circuit constraint ensures there is at most one hamiltonian path of
    length > 1. If no such path exists, then no tasks are active.

    The multiple enforced linear constraints ensure the compatibility between
    the order of starts and the order of ranks.

    Args:
      model: The CpModel to add the constraints to.
      starts: The array of starts variables of all tasks.
      durations: the durations of all tasks.
      presences: The array of presence variables of all tasks.
      ranks: The array of rank variables of all tasks.
    """
    num_tasks = len(starts)
    all_tasks = range(num_tasks)

    arcs: List[cp_model.ArcT] = []
    for i in all_tasks:
        # Arc from the dummy node: task i is ranked first.
        start_lit = model.NewBoolVar(f"start_{i}")
        arcs.append((0, i + 1, start_lit))
        model.Add(ranks[i] == 0).OnlyEnforceIf(start_lit)
        # As there are no other constraints on the problem, we can add this
        # redundant constraint: the first task starts at 0.
        model.Add(starts[i] == 0).OnlyEnforceIf(start_lit)

        # Arc to the dummy node: task i is ranked last.
        end_lit = model.NewBoolVar(f"end_{i}")
        arcs.append((i + 1, 0, end_lit))

        for j in all_tasks:
            if i == j:
                # Self-arc: the task is skipped (not performed).
                arcs.append((i + 1, i + 1, presences[i].Not()))
                model.Add(ranks[i] == -1).OnlyEnforceIf(presences[i].Not())
            else:
                literal = model.NewBoolVar(f"arc_{i}_to_{j}")
                arcs.append((i + 1, j + 1, literal))
                model.Add(ranks[j] == ranks[i] + 1).OnlyEnforceIf(literal)
                # To perform the transitive reduction from precedences to
                # successors, we need to tie the starts of the tasks with
                # 'literal'. In a pure problem, the following inequality could
                # be an equality. It is not true in general.
                #
                # Note that we could use this literal to penalize the
                # transition, or add an extra delay to the precedence.
                model.Add(starts[j] >= starts[i] + durations[i]).OnlyEnforceIf(
                    literal
                )

    # Manage the empty circuit: no task is performed at all.
    empty = model.NewBoolVar("empty")
    arcs.append((0, 0, empty))
    for i in all_tasks:
        model.AddImplication(empty, presences[i].Not())

    # Add the circuit constraint.
    model.AddCircuit(arcs)
def ranking_sample_sat():
    """Ranks tasks in a NoOverlap constraint.

    Builds 4 tasks (half of them optional), links them with a NoOverlap
    constraint plus a circuit-based ranking, then minimizes the makespan
    with a fixed gain per performed task, and prints the optimal schedule.
    """
    model = cp_model.CpModel()
    horizon = 100
    num_tasks = 4
    all_tasks = range(num_tasks)

    starts = []
    durations = []
    intervals = []
    presences = []
    ranks = []

    # Creates intervals, half of them are optional.
    for t in all_tasks:
        start = model.NewIntVar(0, horizon, f"start[{t}]")
        duration = t + 1
        presence = model.NewBoolVar(f"presence[{t}]")
        interval = model.NewOptionalFixedSizeIntervalVar(
            start, duration, presence, f"opt_interval[{t}]"
        )
        # The first half of the tasks is mandatory.
        if t < num_tasks // 2:
            model.Add(presence == 1)

        starts.append(start)
        durations.append(duration)
        intervals.append(interval)
        presences.append(presence)

        # Rank = -1 if and only if the task is not performed.
        ranks.append(model.NewIntVar(-1, num_tasks - 1, f"rank[{t}]"))

    # Adds NoOverlap constraint.
    model.AddNoOverlap(intervals)

    # Adds ranking constraint.
    rank_tasks_with_circuit(model, starts, durations, presences, ranks)

    # Adds a constraint on ranks.
    model.Add(ranks[0] < ranks[1])

    # Creates makespan variable.
    makespan = model.NewIntVar(0, horizon, "makespan")
    for t in all_tasks:
        model.Add(starts[t] + durations[t] <= makespan).OnlyEnforceIf(presences[t])

    # Minimizes makespan - fixed gain per task performed.
    # As the fixed cost is less than the duration of the last interval,
    # the solver will not perform the last interval.
    model.Minimize(2 * makespan - 7 * sum(presences[t] for t in all_tasks))

    # Solves the model.
    solver = cp_model.CpSolver()
    status = solver.Solve(model)

    if status == cp_model.OPTIMAL:
        # Prints out the makespan and the start times and ranks of all tasks.
        print(f"Optimal cost: {solver.ObjectiveValue()}")
        print(f"Makespan: {solver.Value(makespan)}")
        for t in all_tasks:
            if solver.Value(presences[t]):
                print(
                    f"Task {t} starts at {solver.Value(starts[t])} "
                    f"with rank {solver.Value(ranks[t])}"
                )
            else:
                # Fixed user-facing typo: "in not performed" -> "is not".
                print(
                    f"Task {t} is not performed "
                    f"and ranked at {solver.Value(ranks[t])}"
                )
    else:
        print(f"Solver exited with nonoptimal status: {status}")


ranking_sample_sat()

View File

@@ -31,9 +31,9 @@ void RankingSampleSat() {
const int kHorizon = 100;
const int kNumTasks = 4;
auto add_task_ranking = [&cp_model](const std::vector<IntVar>& starts,
const std::vector<BoolVar>& presences,
const std::vector<IntVar>& ranks) {
auto add_task_ranking = [&cp_model](absl::Span<const IntVar> starts,
absl::Span<const BoolVar> presences,
absl::Span<const IntVar> ranks) {
const int num_tasks = starts.size();
// Creates precedence variables between pairs of intervals.

View File

@@ -21,6 +21,7 @@
#include <deque>
#include <limits>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
@@ -44,6 +45,67 @@
namespace operations_research {
namespace sat {
// A simple map such that IdentityMap[t] == t for all t.
// This avoids allocating a vector filled with std::iota() in some APIs.
template <typename T>
class IdentityMap {
 public:
  // Identity lookup: the "value" associated with any key is the key itself.
  T operator[](T key) const { return key; }
};
// Small utility class to store a vector<vector<>> where one can only append
// new vectors and never change previously added ones. This allows storing a
// static key -> value(s) mapping.
//
// This is a lot more compact memory-wise and thus faster than vector<vector<>>.
// Note that we implement a really small subset of the vector<vector<>> API.
//
// We support int and StrongType for the key K and any copyable type for the
// value V.
template <typename K = int, typename V = int>
class CompactVectorVector {
 public:
  // Size of the "key" space; valid keys are always in [0, size()).
  size_t size() const;

  // Getters, either via [] or via a wrapping to be compatible with older api.
  //
  // Warning: Spans are only valid until the next modification!
  absl::Span<const V> operator[](K key) const;
  std::vector<absl::Span<const V>> AsVectorOfSpan() const;

  // Restore to an empty vector<vector<>>.
  void clear();

  // Given a flat mapping (keys[i] -> values[i]) with two parallel vectors, not
  // necessarily sorted by key, regroup the same key so that
  // CompactVectorVector[key] lists all values in the order in which they
  // appear.
  //
  // We only check keys.size(), so this can be used with IdentityMap() as
  // second argument.
  template <typename Keys, typename Values>
  void ResetFromFlatMapping(Keys keys, Values values);

  // Append a new entry.
  // Returns the previous size() as this is convenient for how we use it.
  int Add(absl::Span<const V> values);

  // Hacky: same as Add() but for sat::Literal or any type from which we can
  // get a value type V via L.Index().value().
  template <typename L>
  int AddLiterals(const std::vector<L>& wrapped_values);

 private:
  // Convert both int and StrongInt keys to a plain int.
  static int InternalKey(K key);

  // TODO(user): with a sentinel, we can infer
  // size_[i] = starts_[i + 1] - starts_[i]. This should be slightly faster.
  // Benchmark and change.
  std::vector<int> starts_;
  std::vector<int> sizes_;
  std::vector<V> buffer_;
};
// Prints a positive number with separators for easier reading (ex: 1'348'065).
std::string FormatCounter(int64_t num);
@@ -581,6 +643,108 @@ IntType FloorOfRatio(IntType numerator, IntType denominator) {
return CeilOrFloorOfRatio<IntType, false>(numerator, denominator);
}
// Appends one entry: records where it starts in the shared buffer and how
// many values it has, then copies the values at the end of the buffer.
// Returns the previous size(), i.e. the index of the new entry.
template <typename K, typename V>
inline int CompactVectorVector<K, V>::Add(absl::Span<const V> values) {
const int index = size();
starts_.push_back(buffer_.size());
sizes_.push_back(values.size());
buffer_.insert(buffer_.end(), values.begin(), values.end());
return index;
}
// Same as Add(), but unwraps each element through L.Index().value()
// (e.g. sat::Literal) before storing it in the buffer.
template <typename K, typename V>
template <typename L>
inline int CompactVectorVector<K, V>::AddLiterals(
const std::vector<L>& wrapped_values) {
const int index = size();
starts_.push_back(buffer_.size());
sizes_.push_back(wrapped_values.size());
for (const L wrapped_value : wrapped_values) {
buffer_.push_back(wrapped_value.Index().value());
}
return index;
}
// We need to support both StrongType and normal int.
// Strong int types expose the underlying int via .value().
template <typename K, typename V>
inline int CompactVectorVector<K, V>::InternalKey(K key) {
if constexpr (std::is_same_v<K, int>) {
return key;
} else {
return key.value();
}
}
// Returns the values associated with `key`.
// Warning: the span is only valid until the next modification of *this.
// NOTE(review): the DCHECKs compare the (possibly strong-typed) key directly
// against 0 and the container sizes; this assumes the strong type provides
// comparison with integers -- confirm.
template <typename K, typename V>
inline absl::Span<const V> CompactVectorVector<K, V>::operator[](K key) const {
DCHECK_GE(key, 0);
DCHECK_LT(key, starts_.size());
DCHECK_LT(key, sizes_.size());
const int k = InternalKey(key);
const size_t size = static_cast<size_t>(sizes_[k]);
// Do not dereference buffer_ for an empty entry (starts_[k] may equal
// buffer_.size()).
if (size == 0) return {};
return {&buffer_[starts_[k]], size};
}
// Materializes one span per key; used to be compatible with older APIs that
// expect a vector<Span>. The spans are only valid until the next modification.
template <typename K, typename V>
inline std::vector<absl::Span<const V>>
CompactVectorVector<K, V>::AsVectorOfSpan() const {
std::vector<absl::Span<const V>> result(starts_.size());
for (int k = 0; k < starts_.size(); ++k) {
result[k] = (*this)[k];
}
return result;
}
// Restores to an empty vector<vector<>>. Note that the underlying vectors
// keep their capacity, so reuse after clear() avoids reallocations.
template <typename K, typename V>
inline void CompactVectorVector<K, V>::clear() {
starts_.clear();
sizes_.clear();
buffer_.clear();
}
// Number of keys, i.e. the number of inner vectors.
template <typename K, typename V>
inline size_t CompactVectorVector<K, V>::size() const {
return starts_.size();
}
// Rebuilds the whole structure from a flat (keys[i] -> values[i]) mapping
// using a counting-sort style pass: count the number of values per key,
// compute the cumulative start offsets, then scatter the values into the
// buffer while using starts_ as a write cursor, and finally shift starts_
// back to its original offsets. Overall O(keys.size() + max_key).
template <typename K, typename V>
template <typename Keys, typename Values>
inline void CompactVectorVector<K, V>::ResetFromFlatMapping(Keys keys,
Values values) {
if (keys.empty()) return clear();
// Compute maximum index (the number of keys we must allocate for).
int max_key = 0;
for (const K key : keys) {
max_key = std::max(max_key, InternalKey(key) + 1);
}
// Count the number of values per key to fill sizes_.
sizes_.assign(max_key, 0);
for (const K key : keys) {
sizes_[InternalKey(key)]++;
}
// Compute starts_ as the exclusive prefix sum of sizes_.
starts_.assign(max_key, 0);
for (int k = 1; k < max_key; ++k) {
starts_[k] = starts_[k - 1] + sizes_[k - 1];
}
// Copy data and uses starts as temporary write cursors: after this loop,
// starts_[k] has advanced past the last value written for key k.
buffer_.resize(keys.size());
for (int i = 0; i < keys.size(); ++i) {
buffer_[starts_[InternalKey(keys[i])]++] = values[i];
}
// Restore starts_: each cursor now holds the start of the NEXT key, so
// shifting right by one (and resetting the first) recovers the offsets.
for (int k = max_key - 1; k > 0; --k) {
starts_[k] = starts_[k - 1];
}
starts_[0] = 0;
}
} // namespace sat
} // namespace operations_research