[CP-SAT] work on scheduling cuts

This commit is contained in:
Laurent Perron
2022-04-21 17:29:56 +02:00
parent e3eb35132b
commit 5b87373be5
7 changed files with 331 additions and 202 deletions

View File

@@ -409,6 +409,18 @@ DoubleLinearExpr& DoubleLinearExpr::AddTerm(BoolVar var, double coeff) {
return *this;
}
DoubleLinearExpr& DoubleLinearExpr::AddExpression(const LinearExpr& expr,
                                                  double coeff) {
  // Appends `coeff * expr` to this double linear expression, term by term.
  const std::vector<int>& indices = expr.variables();
  // Bind by const reference to avoid copying the coefficient vector on every
  // call. This is safe whether coefficients() returns a reference or a
  // temporary (lifetime extension applies in the latter case).
  const std::vector<int64_t>& coefficients = expr.coefficients();
  for (int i = 0; i < indices.size(); ++i) {
    variables_.push_back(indices[i]);
    // Each integer coefficient is scaled by `coeff` into the double domain.
    coefficients_.push_back(static_cast<double>(coefficients[i]) * coeff);
  }
  return *this;
}
DoubleLinearExpr& DoubleLinearExpr::operator-=(double value) {
constant_ -= value;
return *this;

View File

@@ -370,6 +370,9 @@ class DoubleLinearExpr {
DoubleLinearExpr& AddTerm(IntVar var, double coeff);
DoubleLinearExpr& AddTerm(BoolVar var, double coeff);
/// Adds a linear expression to the double linear expression.
DoubleLinearExpr& AddExpression(const LinearExpr& exprs, double coeff = 1.0);
/// Adds a constant value to the linear expression.
DoubleLinearExpr& operator-=(double value);

View File

@@ -556,7 +556,7 @@ void LinearProgrammingConstraint::RegisterWith(Model* model) {
if (objective_is_defined_) {
watcher->WatchUpperBound(objective_cp_, watcher_id);
}
watcher->SetPropagatorPriority(watcher_id, 2);
watcher->SetPropagatorPriority(watcher_id, 4);
watcher->AlwaysCallAtLevelZero(watcher_id);
// Registering it with the trail make sure this class is always in sync when

View File

@@ -601,87 +601,233 @@ void AppendRoutesRelaxation(const ConstraintProto& ct, Model* model,
relaxation->linear_constraints.push_back(zero_node_balance_lc.Build());
}
// Scan the intervals of a cumulative/no_overlap constraint, and its capacity (1
// for the no_overlap). It returns the index of the makespan interval if found,
// or -1 otherwise.
//
// Currently, this requires the capacity to be fixed in order to scan for a
// makespan interval.
//
// The makespan interval has the following property:
// - its end is fixed at the horizon
// - it is always present
// - its demand is the capacity of the cumulative/no_overlap.
// - its size is > 0.
//
// These properties ensure that all other intervals end before the start of
// the makespan interval.
int DetectMakespan(const std::vector<IntervalVariable>& intervals,
                   const std::vector<AffineExpression>& demands,
                   const AffineExpression& capacity, Model* model) {
  IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
  IntervalsRepository* repository = model->GetOrCreate<IntervalsRepository>();

  // TODO(user): Supports variable capacity.
  if (!integer_trail->IsFixed(capacity)) {
    return -1;
  }

  // Detect the horizon (max of all end max of all intervals).
  IntegerValue horizon = kMinIntegerValue;
  for (int i = 0; i < intervals.size(); ++i) {
    if (repository->IsAbsent(intervals[i])) continue;
    // Query the repository with the interval variable, not the local loop
    // index (the original `End(i)` indexed the wrong entity).
    horizon = std::max(
        horizon, integer_trail->UpperBound(repository->End(intervals[i])));
  }

  const IntegerValue capacity_value = integer_trail->FixedValue(capacity);
  for (int i = 0; i < intervals.size(); ++i) {
    if (repository->IsAbsent(intervals[i])) continue;
    const AffineExpression& end = repository->End(intervals[i]);
    // A makespan candidate must: consume the full capacity, end exactly at
    // the horizon, have a strictly positive size, and always be present.
    if (integer_trail->IsFixed(demands[i]) &&
        integer_trail->FixedValue(demands[i]) == capacity_value &&
        integer_trail->IsFixed(end) &&
        integer_trail->FixedValue(end) == horizon &&
        integer_trail->LowerBound(repository->Size(intervals[i])) > 0 &&
        repository->IsPresent(intervals[i])) {
      return i;
    }
  }
  // No interval with the makespan properties was found.
  return -1;
}
// Builds the linear relaxation of a no_overlap constraint (viewed as a
// cumulative with capacity 1 and unit demands), and registers the associated
// cut generators when the linearization level allows it.
void AppendNoOverlapRelaxationAndCutGenerator(const ConstraintProto& ct,
                                              Model* model,
                                              LinearRelaxation* relaxation) {
  // Enforced no_overlap constraints are not linearized.
  if (HasEnforcementLiteral(ct)) return;
  auto* mapping = model->GetOrCreate<CpModelMapping>();
  std::vector<IntervalVariable> intervals =
      mapping->Intervals(ct.no_overlap().intervals());
  // A no_overlap is a cumulative with capacity 1 and all demands equal to 1.
  const IntegerValue one(1);
  std::vector<AffineExpression> demands(intervals.size(), one);
  const int makespan_index =
      DetectMakespan(intervals, demands, /*capacity=*/one, model);
  std::optional<AffineExpression> makespan;
  IntervalsRepository* repository = model->GetOrCreate<IntervalsRepository>();
  if (makespan_index != -1) {
    // The makespan interval is removed from the task list: its start acts as
    // the makespan expression passed to the cut generator below.
    makespan = repository->Start(intervals[makespan_index]);
    // Any element can be dropped: the vector is filled with ones.
    demands.pop_back();
    intervals.erase(intervals.begin() + makespan_index);
  }
  SchedulingConstraintHelper* helper = repository->GetOrCreateHelper(intervals);
  // With unit demands, the energy of a task is just its size.
  std::vector<std::optional<LinearExpression>> energies;
  energies.reserve(helper->NumTasks());
  for (int i = 0; i < helper->NumTasks(); ++i) {
    LinearConstraintBuilder e(model);
    e.AddTerm(helper->Sizes()[i], one);
    energies.push_back(e.BuildExpression());
  }
  AddCumulativeRelaxation(helper, demands, /*capacity=*/one, energies, model,
                          relaxation);
  // Cut generators are only added at linearization level 2 and above.
  if (model->GetOrCreate<SatParameters>()->linearization_level() > 1) {
    AddNoOverlapCutGenerator(helper, makespan, model, relaxation);
  }
}
// Builds the linear relaxation of a cumulative constraint and registers the
// associated cut generators when the linearization level allows it.
void AppendCumulativeRelaxationAndCutGenerator(const ConstraintProto& ct,
                                               Model* model,
                                               LinearRelaxation* relaxation) {
  // Enforced cumulative constraints are not linearized.
  if (HasEnforcementLiteral(ct)) return;
  auto* mapping = model->GetOrCreate<CpModelMapping>();
  std::vector<IntervalVariable> intervals =
      mapping->Intervals(ct.cumulative().intervals());
  std::vector<AffineExpression> demands =
      mapping->Affines(ct.cumulative().demands());
  const AffineExpression capacity = mapping->Affine(ct.cumulative().capacity());
  const int makespan_index =
      DetectMakespan(intervals, demands, capacity, model);
  std::optional<AffineExpression> makespan;
  IntervalsRepository* repository = model->GetOrCreate<IntervalsRepository>();
  if (makespan_index != -1) {
    // We remove the makespan data from the intervals and the demands vectors;
    // its start acts as the makespan expression for the cut generator below.
    makespan = repository->Start(intervals[makespan_index]);
    demands.erase(demands.begin() + makespan_index);
    intervals.erase(intervals.begin() + makespan_index);
  }
  // We try to linearize the energy of each task (size * demand).
  SchedulingConstraintHelper* helper = repository->GetOrCreateHelper(intervals);
  std::vector<std::optional<LinearExpression>> energies;
  energies.reserve(helper->NumTasks());
  for (int i = 0; i < helper->NumTasks(); ++i) {
    energies.push_back(
        TryToLinearizeProduct(demands[i], helper->Sizes()[i], model));
  }
  // We can now add the relaxation and the cut generators.
  AddCumulativeRelaxation(helper, demands, capacity, energies, model,
                          relaxation);
  // Cut generators are only added at linearization level 2 and above.
  if (model->GetOrCreate<SatParameters>()->linearization_level() > 1) {
    AddCumulativeCutGenerator(helper, demands, capacity, energies, makespan,
                              model, relaxation);
  }
}
// This relaxation will compute the bounding box of all tasks in the cumulative,
// and add the constraint that the sum of energies of each task must fit in the
// capacity * span area.
// TODO(user): Exploit the makespan if found.
void AddCumulativeRelaxation(
const std::vector<IntervalVariable>& intervals,
SchedulingConstraintHelper* helper,
const std::vector<AffineExpression>& demands,
const std::vector<std::optional<LinearExpression>>& energies,
IntegerValue capacity_upper_bound, Model* model,
const AffineExpression& capacity,
const std::vector<std::optional<LinearExpression>>& energies, Model* model,
LinearRelaxation* relaxation) {
// TODO(user): Keep a map intervals -> helper, or ct_index->helper to avoid
// creating many helpers for the same constraint.
auto* helper = new SchedulingConstraintHelper(intervals, model);
model->TakeOwnership(helper);
const int num_intervals = helper->NumTasks();
IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
std::vector<Literal> presence_literals;
std::vector<AffineExpression> starts;
std::vector<AffineExpression> ends;
std::vector<Literal> clause;
bool at_least_one_interval_is_present = false;
IntegerValue min_of_starts = kMaxIntegerValue;
IntegerValue max_of_ends = kMinIntegerValue;
int num_variable_sizes = 0;
int num_variable_energies = 0;
int num_optionals = 0;
IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
for (int index = 0; index < num_intervals; ++index) {
if (helper->IsAbsent(index)) continue;
min_of_starts = std::min(min_of_starts, helper->StartMin(index));
max_of_ends = std::max(max_of_ends, helper->EndMax(index));
if (helper->IsOptional(index)) {
num_optionals++;
const Literal task_lit = helper->PresenceLiteral(index);
presence_literals.push_back(task_lit);
clause.push_back(task_lit);
} else {
at_least_one_interval_is_present = true;
presence_literals.push_back(
model->GetOrCreate<IntegerEncoder>()->GetTrueLiteral());
}
if (!helper->SizeIsFixed(index) ||
(!demands.empty() && !integer_trail->IsFixed(demands[index]))) {
num_variable_sizes++;
num_variable_energies++;
}
starts.push_back(helper->Starts()[index]);
ends.push_back(helper->Ends()[index]);
}
VLOG(2) << "Span [" << min_of_starts << ".." << max_of_ends << "] with "
<< num_optionals << " optional intervals, and " << num_variable_sizes
<< " variable size intervals out of " << num_intervals
<< " intervals";
<< num_optionals << " optional intervals, and "
<< num_variable_energies << " variable energy tasks out of "
<< num_intervals << " intervals";
if (num_variable_sizes + num_optionals == 0) return;
// If nothing is variable, the linear relaxation will already be enforced by
// the scheduling propagators.
if (num_variable_energies + num_optionals == 0) return;
const IntegerVariable span_start =
integer_trail->AddIntegerVariable(min_of_starts, max_of_ends);
const IntegerVariable span_size = integer_trail->AddIntegerVariable(
IntegerValue(0), max_of_ends - min_of_starts);
const IntegerVariable span_end =
integer_trail->AddIntegerVariable(min_of_starts, max_of_ends);
IntervalVariable span_var;
if (num_optionals < num_intervals) {
span_var = model->Add(NewInterval(span_start, span_end, span_size));
} else {
const Literal span_lit = Literal(model->Add(NewBooleanVariable()), true);
span_var = model->Add(
NewOptionalInterval(span_start, span_end, span_size, span_lit));
auto* sat_solver = model->GetOrCreate<SatSolver>();
const Literal cumulative_is_not_empty =
at_least_one_interval_is_present
? model->GetOrCreate<IntegerEncoder>()->GetTrueLiteral()
: Literal(model->Add(NewBooleanVariable()), true);
if (!at_least_one_interval_is_present) {
for (const Literal task_lit : clause) {
sat_solver->AddBinaryClause(task_lit.Negated(), cumulative_is_not_empty);
}
clause.push_back(cumulative_is_not_empty.Negated());
sat_solver->AddProblemClause(clause, /*is_safe=*/false);
}
model->Add(SpanOfIntervals(span_var, intervals));
// Link span_start and span_end to the starts and ends of the tasks.
model->Add(EqualMinOfSelectedVariables(cumulative_is_not_empty, span_start,
starts, presence_literals));
model->Add(EqualMaxOfSelectedVariables(cumulative_is_not_empty, span_end,
ends, presence_literals));
LinearConstraintBuilder lc(model, kMinIntegerValue, IntegerValue(0));
lc.AddTerm(span_size, -capacity_upper_bound);
lc.AddTerm(span_end, -integer_trail->UpperBound(capacity));
lc.AddTerm(span_start, integer_trail->UpperBound(capacity));
for (int i = 0; i < num_intervals; ++i) {
const IntegerValue demand_lower_bound =
demands.empty() ? IntegerValue(1)
: integer_trail->LowerBound(demands[i]);
const bool demand_is_fixed =
demands.empty() || integer_trail->IsFixed(demands[i]);
if (!helper->IsOptional(i)) {
if (demand_is_fixed) {
lc.AddTerm(helper->Sizes()[i], demand_lower_bound);
} else if (!helper->SizeIsFixed(i) && energies[i].has_value()) {
// We prefer the energy additional info instead of the McCormick
// relaxation.
if (energies[i].has_value()) {
// The energy is defined if built from a constant value, a linear
// expression, or a linearized product.
lc.AddLinearExpression(energies[i].value());
} else {
// The demand and the size are variable, and their product could not be
// linearized.
lc.AddQuadraticLowerBound(helper->Sizes()[i], demands[i],
integer_trail);
}
} else {
const IntegerValue product_min =
helper->SizeMin(i) * integer_trail->LowerBound(demands[i]);
const IntegerValue energy_min =
energies[i].has_value()
? LinExprLowerBound(energies[i].value(), *integer_trail)
: IntegerValue(0);
if (!lc.AddLiteralTerm(helper->PresenceLiteral(i),
helper->SizeMin(i) * demand_lower_bound)) {
std::max(energy_min, product_min))) {
return;
}
}
@@ -689,48 +835,6 @@ void AddCumulativeRelaxation(
relaxation->linear_constraints.push_back(lc.Build());
}
// Builds the demands, sizes and (linearized) energies of a cumulative
// constraint, then delegates to AddCumulativeRelaxation with the level-zero
// upper bound of the capacity.
void AppendCumulativeRelaxation(const ConstraintProto& ct, Model* model,
                                LinearRelaxation* relaxation) {
  CHECK(ct.has_cumulative());
  // Enforced cumulative constraints are not linearized.
  if (HasEnforcementLiteral(ct)) return;
  auto* mapping = model->GetOrCreate<CpModelMapping>();
  std::vector<IntervalVariable> intervals =
      mapping->Intervals(ct.cumulative().intervals());
  const IntegerValue capacity_upper_bound =
      model->GetOrCreate<IntegerTrail>()->UpperBound(
          mapping->Affine(ct.cumulative().capacity()));

  // Scan energies: for each task, try to linearize size * demand.
  IntervalsRepository* intervals_repository =
      model->GetOrCreate<IntervalsRepository>();
  std::vector<std::optional<LinearExpression>> energies;
  std::vector<AffineExpression> demands;
  std::vector<AffineExpression> sizes;
  for (int i = 0; i < ct.cumulative().demands_size(); ++i) {
    demands.push_back(mapping->Affine(ct.cumulative().demands(i)));
    sizes.push_back(intervals_repository->Size(intervals[i]));
    energies.push_back(
        TryToLinearizeProduct(demands.back(), sizes.back(), model));
  }

  AddCumulativeRelaxation(intervals, demands, energies, capacity_upper_bound,
                          model, relaxation);
}
// Linearizes a no_overlap constraint as a cumulative with capacity 1, empty
// demands and no precomputed energies.
void AppendNoOverlapRelaxation(const ConstraintProto& ct, Model* model,
                               LinearRelaxation* relaxation) {
  CHECK(ct.has_no_overlap());
  // Enforced no_overlap constraints are not linearized.
  if (HasEnforcementLiteral(ct)) return;
  auto* mapping = model->GetOrCreate<CpModelMapping>();
  std::vector<IntervalVariable> intervals =
      mapping->Intervals(ct.no_overlap().intervals());
  AddCumulativeRelaxation(intervals, /*demands=*/{}, /*energies=*/{},
                          /*capacity_upper_bound=*/IntegerValue(1), model,
                          relaxation);
}
// Adds the energetic relaxation sum(areas) <= bounding box area.
void AppendNoOverlap2dRelaxation(const ConstraintProto& ct, Model* model,
LinearRelaxation* relaxation) {
@@ -1084,17 +1188,12 @@ void TryToLinearizeConstraint(const CpModelProto& model_proto,
break;
}
case ConstraintProto::ConstraintCase::kNoOverlap: {
if (linearization_level > 1) {
AppendNoOverlapRelaxation(ct, model, relaxation);
AddNoOverlapCutGenerator(ct, model, relaxation);
}
AppendNoOverlapRelaxationAndCutGenerator(ct, model, relaxation);
break;
}
case ConstraintProto::ConstraintCase::kCumulative: {
if (linearization_level > 1) {
AppendCumulativeRelaxation(ct, model, relaxation);
AddCumulativeCutGenerator(ct, model, relaxation);
}
AppendCumulativeRelaxationAndCutGenerator(ct, model, relaxation);
break;
}
case ConstraintProto::ConstraintCase::kNoOverlap2D: {
@@ -1241,41 +1340,27 @@ bool IntervalIsVariable(const IntervalVariable interval,
return false;
}
void AddCumulativeCutGenerator(const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation) {
if (HasEnforcementLiteral(ct)) return;
auto* mapping = m->GetOrCreate<CpModelMapping>();
const std::vector<IntervalVariable> intervals =
mapping->Intervals(ct.cumulative().intervals());
const AffineExpression capacity = mapping->Affine(ct.cumulative().capacity());
// Scan energies.
IntervalsRepository* intervals_repository =
m->GetOrCreate<IntervalsRepository>();
std::vector<std::optional<LinearExpression>> energies;
std::vector<AffineExpression> demands;
for (int i = 0; i < intervals.size(); ++i) {
demands.push_back(mapping->Affine(ct.cumulative().demands(i)));
energies.push_back(TryToLinearizeProduct(
demands.back(), intervals_repository->Size(intervals[i]), m));
}
void AddCumulativeCutGenerator(
SchedulingConstraintHelper* helper,
const std::vector<AffineExpression>& demands,
const AffineExpression& capacity,
const std::vector<std::optional<LinearExpression>>& energies,
std::optional<AffineExpression>& makespan, Model* m,
LinearRelaxation* relaxation) {
relaxation->cut_generators.push_back(
CreateCumulativeTimeTableCutGenerator(intervals, capacity, demands, m));
CreateCumulativeTimeTableCutGenerator(helper, capacity, demands, m));
relaxation->cut_generators.push_back(
CreateCumulativeCompletionTimeCutGenerator(intervals, capacity, demands,
CreateCumulativeCompletionTimeCutGenerator(helper, capacity, demands,
energies, m));
relaxation->cut_generators.push_back(
CreateCumulativePrecedenceCutGenerator(intervals, capacity, demands, m));
CreateCumulativePrecedenceCutGenerator(helper, capacity, demands, m));
// Checks if at least one rectangle has a variable size, is optional, or if
// the demand or the capacity are variable.
bool has_variable_part = false;
IntegerTrail* integer_trail = m->GetOrCreate<IntegerTrail>();
for (int i = 0; i < intervals.size(); ++i) {
if (IntervalIsVariable(intervals[i], intervals_repository)) {
for (int i = 0; i < helper->NumTasks(); ++i) {
if (!helper->SizeIsFixed(i)) {
has_variable_part = true;
break;
}
@@ -1287,35 +1372,29 @@ void AddCumulativeCutGenerator(const ConstraintProto& ct, Model* m,
}
if (has_variable_part || !integer_trail->IsFixed(capacity)) {
relaxation->cut_generators.push_back(CreateCumulativeEnergyCutGenerator(
intervals, capacity, demands, energies, m));
helper, capacity, demands, energies, makespan, m));
}
}
void AddNoOverlapCutGenerator(const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation) {
if (HasEnforcementLiteral(ct)) return;
auto* mapping = m->GetOrCreate<CpModelMapping>();
std::vector<IntervalVariable> intervals =
mapping->Intervals(ct.no_overlap().intervals());
void AddNoOverlapCutGenerator(SchedulingConstraintHelper* helper,
const std::optional<AffineExpression>& makespan,
Model* m, LinearRelaxation* relaxation) {
relaxation->cut_generators.push_back(
CreateNoOverlapPrecedenceCutGenerator(intervals, m));
CreateNoOverlapPrecedenceCutGenerator(helper, m));
relaxation->cut_generators.push_back(
CreateNoOverlapCompletionTimeCutGenerator(intervals, m));
CreateNoOverlapCompletionTimeCutGenerator(helper, m));
// Checks if at least one rectangle has a variable size or is optional.
IntervalsRepository* intervals_repository =
m->GetOrCreate<IntervalsRepository>();
bool has_variable_part = false;
for (int i = 0; i < intervals.size(); ++i) {
if (IntervalIsVariable(intervals[i], intervals_repository)) {
for (int i = 0; i < helper->NumTasks(); ++i) {
if (!helper->SizeIsFixed(i)) {
has_variable_part = true;
break;
}
}
if (has_variable_part) {
relaxation->cut_generators.push_back(
CreateNoOverlapEnergyCutGenerator(intervals, m));
CreateNoOverlapEnergyCutGenerator(helper, makespan, m));
}
}

View File

@@ -143,14 +143,16 @@ void AppendRoutesRelaxation(const ConstraintProto& ct, Model* model,
// Adds linearization of no overlap constraints.
// It adds an energetic equation linking the duration of all potential tasks to
// the actual span of the no overlap constraint.
void AppendNoOverlapRelaxation(const ConstraintProto& ct, Model* model,
LinearRelaxation* relaxation);
void AppendNoOverlapRelaxationAndCutGenerator(const ConstraintProto& ct,
Model* model,
LinearRelaxation* relaxation);
// Adds linearization of cumulative constraints. The second part adds an
// energetic equation linking the duration of all potential tasks to the actual
// max span * capacity of the cumulative constraint.
void AppendCumulativeRelaxation(const ConstraintProto& ct, Model* model,
LinearRelaxation* relaxation);
void AppendCumulativeRelaxationAndCutGenerator(const ConstraintProto& ct,
Model* model,
LinearRelaxation* relaxation);
// Cut generators.
void AddIntProdCutGenerator(const ConstraintProto& ct, int linearization_level,
@@ -168,15 +170,33 @@ void AddCircuitCutGenerator(const ConstraintProto& ct, Model* m,
void AddRoutesCutGenerator(const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation);
void AddCumulativeCutGenerator(const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation);
void AddNoOverlapCutGenerator(const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation);
void AddNoOverlap2dCutGenerator(const ConstraintProto& ct, Model* m,
LinearRelaxation* relaxation);
// Scheduling relaxations and cut generators.
// Adds linearization of cumulative constraints. The second part adds an
// energetic equation linking the duration of all potential tasks to the actual
// max span * capacity of the cumulative constraint.
void AddCumulativeRelaxation(
SchedulingConstraintHelper* helper,
const std::vector<AffineExpression>& demands,
const AffineExpression& capacity,
const std::vector<std::optional<LinearExpression>>& energies, Model* model,
LinearRelaxation* relaxation);
void AddCumulativeCutGenerator(
SchedulingConstraintHelper* helper,
const std::vector<AffineExpression>& demands,
const AffineExpression& capacity,
const std::vector<std::optional<LinearExpression>>& energies,
std::optional<AffineExpression>& makespan, Model* m,
LinearRelaxation* relaxation);
void AddNoOverlapCutGenerator(SchedulingConstraintHelper* helper,
const std::optional<AffineExpression>& makespan,
Model* m, LinearRelaxation* relaxation);
// Adds linearization of different types of constraints.
void TryToLinearizeConstraint(const CpModelProto& model_proto,
const ConstraintProto& ct,

View File

@@ -17,6 +17,7 @@
#include <cmath>
#include <cstdlib>
#include <functional>
#include <limits>
#include <optional>
#include <string>
#include <tuple>
@@ -151,15 +152,18 @@ void GenerateCumulativeEnergeticCuts(
const std::string& cut_name,
const absl::StrongVector<IntegerVariable, double>& lp_values,
std::vector<EnergyEvent> events, const AffineExpression capacity,
Model* model, LinearConstraintManager* manager) {
std::optional<AffineExpression> makespan, Model* model,
LinearConstraintManager* manager) {
// Compute relevant time points.
// TODO(user): We could reduce this set.
absl::btree_set<IntegerValue> time_points_set;
IntegerValue max_end_min = kMinIntegerValue;
for (const EnergyEvent& event : events) {
time_points_set.insert(event.x_start_min);
time_points_set.insert(event.x_start_max);
time_points_set.insert(event.x_end_min);
time_points_set.insert(event.x_end_max);
max_end_min = std::max(max_end_min, event.x_end_min);
}
const std::vector<IntegerValue> time_points(time_points_set.begin(),
time_points_set.end());
@@ -173,6 +177,10 @@ void GenerateCumulativeEnergeticCuts(
// add it either to energy_lp, or to the cut.
// It returns false if it tried to generate the cut, and failed.
IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
// Checks the precondition of the code.
DCHECK(!makespan.has_value() || integer_trail->IsFixed(capacity));
const auto add_one_event = [integer_trail, &lp_values](
const EnergyEvent& event,
IntegerValue window_start,
@@ -285,13 +293,26 @@ void GenerateCumulativeEnergeticCuts(
};
std::vector<OverloadedTimeWindow> overloaded_time_windows;
const double capacity_lp = capacity.LpValue(lp_values);
const double makespan_lp = makespan.has_value()
? makespan->LpValue(lp_values)
: std::numeric_limits<double>::infinity();
const int num_time_points = time_points.size();
for (int i = 0; i + 1 < num_time_points; ++i) {
const IntegerValue window_start = time_points[i];
// After max_end_min, all tasks can fit before window_start.
if (window_start >= max_end_min) break;
// if (ToDouble(window_start) >= makespan_lp) continue;
for (int j = i + 1; j < num_time_points; ++j) {
const IntegerValue window_end = time_points[j];
const double max_energy_lp =
ToDouble(window_end - window_start) * capacity_lp;
const double energy_up_to_makespan_lp =
makespan.has_value()
? capacity_lp * (makespan_lp - ToDouble(window_start))
: std::numeric_limits<double>::infinity();
if (max_energy_lp >= sum_of_energies_lp) break;
// Scan all events and sum their energetic contributions.
@@ -303,8 +324,10 @@ void GenerateCumulativeEnergeticCuts(
break; // Abort.
}
}
if (energy_correctly_computed &&
energy_lp > max_energy_lp * (1.0 + kMinCutViolation)) {
if (!energy_correctly_computed) continue;
if (energy_lp >= std::min(max_energy_lp, energy_up_to_makespan_lp) *
(1.0 + kMinCutViolation)) {
overloaded_time_windows.push_back({window_start, window_end});
}
}
@@ -323,8 +346,24 @@ void GenerateCumulativeEnergeticCuts(
bool add_lifted_to_name = false;
bool add_quadratic_to_name = false;
bool add_energy_to_name = false;
bool use_makespan_in_cut = false;
LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
cut.AddTerm(capacity, window_start - window_end);
double max_energy_lp = ToDouble(window_end - window_start) * capacity_lp;
if (makespan.has_value()) {
const double energy_up_to_makespan_lp =
capacity_lp * (makespan_lp - ToDouble(window_start));
if (energy_up_to_makespan_lp < max_energy_lp) {
max_energy_lp = energy_up_to_makespan_lp;
use_makespan_in_cut = true;
}
}
if (use_makespan_in_cut) {
IntegerValue capacity_value = integer_trail->FixedValue(capacity);
cut.AddConstant(capacity_value * window_start);
cut.AddTerm(makespan.value(), -capacity_value);
} else {
cut.AddTerm(capacity, window_start - window_end);
}
for (const EnergyEvent& event : events) {
if (!add_one_event(event, window_start, window_end,
/*energy_lp=*/nullptr, &cut, &add_energy_to_name,
@@ -341,6 +380,7 @@ void GenerateCumulativeEnergeticCuts(
if (add_quadratic_to_name) full_name.append("_quadratic");
if (add_lifted_to_name) full_name.append("_lifted");
if (add_energy_to_name) full_name.append("_energy");
if (use_makespan_in_cut) full_name.append("_makespan");
top_n_cuts.AddCut(cut.Build(), full_name, lp_values);
}
}
@@ -385,16 +425,12 @@ double ComputeEnergyLp(
}
CutGenerator CreateCumulativeEnergyCutGenerator(
const std::vector<IntervalVariable>& intervals,
const AffineExpression& capacity,
SchedulingConstraintHelper* helper, const AffineExpression& capacity,
const std::vector<AffineExpression>& demands,
const std::vector<std::optional<LinearExpression>>& energies,
Model* model) {
const std::optional<AffineExpression>& makespan, Model* model) {
CutGenerator result;
SchedulingConstraintHelper* helper =
model->GetOrCreate<IntervalsRepository>()->GetOrCreateHelper(intervals);
Trail* trail = model->GetOrCreate<Trail>();
IntegerEncoder* encoder = model->GetOrCreate<IntegerEncoder>();
IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
@@ -408,12 +444,14 @@ CutGenerator CreateCumulativeEnergyCutGenerator(
gtl::STLSortAndRemoveDuplicates(&result.vars);
result.generate_cuts =
[capacity, demands, energies, trail, integer_trail, helper, model,
encoder](const absl::StrongVector<IntegerVariable, double>& lp_values,
LinearConstraintManager* manager) {
[makespan, capacity, demands, energies, trail, helper, model, encoder](
const absl::StrongVector<IntegerVariable, double>& lp_values,
LinearConstraintManager* manager) {
if (trail->CurrentDecisionLevel() > 0) return true;
if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
std::vector<EnergyEvent> events;
for (int i = 0; i < helper->NumTasks(); ++i) {
if (helper->IsAbsent(i)) continue;
@@ -429,9 +467,7 @@ CutGenerator CreateCumulativeEnergyCutGenerator(
e.x_end_max = helper->EndMax(i);
e.x_size = helper->Sizes()[i];
e.y_size = demands[i];
if (energies[i].has_value()) {
e.energy = energies[i];
}
e.energy = energies[i];
e.x_size_min = helper->SizeMin(i);
e.y_size_min = integer_trail->LevelZeroLowerBound(demands[i]);
if (!helper->IsPresent(i)) {
@@ -444,7 +480,7 @@ CutGenerator CreateCumulativeEnergyCutGenerator(
}
GenerateCumulativeEnergeticCuts("CumulativeEnergy", lp_values, events,
capacity, model, manager);
capacity, makespan, model, manager);
return true;
};
@@ -452,28 +488,27 @@ CutGenerator CreateCumulativeEnergyCutGenerator(
}
CutGenerator CreateNoOverlapEnergyCutGenerator(
const std::vector<IntervalVariable>& intervals, Model* model) {
SchedulingConstraintHelper* helper,
const std::optional<AffineExpression>& makespan, Model* model) {
CutGenerator result;
Trail* trail = model->GetOrCreate<Trail>();
IntegerEncoder* encoder = model->GetOrCreate<IntegerEncoder>();
SchedulingConstraintHelper* helper =
model->GetOrCreate<IntervalsRepository>()->GetOrCreateHelper(intervals);
AddIntegerVariableFromIntervals(helper, model, &result.vars);
gtl::STLSortAndRemoveDuplicates(&result.vars);
// We need to convert AffineExpression to LinearExpression for the energy.
std::vector<LinearExpression> sizes;
sizes.reserve(intervals.size());
for (int i = 0; i < intervals.size(); ++i) {
sizes.reserve(helper->NumTasks());
for (int i = 0; i < helper->NumTasks(); ++i) {
LinearConstraintBuilder builder(model);
builder.AddTerm(helper->Sizes()[i], IntegerValue(1));
sizes.push_back(builder.BuildExpression());
}
result.generate_cuts =
[sizes, trail, helper, model, encoder](
[makespan, sizes, trail, helper, model, encoder](
const absl::StrongVector<IntegerVariable, double>& lp_values,
LinearConstraintManager* manager) {
if (trail->CurrentDecisionLevel() > 0) return true;
@@ -508,7 +543,8 @@ CutGenerator CreateNoOverlapEnergyCutGenerator(
}
GenerateCumulativeEnergeticCuts("NoOverlapEnergy", lp_values, events,
IntegerValue(1), model, manager);
IntegerValue(1), makespan, model,
manager);
return true;
};
return result;
@@ -536,9 +572,7 @@ void GenerateNoOverlap2dEnergyCut(
e.y_min = y_helper->StartMin(rect);
e.y_max = y_helper->EndMax(rect);
e.y_size = y_helper->Sizes()[rect];
if (energies[rect].has_value()) {
e.energy = energies[rect];
}
e.energy = energies[rect];
e.presence_literal_index =
x_helper->IsPresent(rect)
? (y_helper->IsPresent(rect)
@@ -806,12 +840,9 @@ CutGenerator CreateNoOverlap2dEnergyCutGenerator(
}
CutGenerator CreateCumulativeTimeTableCutGenerator(
const std::vector<IntervalVariable>& intervals,
const AffineExpression& capacity,
SchedulingConstraintHelper* helper, const AffineExpression& capacity,
const std::vector<AffineExpression>& demands, Model* model) {
CutGenerator result;
SchedulingConstraintHelper* helper =
model->GetOrCreate<IntervalsRepository>()->GetOrCreateHelper(intervals);
IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
AppendVariablesToCumulativeCut(capacity, demands, integer_trail, &result);
@@ -1033,14 +1064,10 @@ void GenerateCutsBetweenPairOfNonOverlappingTasks(
}
CutGenerator CreateCumulativePrecedenceCutGenerator(
const std::vector<IntervalVariable>& intervals,
const AffineExpression& capacity,
SchedulingConstraintHelper* helper, const AffineExpression& capacity,
const std::vector<AffineExpression>& demands, Model* model) {
CutGenerator result;
SchedulingConstraintHelper* helper =
model->GetOrCreate<IntervalsRepository>()->GetOrCreateHelper(intervals);
IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
AppendVariablesToCumulativeCut(capacity, demands, integer_trail, &result);
@@ -1081,12 +1108,9 @@ CutGenerator CreateCumulativePrecedenceCutGenerator(
}
CutGenerator CreateNoOverlapPrecedenceCutGenerator(
const std::vector<IntervalVariable>& intervals, Model* model) {
SchedulingConstraintHelper* helper, Model* model) {
CutGenerator result;
SchedulingConstraintHelper* helper =
model->GetOrCreate<IntervalsRepository>()->GetOrCreateHelper(intervals);
AddIntegerVariableFromIntervals(helper, model, &result.vars);
gtl::STLSortAndRemoveDuplicates(&result.vars);
@@ -1337,12 +1361,9 @@ void GenerateCompletionTimeCuts(
}
CutGenerator CreateNoOverlapCompletionTimeCutGenerator(
const std::vector<IntervalVariable>& intervals, Model* model) {
SchedulingConstraintHelper* helper, Model* model) {
CutGenerator result;
SchedulingConstraintHelper* helper =
model->GetOrCreate<IntervalsRepository>()->GetOrCreateHelper(intervals);
AddIntegerVariableFromIntervals(helper, model, &result.vars);
gtl::STLSortAndRemoveDuplicates(&result.vars);
@@ -1387,16 +1408,12 @@ CutGenerator CreateNoOverlapCompletionTimeCutGenerator(
}
CutGenerator CreateCumulativeCompletionTimeCutGenerator(
const std::vector<IntervalVariable>& intervals,
const AffineExpression& capacity,
SchedulingConstraintHelper* helper, const AffineExpression& capacity,
const std::vector<AffineExpression>& demands,
const std::vector<std::optional<LinearExpression>>& energies,
Model* model) {
CutGenerator result;
SchedulingConstraintHelper* helper =
model->GetOrCreate<IntervalsRepository>()->GetOrCreateHelper(intervals);
IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
AppendVariablesToCumulativeCut(capacity, demands, integer_trail, &result);

View File

@@ -46,10 +46,10 @@ namespace sat {
//
// The maximum energy is capacity * span of intervals at level 0.
CutGenerator CreateCumulativeEnergyCutGenerator(
const std::vector<IntervalVariable>& intervals,
const AffineExpression& capacity,
SchedulingConstraintHelper* helper, const AffineExpression& capacity,
const std::vector<AffineExpression>& demands,
const std::vector<std::optional<LinearExpression>>& energies, Model* model);
const std::vector<std::optional<LinearExpression>>& energies,
const std::optional<AffineExpression>& makespan, Model* model);
// For a given set of intervals and demands, we first compute the mandatory part
// of the interval as [start_max , end_min]. We use this to calculate mandatory
@@ -63,24 +63,21 @@ CutGenerator CreateCumulativeEnergyCutGenerator(
// sum(demands of always present intervals)
// + sum(presence_literal * min_of_demand) <= capacity.
CutGenerator CreateCumulativeTimeTableCutGenerator(
const std::vector<IntervalVariable>& intervals,
const AffineExpression& capacity,
SchedulingConstraintHelper* helper, const AffineExpression& capacity,
const std::vector<AffineExpression>& demands, Model* model);
// Completion time cuts for the cumulative constraint. It is a simple relaxation
// where we replace a cumulative task with demand k and duration d by a
// no_overlap task with duration d * k / capacity_max.
CutGenerator CreateCumulativeCompletionTimeCutGenerator(
const std::vector<IntervalVariable>& intervals,
const AffineExpression& capacity,
SchedulingConstraintHelper* helper, const AffineExpression& capacity,
const std::vector<AffineExpression>& demands,
const std::vector<std::optional<LinearExpression>>& energies, Model* model);
// For a given set of intervals in a cumulative constraint, we detect violated
// mandatory precedences and create a cut for these.
CutGenerator CreateCumulativePrecedenceCutGenerator(
const std::vector<IntervalVariable>& intervals,
const AffineExpression& capacity,
SchedulingConstraintHelper* helper, const AffineExpression& capacity,
const std::vector<AffineExpression>& demands, Model* model);
// Completion time cuts for the no_overlap_2d constraint. It actually generates
@@ -117,18 +114,19 @@ CutGenerator CreateNoOverlap2dEnergyCutGenerator(
// sum(sizes of always present intervals)
// + sum(presence_literal * min_of_size) <= span of all intervals.
CutGenerator CreateNoOverlapEnergyCutGenerator(
const std::vector<IntervalVariable>& intervals, Model* model);
SchedulingConstraintHelper* helper,
const std::optional<AffineExpression>& makespan, Model* model);
// For a given set of intervals in a no_overlap constraint, we detect violated
// mandatory precedences and create a cut for these.
CutGenerator CreateNoOverlapPrecedenceCutGenerator(
const std::vector<IntervalVariable>& intervals, Model* model);
SchedulingConstraintHelper* helper, Model* model);
// For a given set of intervals in a no_overlap constraint, we detect violated
// area based cuts from Queyranne 93 [see note in the code] and create a cut for
// these.
CutGenerator CreateNoOverlapCompletionTimeCutGenerator(
const std::vector<IntervalVariable>& intervals, Model* model);
SchedulingConstraintHelper* helper, Model* model);
} // namespace sat
} // namespace operations_research