[CP-SAT] polish new cumulative cuts code; more experimental code on routing cuts; cleaning, fixes

Laurent Perron
2025-02-07 22:24:22 +01:00
parent 281a9f8e14
commit 932f6866d9
18 changed files with 1131 additions and 474 deletions

View File

@@ -430,6 +430,7 @@ cc_library(
"//ortools/base",
"//ortools/base:stl_util",
"//ortools/util:strong_integers",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/container:flat_hash_map",
"@com_google_absl//absl/container:flat_hash_set",
"@com_google_absl//absl/log:check",
@@ -1849,6 +1850,7 @@ cc_library(
":synchronization",
":util",
"//ortools/base",
"//ortools/base:mathutil",
"//ortools/base:stl_util",
"//ortools/base:strong_vector",
"//ortools/graph",
@@ -2610,6 +2612,7 @@ cc_library(
"@com_google_absl//absl/container:btree",
"@com_google_absl//absl/container:flat_hash_map",
"@com_google_absl//absl/container:flat_hash_set",
"@com_google_absl//absl/functional:any_invocable",
"@com_google_absl//absl/log",
"@com_google_absl//absl/log:check",
"@com_google_absl//absl/meta:type_traits",

View File

@@ -22,6 +22,7 @@
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
@@ -741,20 +742,46 @@ absl::flat_hash_map<std::string, SatParameters> GetNamedParameters(
// Base parameters for LNS worker.
{
SatParameters new_params = base_params;
new_params.set_stop_after_first_solution(false);
new_params.set_cp_model_presolve(true);
SatParameters lns_params = base_params;
lns_params.set_stop_after_first_solution(false);
lns_params.set_cp_model_presolve(true);
// We disable costly presolve/inprocessing.
new_params.set_use_sat_inprocessing(false);
new_params.set_cp_model_probing_level(0);
new_params.set_symmetry_level(0);
new_params.set_find_big_linear_overlap(false);
lns_params.set_use_sat_inprocessing(false);
lns_params.set_cp_model_probing_level(0);
lns_params.set_symmetry_level(0);
lns_params.set_find_big_linear_overlap(false);
new_params.set_log_search_progress(false);
new_params.set_debug_crash_on_bad_hint(false); // Can happen in lns.
new_params.set_solution_pool_size(1); // Keep the best solution found.
strategies["lns"] = new_params;
lns_params.set_log_search_progress(false);
lns_params.set_debug_crash_on_bad_hint(false); // Can happen in lns.
lns_params.set_solution_pool_size(1); // Keep the best solution found.
strategies["lns"] = lns_params;
// Note that we only do this for the derived parameters. The strategy "lns"
// will be handled along with the other ones.
auto it = absl::c_find_if(
base_params.subsolver_params(),
[](const SatParameters& params) { return params.name() == "lns"; });
if (it != base_params.subsolver_params().end()) {
lns_params.MergeFrom(*it);
}
SatParameters lns_params_base = lns_params;
lns_params_base.set_linearization_level(0);
lns_params_base.set_search_branching(SatParameters::AUTOMATIC_SEARCH);
strategies["lns_base"] = lns_params_base;
SatParameters lns_params_stalling = lns_params;
lns_params_stalling.set_search_branching(SatParameters::PORTFOLIO_SEARCH);
lns_params_stalling.set_search_random_variable_pool_size(5);
strategies["lns_stalling"] = lns_params_stalling;
// For routing, the LP relaxation seems pretty important, so we prefer a
// high linearization level to solve LNS subproblems.
SatParameters lns_params_routing = lns_params;
lns_params_routing.set_linearization_level(2);
lns_params_routing.set_search_branching(SatParameters::AUTOMATIC_SEARCH);
strategies["lns_routing"] = lns_params_routing;
}
// Add user defined ones.
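For illustration, a minimal sketch of how a user-supplied override named "lns" flows through the MergeFrom() lookup above before the derived variants are built; the override values here are hypothetical:

// Hypothetical user-side setup; only the name "lns" matters for the lookup.
SatParameters params;
SatParameters* override_params = params.add_subsolver_params();
override_params->set_name("lns");
override_params->set_linearization_level(1);
// GetNamedParameters(params) then merges this entry into lns_params before
// deriving the "lns_base", "lns_stalling" and "lns_routing" strategies.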

View File

@@ -25,6 +25,7 @@
#include <optional>
#include <random>
#include <string>
#include <string_view>
#include <thread>
#include <utility>
#include <vector>
@@ -46,6 +47,7 @@
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "google/protobuf/arena.h"
@@ -1293,14 +1295,14 @@ class FeasibilityPumpSolver : public SubSolver {
class LnsSolver : public SubSolver {
public:
LnsSolver(std::unique_ptr<NeighborhoodGenerator> generator,
const SatParameters& lns_parameters,
NeighborhoodGeneratorHelper* helper, SharedClasses* shared,
int preferred_linearization_level = 0)
const SatParameters& lns_parameters_base,
const SatParameters& lns_parameters_stalling,
NeighborhoodGeneratorHelper* helper, SharedClasses* shared)
: SubSolver(generator->name(), INCOMPLETE),
preferred_linearization_level_(preferred_linearization_level),
generator_(std::move(generator)),
helper_(helper),
lns_parameters_(lns_parameters),
lns_parameters_base_(lns_parameters_base),
lns_parameters_stalling_(lns_parameters_stalling),
shared_(shared) {}
~LnsSolver() override {
@@ -1328,7 +1330,7 @@ class LnsSolver : public SubSolver {
// change the LNS behavior.
const int32_t low = static_cast<int32_t>(task_id);
const int32_t high = static_cast<int32_t>(task_id >> 32);
std::seed_seq seed{low, high, lns_parameters_.random_seed()};
std::seed_seq seed{low, high, lns_parameters_base_.random_seed()};
random_engine_t random(seed);
NeighborhoodGenerator::SolveData data;
@@ -1372,26 +1374,22 @@ class LnsSolver : public SubSolver {
if (!neighborhood.is_generated) return;
SatParameters local_params(lns_parameters_);
local_params.set_max_deterministic_time(data.deterministic_limit);
SatParameters local_params;
// TODO(user): Tune these.
// TODO(user): This could be a good candidate for bandits.
const int64_t stall = generator_->num_consecutive_non_improving_calls();
std::string search_info;
const int search_index = stall < 10 ? 0 : task_id % 2;
switch (search_index) {
case 0:
local_params.set_search_branching(SatParameters::AUTOMATIC_SEARCH);
local_params.set_linearization_level(preferred_linearization_level_);
search_info = absl::StrCat("auto_l", preferred_linearization_level_);
local_params = lns_parameters_base_;
break;
default:
local_params.set_search_branching(SatParameters::PORTFOLIO_SEARCH);
local_params.set_search_random_variable_pool_size(5);
search_info = "folio_rnd";
local_params = lns_parameters_stalling_;
break;
}
const std::string_view search_info =
absl::StripPrefix(std::string_view(local_params.name()), "lns_");
local_params.set_max_deterministic_time(data.deterministic_limit);
std::string source_info =
neighborhood.source_info.empty() ? name() : neighborhood.source_info;
@@ -1486,7 +1484,7 @@ class LnsSolver : public SubSolver {
}
}
bool hint_feasible_before_presolve = false;
if (lns_parameters_.debug_crash_if_presolve_breaks_hint()) {
if (lns_parameters_base_.debug_crash_if_presolve_breaks_hint()) {
hint_feasible_before_presolve =
SolutionHintIsCompleteAndFeasible(lns_fragment, /*logger=*/nullptr);
}
@@ -1525,7 +1523,7 @@ class LnsSolver : public SubSolver {
context.reset(nullptr);
neighborhood.delta.Clear();
if (lns_parameters_.debug_crash_if_presolve_breaks_hint() &&
if (lns_parameters_base_.debug_crash_if_presolve_breaks_hint() &&
hint_feasible_before_presolve &&
!SolutionHintIsCompleteAndFeasible(lns_fragment,
/*logger=*/nullptr)) {
@@ -1720,10 +1718,10 @@ class LnsSolver : public SubSolver {
}
private:
int preferred_linearization_level_ = 0;
std::unique_ptr<NeighborhoodGenerator> generator_;
NeighborhoodGeneratorHelper* helper_;
const SatParameters lns_parameters_;
const SatParameters lns_parameters_base_;
const SatParameters lns_parameters_stalling_;
SharedClasses* shared_;
// This is an optimization to allocate the arena for the LNS fragment already
// at roughly the right size. We will update it with the last size of the
@@ -1780,7 +1778,9 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) {
}));
const auto name_to_params = GetNamedParameters(params);
const SatParameters& lns_params = name_to_params.at("lns");
const SatParameters& lns_params_base = name_to_params.at("lns_base");
const SatParameters& lns_params_stalling = name_to_params.at("lns_stalling");
const SatParameters& lns_params_routing = name_to_params.at("lns_routing");
// Add the NeighborhoodGeneratorHelper as a special subsolver so that its
// Synchronize() is called before any LNS neighborhood solvers.
@@ -1854,7 +1854,7 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) {
std::make_unique<RelaxationInducedNeighborhoodGenerator>(
helper, shared->response, shared->lp_solutions.get(),
shared->incomplete_solutions.get(), name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
// Add incomplete subsolvers that require an objective.
@@ -1870,37 +1870,37 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RelaxRandomVariablesGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
if (name_filter.Keep("rnd_cst_lns")) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RelaxRandomConstraintsGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
if (name_filter.Keep("graph_var_lns")) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<VariableGraphNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
if (name_filter.Keep("graph_arc_lns")) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<ArcGraphNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
if (name_filter.Keep("graph_cst_lns")) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<ConstraintGraphNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
if (name_filter.Keep("graph_dec_lns")) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<DecompositionGraphNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
if (params.use_lb_relax_lns() &&
params.num_workers() >= params.lb_relax_num_workers_threshold() &&
@@ -1908,7 +1908,7 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<LocalBranchingLpBasedNeighborhoodGenerator>(
helper, name_filter.LastName(), shared->time_limit, shared),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
const bool has_no_overlap_or_cumulative =
@@ -1921,13 +1921,13 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RandomIntervalSchedulingNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
if (name_filter.Keep("scheduling_time_window_lns")) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<SchedulingTimeWindowNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
const std::vector<std::vector<int>> intervals_in_constraints =
helper->GetUniqueIntervalSets();
@@ -1936,7 +1936,7 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<SchedulingResourceWindowsNeighborhoodGenerator>(
helper, intervals_in_constraints, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
}
@@ -1948,31 +1948,31 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RandomRectanglesPackingNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
if (name_filter.Keep("packing_square_lns")) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RectanglesPackingRelaxOneNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
if (name_filter.Keep("packing_swap_lns")) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RectanglesPackingRelaxTwoNeighborhoodsGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
if (name_filter.Keep("packing_precedences_lns")) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RandomPrecedencesPackingNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
if (name_filter.Keep("packing_slice_lns")) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<SlicePackingNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
}
@@ -1982,13 +1982,10 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RandomPrecedenceSchedulingNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared));
lns_params_base, lns_params_stalling, helper, shared));
}
}
// For routing, the LP relaxation seems pretty important, so we prefer a
// high linearization level to solve LNS subproblems.
const int routing_lin_level = 2;
const int num_circuit = static_cast<int>(
helper->TypeToConstraints(ConstraintProto::kCircuit).size());
const int num_routes = static_cast<int>(
@@ -1998,13 +1995,13 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RoutingRandomNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared, routing_lin_level));
lns_params_routing, lns_params_stalling, helper, shared));
}
if (name_filter.Keep("routing_path_lns")) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RoutingPathNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared, routing_lin_level));
lns_params_routing, lns_params_stalling, helper, shared));
}
}
if (num_routes > 0 || num_circuit > 1) {
@@ -2012,7 +2009,7 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) {
reentrant_interleaved_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RoutingFullPathNeighborhoodGenerator>(
helper, name_filter.LastName()),
lns_params, helper, shared, routing_lin_level));
lns_params_routing, lns_params_stalling, helper, shared));
}
}
}

View File

@@ -29,11 +29,11 @@
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/any_invocable.h"
#include "absl/numeric/int128.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "ortools/base/strong_vector.h"
#include "ortools/lp_data/lp_types.h"
#include "ortools/sat/implied_bounds.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/integer_base.h"
@@ -56,7 +56,7 @@ namespace sat {
struct CutGenerator {
bool only_run_at_level_zero = false;
std::vector<IntegerVariable> vars;
std::function<bool(LinearConstraintManager* manager)> generate_cuts;
absl::AnyInvocable<bool(LinearConstraintManager* manager)> generate_cuts;
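// Note: unlike std::function, absl::AnyInvocable can wrap move-only callables
// and its call operator here is non-const, which is why callers iterate over
// the generators by non-const reference before calling generate_cuts().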
};
// To simplify cut generation code, we use a more complex data structure than

View File

@@ -828,6 +828,7 @@ bool LinMinPropagator::Propagate() {
void LinMinPropagator::RegisterWith(GenericLiteralWatcher* watcher) {
const int id = watcher->Register(this);
bool has_var_also_in_exprs = false;
for (const LinearExpression& expr : exprs_) {
for (int i = 0; i < expr.vars.size(); ++i) {
const IntegerVariable& var = expr.vars[i];
@@ -837,10 +838,14 @@ void LinMinPropagator::RegisterWith(GenericLiteralWatcher* watcher) {
} else {
watcher->WatchUpperBound(var, id);
}
has_var_also_in_exprs |= (var == min_var_);
}
}
watcher->WatchUpperBound(min_var_, id);
watcher->RegisterReversibleInt(id, &rev_unique_candidate_);
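// If min_var_ also appears in one of the expressions, propagating a bound of
// min_var_ can in turn tighten that expression, so a single propagation pass
// may not be enough to reach the fixed point.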
if (has_var_also_in_exprs) {
watcher->NotifyThatPropagatorMayNotReachFixedPointInOnePass(id);
}
}
ProductPropagator::ProductPropagator(AffineExpression a, AffineExpression b,

View File

@@ -2192,7 +2192,7 @@ bool LinearProgrammingConstraint::Propagate() {
// Try to add cuts.
if (level == 0 || !parameters_.only_add_cuts_at_level_zero()) {
for (const CutGenerator& generator : cut_generators_) {
for (CutGenerator& generator : cut_generators_) {
if (level > 0 && generator.only_run_at_level_zero) continue;
if (!generator.generate_cuts(&constraint_manager_)) {
return false;

View File

@@ -92,6 +92,8 @@ std::string ValidateParameters(const SatParameters& params) {
TEST_IS_FINITE(strategy_change_increase_ratio);
TEST_IS_FINITE(symmetry_detection_deterministic_time_limit);
TEST_IS_FINITE(variable_activity_decay);
TEST_IN_RANGE(routing_cut_subset_size_for_tight_binary_relation_bound, 0, 32);
TEST_IS_FINITE(routing_cut_dp_effort);
TEST_IS_FINITE(lns_initial_difficulty);
TEST_IS_FINITE(lns_initial_deterministic_limit);

View File

@@ -31,6 +31,7 @@
#include "absl/meta/type_traits.h"
#include "absl/types/span.h"
#include "ortools/base/logging.h"
#include "ortools/base/mathutil.h"
#include "ortools/base/stl_util.h"
#include "ortools/base/strong_vector.h"
#include "ortools/graph/graph.h"
@@ -1387,5 +1388,57 @@ int GreaterThanAtLeastOneOfDetector::AddGreaterThanAtLeastOneOfConstraints(
return num_added_constraints;
}
bool BinaryRelationRepository::PropagateLocalBounds(
const IntegerTrail& integer_trail, Literal lit,
const absl::flat_hash_map<IntegerVariable, IntegerValue>& input,
absl::flat_hash_map<IntegerVariable, IntegerValue>* output) const {
output->clear();
if (lit.Index() >= lit_to_relations_.size()) return true;
auto get_lower_bound = [&](IntegerVariable var) {
const auto it = input.find(var);
if (it != input.end()) return it->second;
return integer_trail.LevelZeroLowerBound(var);
};
auto get_upper_bound = [&](IntegerVariable var) {
return -get_lower_bound(NegationOf(var));
};
auto update_lower_bound_by_var = [&](IntegerVariable var, IntegerValue lb) {
if (lb <= integer_trail.LevelZeroLowerBound(var)) return;
const auto [it, inserted] = output->insert({var, lb});
if (!inserted) {
it->second = std::max(it->second, lb);
}
};
auto update_upper_bound_by_var = [&](IntegerVariable var, IntegerValue ub) {
update_lower_bound_by_var(NegationOf(var), -ub);
};
auto update_var_bounds = [&](const LinearTerm& a, const LinearTerm& b,
IntegerValue lhs, IntegerValue rhs) {
if (a.coeff == 0) return;
// lb(b.y) <= b.y <= ub(b.y) and lhs <= a.x + b.y <= rhs imply
// ceil((lhs - ub(b.y)) / a) <= x <= floor((rhs - lb(b.y)) / a)
lhs = lhs - b.coeff * get_upper_bound(b.var);
rhs = rhs - b.coeff * get_lower_bound(b.var);
update_lower_bound_by_var(a.var, MathUtil::CeilOfRatio(lhs, a.coeff));
update_upper_bound_by_var(a.var, MathUtil::FloorOfRatio(rhs, a.coeff));
};
for (const int relation_index : lit_to_relations_[lit]) {
auto r = relations_[relation_index];
r.a.MakeCoeffPositive();
r.b.MakeCoeffPositive();
update_var_bounds(r.a, r.b, r.lhs, r.rhs);
update_var_bounds(r.b, r.a, r.lhs, r.rhs);
}
// Check feasibility.
// TODO(user): we might do that earlier?
for (const auto [var, lb] : *output) {
if (lb > integer_trail.LevelZeroUpperBound(var)) return false;
}
return true;
}
} // namespace sat
} // namespace operations_research
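To make the bound derivation in update_var_bounds() concrete, consider a small hypothetical instance: with a relation 6 <= 2x + 3y <= 20 and known bounds 0 <= y <= 4, the code computes lhs' = 6 - 3 * ub(y) = -6 and rhs' = 20 - 3 * lb(y) = 20, hence ceil(-6 / 2) = -3 <= x <= floor(20 / 2) = 10.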

View File

@@ -509,6 +509,17 @@ class BinaryRelationRepository {
// relations have been added.
void Build();
// Assuming level-zero bounds + any (var >= value) in the input map,
// fills "output" with a "propagated" set of bounds assuming lit is true (by
// using the relations enforced by lit). Note that we only fill bounds that
// are strictly better than the level-zero ones in output.
//
// Returns false if the new bounds are infeasible at level zero.
bool PropagateLocalBounds(
const IntegerTrail& integer_trail, Literal lit,
const absl::flat_hash_map<IntegerVariable, IntegerValue>& input,
absl::flat_hash_map<IntegerVariable, IntegerValue>* output) const;
private:
bool is_built_ = false;
std::vector<Relation> relations_;

File diff suppressed because it is too large.

View File

@@ -45,9 +45,23 @@ class MinOutgoingFlowHelper {
// Returns the minimum flow going out of `subset`, based on a conservative
// estimate of the maximum number of nodes of a feasible path inside this
// subset. `subset` must not be empty and must not contain the depot (node 0).
// Paths are approximated by their length and their last node, and can thus
// contain cycles. The complexity is O(subset.size() ^ 3).
int ComputeMinOutgoingFlow(absl::Span<const int> subset);
// Same as above, but uses less conservative estimates (paths are approximated
// by their set of nodes and their last node -- hence they can't contain
// cycles). The complexity is O(2 ^ subset.size()).
int ComputeTightMinOutgoingFlow(absl::Span<const int> subset);
private:
// Returns the minimum flow going out of a subset of size `subset_size`,
// assuming that the longest feasible path inside this subset has
// `longest_path_length` nodes and that there are at most `max_longest_paths`
// such paths.
int GetMinOutgoingFlow(int subset_size, int longest_path_length,
int max_longest_paths);
const std::vector<int>& tails_;
const std::vector<int>& heads_;
const std::vector<Literal>& literals_;
@@ -66,9 +80,11 @@ class MinOutgoingFlowHelper {
// position i+1.
std::vector<bool> in_subset_;
// For each node n, the indices (in tails_, heads_) of the m->n arcs inside
// the subset (self arcs excepted).
std::vector<int> index_in_subset_;
// For each node n, the indices (in tails_, heads_) of the m->n and n->m arcs
// inside the subset (self arcs excepted).
std::vector<std::vector<int>> incoming_arc_indices_;
std::vector<std::vector<int>> outgoing_arc_indices_;
// For each node n, whether it can appear at the current and next position in
// a feasible path.
std::vector<bool> reachable_;
@@ -171,17 +187,17 @@ std::vector<int> ComputeGomoryHuTree(
// connected. Note that we already assume basic constraints to be in the lp, so
// we do not add any cuts for components of size 1.
CutGenerator CreateStronglyConnectedGraphCutGenerator(
int num_nodes, std::vector<int> tails, std::vector<int> heads,
std::vector<Literal> literals, Model* model);
int num_nodes, absl::Span<const int> tails, absl::Span<const int> heads,
absl::Span<const Literal> literals, Model* model);
// Almost the same as CreateStronglyConnectedGraphCutGenerator() but for each
// component, computes the demand needed to serve it, and depending on whether
// it contains the depot (node zero) or not, computes the minimum number of
// vehicles that need to cross the component border.
CutGenerator CreateCVRPCutGenerator(int num_nodes, std::vector<int> tails,
std::vector<int> heads,
std::vector<Literal> literals,
std::vector<int64_t> demands,
CutGenerator CreateCVRPCutGenerator(int num_nodes, absl::Span<const int> tails,
absl::Span<const int> heads,
absl::Span<const Literal> literals,
absl::Span<const int64_t> demands,
int64_t capacity, Model* model);
// Try to find a subset where the current LP capacity of the outgoing or

View File

@@ -45,6 +45,22 @@ namespace {
using ::testing::ElementsAre;
TEST(MinOutgoingFlowHelperTest, TwoNodesWithoutConstraints) {
Model model;
const std::vector<int> tails = {0, 1};
const std::vector<int> heads = {1, 0};
const std::vector<Literal> literals = {
Literal(model.Add(NewBooleanVariable()), true),
Literal(model.Add(NewBooleanVariable()), true)};
MinOutgoingFlowHelper helper(2, tails, heads, literals, &model);
const int min_flow = helper.ComputeMinOutgoingFlow({0, 1});
const int tight_min_flow = helper.ComputeTightMinOutgoingFlow({0, 1});
EXPECT_EQ(min_flow, 1);
EXPECT_EQ(tight_min_flow, 1);
}
TEST(MinOutgoingFlowHelperTest, CapacityConstraints) {
Model model;
const int num_nodes = 5;
@@ -84,6 +100,7 @@ TEST(MinOutgoingFlowHelperTest, CapacityConstraints) {
MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model);
const int min_flow = helper.ComputeMinOutgoingFlow({1, 2, 3, 4});
const int tight_min_flow = helper.ComputeTightMinOutgoingFlow({1, 2, 3, 4});
// Due to the capacity constraints, a feasible path can have at most 3 nodes,
// hence at least two paths are needed. The lower bound of the vehicle load
@@ -96,6 +113,7 @@ TEST(MinOutgoingFlowHelperTest, CapacityConstraints) {
// 3 | 0 13 24 -
// 4 | 0 14 24 -
EXPECT_EQ(min_flow, 2);
EXPECT_EQ(tight_min_flow, 2);
}
TEST(MinOutgoingFlowHelperTest, TimeWindows) {
@@ -136,6 +154,7 @@ TEST(MinOutgoingFlowHelperTest, TimeWindows) {
MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model);
const int min_flow = helper.ComputeMinOutgoingFlow({1, 2, 3, 4});
const int tight_min_flow = helper.ComputeTightMinOutgoingFlow({1, 2, 3, 4});
// Due to the time window constraints, a feasible path can have at most 3
// nodes, hence at least two paths are needed. The earliest departure time
@@ -148,6 +167,7 @@ TEST(MinOutgoingFlowHelperTest, TimeWindows) {
// 3 | 18 18 - -
// 4 | 28 28 28 -
EXPECT_EQ(min_flow, 2);
EXPECT_EQ(tight_min_flow, 2);
}
// Test on a simple tree:
@@ -473,7 +493,7 @@ TEST(CreateFlowCutGeneratorTest, BasicExample) {
*min_incoming_flow = std::max(IntegerValue(0), demand);
*min_outgoing_flow = std::max(IntegerValue(0), -demand);
};
const CutGenerator generator = CreateFlowCutGenerator(
CutGenerator generator = CreateFlowCutGenerator(
num_nodes, tails, heads, capacities, get_flows, &model);
LinearConstraintManager manager(&model);
@@ -517,7 +537,7 @@ TEST(CreateFlowCutGeneratorTest, WithMinusOneArcs) {
*min_incoming_flow = std::max(IntegerValue(0), demand);
*min_outgoing_flow = std::max(IntegerValue(0), -demand);
};
const CutGenerator generator = CreateFlowCutGenerator(
CutGenerator generator = CreateFlowCutGenerator(
num_nodes, tails, heads, capacities, get_flows, &model);
LinearConstraintManager manager(&model);

View File

@@ -23,7 +23,7 @@ option java_multiple_files = true;
// Contains the definitions for all the sat algorithm parameters and their
// default values.
//
// NEXT TAG: 313
// NEXT TAG: 315
message SatParameters {
// In some context, like in a portfolio of search, it makes sense to name a
// given parameters set for logging purpose.
@@ -914,12 +914,26 @@ message SatParameters {
// If the size of a subset of nodes of a RoutesConstraint is less than this
// value, use linear constraints of size 1 and 2 (such as capacity and time
// window constraints) enforced by the arc literals to compute cuts for this
// subset. The algorithm for these cuts has a O(n^3) complexity, where n is
// the subset size. Hence the value of this parameter should not be too large
// (e.g. 10 or 20).
// subset (unless the subset size is less than
// routing_cut_subset_size_for_tight_binary_relation_bound, in which case the
// corresponding algorithm is used instead). The algorithm for these cuts has
// an O(n^3) complexity, where n is the subset size. Hence the value of this
// parameter should not be too large (e.g. 10 or 20).
optional int32 routing_cut_subset_size_for_binary_relation_bound = 312
[default = 0];
// Similar to above, but with a different algorithm producing better cuts, at
// the price of a higher O(2^n) complexity, where n is the subset size. Hence
// the value of this parameter should be small (e.g. less than 10).
optional int32 routing_cut_subset_size_for_tight_binary_relation_bound = 313
[default = 0];
// The amount of "effort" to spend in dynamic programming for computing
// routing cuts. This is in terms of basic operations needed by the algorithm
// in the worst case, so a value like 1e8 should take less than a second to
// compute.
optional double routing_cut_dp_effort = 314 [default = 1e7];
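A minimal C++ usage sketch for these three fields; the values chosen here are only illustrative:

SatParameters params;
// O(n^3) bound for subsets of up to 16 nodes...
params.set_routing_cut_subset_size_for_binary_relation_bound(16);
// ...and the tighter O(2^n) bound for subsets of up to 8 nodes.
params.set_routing_cut_subset_size_for_tight_binary_relation_bound(8);
// Allow roughly 1e8 elementary DP operations per cut computation.
params.set_routing_cut_dp_effort(1e8);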
// The search branching will be used to decide how to branch on unfixed nodes.
enum SearchBranching {
// Try to fix all literals using the underlying SAT solver's heuristics,

View File

@@ -1266,49 +1266,126 @@ void GenerateShortCompletionTimeCutsWithExactBound(
top_n_cuts.TransferToManager(manager);
}
namespace {
// Returns a copy of the event with the start time increased to time.
// Energy (min and decomposed) are updated accordingly.
CtEvent TrimEventAfter(IntegerValue time, const CtEvent& old_event) {
DCHECK_GT(time, old_event.x_start_min);
CtEvent event = old_event; // Copy.
event.lifted = true;
// Build the vector of energies as the vector of sizes.
event.energy_min = ComputeEnergyMinInWindow(
event.x_start_min, event.x_start_max, event.x_end_min, event.x_end_max,
event.x_size_min, event.y_size_min, event.decomposed_energy, time,
event.x_end_max);
event.x_size_min = event.x_size_min + event.x_start_min - time;
event.x_start_min = time;
if (event.energy_min > event.x_size_min * event.y_size_min) {
event.use_energy = true;
}
DCHECK_GE(event.energy_min, event.x_size_min * event.y_size_min);
return event;
}
// Collects all possible demand values for the event, and adds them to the
// subset sum of the reachable capacity of the cumulative constraint.
void AddEventDemandsToCapacitySubsetSum(
const CtEvent& event, const VariablesAssignment& assignment,
IntegerValue capacity_max, std::vector<int64_t>& tmp_possible_demands,
MaxBoundedSubsetSum& dp) {
if (dp.CurrentMax() != capacity_max) {
if (event.y_size_is_fixed) {
dp.Add(event.y_size_min.value());
} else if (!event.decomposed_energy.empty()) {
tmp_possible_demands.clear();
for (const auto& [literal, size, demand] : event.decomposed_energy) {
if (assignment.LiteralIsFalse(literal)) continue;
if (assignment.LiteralIsTrue(literal)) {
tmp_possible_demands.assign({demand.value()});
break;
}
tmp_possible_demands.push_back(demand.value());
}
dp.AddChoices(tmp_possible_demands);
} else { // event.y_size_min is not fixed, we abort.
// TODO(user): We could still add all events in the range if the range
// is small.
dp.Add(capacity_max.value());
}
}
}
// In the no_overlap case, we have:
// area = event.x_size_min ^ 2
// In the simple cumulative case, we split the task (demand, size) into
// `demand` tasks as in the no_overlap case.
// area = event.y_size_min * event.x_size_min * event.x_size_min
// In the cumulative case, the minimum energy of a task can be greater than
// the product of its size and demand (energy_min > size * demand).
// We use energy_min * size.
IntegerValue SmithRuleIndividualContribution(const CtEvent& event) {
if (event.use_energy) {
return event.energy_min * event.x_size_min;
} else {
return event.x_size_min * event.x_size_min * event.y_size_min;
}
}
} // namespace
// We generate the cut based on Smith's rule, following:
// M. Queyranne, Structure of a simple scheduling polyhedron,
// Mathematical Programming 58 (1993), 263-285.
//
// The original cut is:
// sum(end_min_i * duration_min_i) >=
// (sum(duration_min_i^2) + sum(duration_min_i)^2) / 2
// sum(end_min_i * size_min_i) >=
// (sum(size_min_i^2) + sum(size_min_i)^2) / 2
// We strengthen this cut by noticing that if all tasks start after S,
// then replacing end_min_i by (end_min_i - S) is still valid.
//
// A second difference is that we lift intervals that start before a given
// value, but are forced to cross it.
//
// In the case of a cumulative constraint, we compute the cuts as if it
// were a disjunctive cut with all the tasks being `demand` tasks with
// duration `size_min`, all these spread out on `reachable_capacity`
// no_overlap machines, counted from `current_start_min`.
// In the case of a cumulative constraint with a capacity of C, we compute a
// valid equation by splitting the task (size_min si, demand_min di) into di
// tasks of size si and demand 1, that we spread across C no_overlap
// constraints. When doing so, the lhs of the equation is the same and the
// first term of the rhs is also unchanged. A lower bound of the second term
// of the rhs is reached when the split is exact (each no_overlap sees a total
// load of sum(si * di) / C). Thus the second term is greater than or equal to
// (sum(si * di) ^ 2) / (2 * C).
//
// Thus, for each task (size si, end ei, demand di) and capacity c:
//   sum(si * di * (ei - current_start_min)) >=
//       sum(di * si ^ 2) / 2 + c * (sum(si * di) / c) ^ 2 / 2
// Sometimes, the min energy of the task i is greater than si * di.
// Let's introduce ai the minimum energy of the task and rewrite the previous
// equation. In that new setting, we can rewrite the cumulative transformation
// by splitting each task into at least di tasks of size at least si and demand
// 1.
//
// sum(si * di * ei) - sum(si * di) * current_start_min >=
//     sum(di * si ^ 2) / 2 + sum(si * di) ^ 2 / (2 * c)
// In that setting, the lhs is rewritten as sum(ai * ei) and the second term of
// the rhs is rewritten as sum(ai) ^ 2 / (2 * C).
//
// By introducing ai the min energy of the task, we reinforce the
// constraint into:
// The question is how to rewrite the term `sum(di * si * si)`. The minimum
// contribution is reached when the task has size si and demand ai / si (note
// that si is the minimum size of the task, and di its minimum demand). We can
// replace the small rectangle area term by ai * si.
//
// sum(ai * ei) >=
//     sum(di * ai) / 2 + sum(ai) ^ 2 / (2 * c) + sum(ai) * current_start_min
// sum (ai * ei) - sum (ai) * current_start_min >=
// sum(si * ai) / 2 + (sum (ai) ^ 2) / (2 * C)
//
// The separation procedure is implemented using two loops:
// - first, we loop on potential start times in increasing order.
// - second, we add tasks that must contribute after this start time,
//   ordered by increasing end time in the LP relaxation.
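//
// Small illustrative instance (hypothetical): with capacity C = 2 and two
// tasks of size si = 2 and demand di = 1 (so ai = si * di = 2), both released
// at current_start_min = 0 and both able to end at ei = 2, the lhs is
// 2*2 + 2*2 = 8 and the rhs is (2*2 + 2*2) / 2 + (2 + 2)^2 / (2 * 2) = 8, so
// the cut holds with equality.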
void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name,
std::vector<CtEvent> events,
IntegerValue capacity_max,
bool skip_low_sizes, Model* model,
LinearConstraintManager* manager) {
TopNCuts top_n_cuts(5);
const VariablesAssignment& assignment =
model->GetOrCreate<Trail>()->Assignment();
std::vector<int64_t> tmp_possible_demands;
// Sort by start min to bucketize by start_min.
std::sort(events.begin(), events.end(),
@@ -1316,6 +1393,8 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name,
return std::tie(e1.x_start_min, e1.y_size_min, e1.x_lp_end) <
std::tie(e2.x_start_min, e2.y_size_min, e2.x_lp_end);
});
// First loop: we loop on potential start times.
for (int start = 0; start + 1 < events.size(); ++start) {
// Skip to the next start_min value.
if (start > 0 &&
@@ -1325,8 +1404,6 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name,
const IntegerValue sequence_start_min = events[start].x_start_min;
std::vector<CtEvent> residual_tasks(events.begin() + start, events.end());
const VariablesAssignment& assignment =
model->GetOrCreate<Trail>()->Assignment();
// We look at events that start before sequence_start_min, but are forced
// to cross this time point. In that case, we replace this event by a
@@ -1336,83 +1413,56 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name,
for (int before = 0; before < start; ++before) {
if (events[before].x_start_min + events[before].x_size_min >
sequence_start_min) {
// Build the vector of energies as the vector of sizes.
CtEvent event = events[before]; // Copy.
event.lifted = true;
event.energy_min = ComputeEnergyMinInWindow(
event.x_start_min, event.x_start_max, event.x_end_min,
event.x_end_max, event.x_size_min, event.y_size_min,
event.decomposed_energy, sequence_start_min, event.x_end_max);
event.x_size_min =
event.x_size_min + event.x_start_min - sequence_start_min;
event.x_start_min = sequence_start_min;
if (event.energy_min > event.x_size_min * event.y_size_min) {
event.use_energy = true;
}
DCHECK_GE(event.energy_min, event.x_size_min * event.y_size_min);
CtEvent event = TrimEventAfter(sequence_start_min, events[before]);
if (event.energy_min <= 0) continue;
residual_tasks.push_back(event);
residual_tasks.push_back(std::move(event));
}
}
std::sort(residual_tasks.begin(), residual_tasks.end(),
[](const CtEvent& e1, const CtEvent& e2) {
return e1.x_lp_end < e2.x_lp_end;
});
// Best cut so far for this loop.
int best_end = -1;
double best_efficacy = 0.01;
IntegerValue best_min_contrib = 0;
IntegerValue best_capacity = 0;
bool loop_is_valid = true;
// Area of the large rectangle.
// Used in the first term of the rhs of the equation.
IntegerValue sum_event_contributions = 0;
// Used in the second term of the rhs of the equation.
IntegerValue sum_energy = 0;
// For normalization.
IntegerValue sum_square_energy = 0;
// Area of the small rectangles.
IntegerValue weighted_sum_square_duration = 0;
double lp_contrib = 0.0;
IntegerValue current_start_min(kMaxIntegerValue);
MaxBoundedSubsetSum dp(capacity_max.value());
std::vector<int64_t> possible_demands;
// We will add tasks one by one, sorted by end time, and evaluate the
// potential cut at each step.
std::sort(residual_tasks.begin(), residual_tasks.end(),
[](const CtEvent& e1, const CtEvent& e2) {
return e1.x_lp_end < e2.x_lp_end;
});
// Second loop: we add tasks one by one.
for (int i = 0; i < residual_tasks.size(); ++i) {
const CtEvent& event = residual_tasks[i];
DCHECK_GE(event.x_start_min, sequence_start_min);
const IntegerValue tmp_contrib =
CapProdI(event.x_size_min, event.energy_min);
// Make sure we do not overflow, and we do not generate bogus cuts if we
// do.
loop_is_valid = false;
// Make sure we do not overflow.
if (!AddTo(event.energy_min, &sum_energy)) break;
if (!AddTo(tmp_contrib, &weighted_sum_square_duration)) break;
if (!AddTo(SmithRuleIndividualContribution(event),
&sum_event_contributions)) {
break;
}
if (!AddSquareTo(event.energy_min, &sum_square_energy)) break;
lp_contrib += event.x_lp_end * ToDouble(event.energy_min);
current_start_min = std::min(current_start_min, event.x_start_min);
if (dp.CurrentMax() != capacity_max) {
if (event.y_size_is_fixed) {
dp.Add(event.y_size_min.value());
} else if (!event.decomposed_energy.empty()) {
possible_demands.clear();
for (const auto& [literal, size, demand] : event.decomposed_energy) {
if (assignment.LiteralIsFalse(literal)) continue;
if (assignment.LiteralIsTrue(literal)) {
possible_demands.assign({demand.value()});
break;
}
possible_demands.push_back(demand.value());
}
dp.AddChoices(possible_demands);
} else {
dp.Add(capacity_max.value());
}
}
// Maintain the reachable capacity with a bounded complexity subset sum.
AddEventDemandsToCapacitySubsetSum(event, assignment, capacity_max,
tmp_possible_demands, dp);
// This is competing with the brute force approach. Skip cases covered
// by the other code.
@@ -1420,6 +1470,7 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name,
const IntegerValue reachable_capacity = dp.CurrentMax();
// Do we have a violated cut ?
const IntegerValue large_rectangle_contrib =
CapProdI(sum_energy, sum_energy);
if (AtMinOrMaxInt64I(large_rectangle_contrib)) break;
@@ -1427,14 +1478,12 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name,
CeilRatio(large_rectangle_contrib, reachable_capacity);
IntegerValue min_contrib =
CapAddI(weighted_sum_square_duration, mean_large_rectangle_contrib);
CapAddI(sum_event_contributions, mean_large_rectangle_contrib);
if (AtMinOrMaxInt64I(min_contrib)) break;
// The above is twice the area.
min_contrib = CeilRatio(min_contrib, 2);
// shift contribution by current_start_min.
if (!AddTo(CapProdI(sum_energy, current_start_min), &min_contrib)) break;
if (!AddProductTo(sum_energy, current_start_min, &min_contrib)) break;
// The efficacy of the cut is the normalized violation of the above
// equation. We will normalize by the sqrt of the sum of squared energies.
@@ -1453,13 +1502,10 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name,
best_min_contrib = min_contrib;
best_capacity = reachable_capacity;
}
// We reached this point, so the loop is valid.
loop_is_valid = true;
}
if (!loop_is_valid) continue;
// We have inserted all tasks. Have we found a violated cut ?
// If so, add the most violated one to the top_n cut container.
if (best_end != -1) {
LinearConstraintBuilder cut(model, best_min_contrib, kMaxIntegerValue);
bool is_lifted = false;

View File

@@ -371,16 +371,28 @@ bool SchedulingConstraintHelper::PropagatePrecedence(int a, int b) {
if (before.coeff != 1) return true;
if (after.var == kNoIntegerVariable) return true;
if (before.var == kNoIntegerVariable) return true;
if (before.var == after.var) {
if (before.constant <= after.constant) {
return true;
} else {
sat_solver_->NotifyThatModelIsUnsat();
return false;
}
}
const IntegerValue offset = before.constant - after.constant;
if (precedence_relations_->Add(before.var, after.var, offset)) {
VLOG(2) << "new relation " << TaskDebugString(a)
<< " <= " << TaskDebugString(b);
// TODO(user): Adding new constraint during propagation might not be the
// best idea as it can create some complication.
AddWeightedSumLowerOrEqual({}, {before.var, after.var},
{int64_t{1}, int64_t{-1}}, -offset.value(),
model_);
if (before.var == NegationOf(after.var)) {
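// Here after.var == NegationOf(before.var), so the generic constraint
// before.var - after.var <= -offset reduces to 2 * before.var <= -offset.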
AddWeightedSumLowerOrEqual({}, {before.var}, {int64_t{2}},
-offset.value(), model_);
} else {
// TODO(user): Adding new constraint during propagation might not be the
// best idea as it can create some complication.
AddWeightedSumLowerOrEqual({}, {before.var, after.var},
{int64_t{1}, int64_t{-1}}, -offset.value(),
model_);
}
if (sat_solver_->ModelIsUnsat()) return false;
}
return true;

View File

@@ -903,5 +903,110 @@ std::vector<absl::Span<int>> AtMostOneDecomposition(
return decomposer.decomposition();
}
absl::Span<const int64_t> SortedSubsetSums::Compute(
absl::Span<const int64_t> elements, int64_t maximum_sum,
bool abort_if_maximum_reached) {
sums_.clear();
sums_.push_back(0);
for (const int64_t e : elements) {
DCHECK_GE(e, 0);
if (e == 0 || e > maximum_sum) continue;
// Optimization: If all the sums in [0, maximum_sum] are already reachable
// we can abort early since no new reachable sum will be discovered.
if (sums_.size() == maximum_sum + 1) return sums_;
// Early abort, if requested, when we already reached maximum_sum.
if (abort_if_maximum_reached && sums_.back() == maximum_sum) return sums_;
// We merge sort sums_ and sums_ + e into new_sums_.
int i = 0;
int j = 0;
new_sums_.clear();
const int size = sums_.size();
const int64_t* const data = sums_.data();
int64_t last_pushed = -1;
while (i < size) {
DCHECK_LE(j, i); // Since we add a positive e.
const int64_t a = data[i];
const int64_t b = data[j] + e;
int64_t to_push;
if (a <= b) {
++i;
if (a == b) ++j;
to_push = a;
} else {
to_push = b;
++j;
}
if (to_push == last_pushed) continue;
if (to_push > maximum_sum) {
j = size; // so we don't keep pushing below.
break;
}
last_pushed = to_push;
new_sums_.push_back(to_push);
}
// We are sure of this since we will break only when to_push > maximum_sum
// and we are guaranteed to have pushed all sums below "to_push" before, which
// includes all the initial sums in sums_.
DCHECK_EQ(i, size);
for (; j < size; ++j) {
const int64_t to_push = data[j] + e;
if (to_push == last_pushed) continue;
if (to_push > maximum_sum) break;
last_pushed = to_push;
new_sums_.push_back(to_push);
}
std::swap(sums_, new_sums_);
}
return sums_;
}
double MaxBoundedSubsetSumExact::ComplexityEstimate(int num_elements,
int64_t bin_size) {
double estimate = std::numeric_limits<double>::infinity();
if (num_elements < 100) {
estimate = 2 * pow(2.0, num_elements / 2);
}
return std::min(estimate, static_cast<double>(bin_size) *
static_cast<double>(num_elements));
}
int64_t MaxBoundedSubsetSumExact::MaxSubsetSum(
absl::Span<const int64_t> elements, int64_t bin_size) {
// Get rid of a few easy to decide corner cases.
if (elements.empty()) return 0;
if (elements.size() == 1) {
if (elements[0] > bin_size) return 0;
return elements[0];
}
// We split the elements in two equal-sized parts.
const int middle = elements.size() / 2;
const auto span_a = sums_a_.Compute(elements.subspan(0, middle), bin_size,
/*abort_if_maximum_reached=*/true);
if (span_a.back() == bin_size) return bin_size;
const auto span_b = sums_b_.Compute(elements.subspan(middle), bin_size,
/*abort_if_maximum_reached=*/true);
if (span_b.back() == bin_size) return bin_size;
// For all possible sum a, we compute the largest sum b that fits.
// We do that in linear time thanks to the sorted partial sums.
int64_t result = 0;
CHECK(!span_a.empty());
CHECK(!span_b.empty());
int j = span_b.size() - 1;
for (int i = 0; i < span_a.size(); ++i) {
while (j >= 0 && span_a[i] + span_b[j] > bin_size) --j;
result = std::max(result, span_a[i] + span_b[j]);
if (result == bin_size) return bin_size;
}
return result;
}
} // namespace sat
} // namespace operations_research

View File

@@ -502,6 +502,54 @@ class FirstFewValues {
std::unique_ptr<int64_t[]> new_reachable_;
};
// Yet another variant of FirstFewValues or MaxBoundedSubsetSum.
class SortedSubsetSums {
public:
// Computes all the possible subset sums in [0, maximum_sum].
// Returns them sorted. All elements must be non-negative.
//
// If abort_if_maximum_reached is true, we might not return all possible
// subset sums as we stop the exploration as soon as a subset sum is equal to
// maximum_sum. When this happens, we guarantee that the last element returned
// will be maximum_sum though.
//
// Worst case complexity is in O(2^num_elements) if maximum_sum is large or
// O(maximum_sum * num_elements) if that is lower.
//
// TODO(user): We could optimize even further the case of a small maximum_sum.
absl::Span<const int64_t> Compute(absl::Span<const int64_t> elements,
int64_t maximum_sum,
bool abort_if_maximum_reached = false);
// Returns the possible subset sums sorted.
absl::Span<const int64_t> SortedSums() const { return sums_; }
private:
std::vector<int64_t> sums_;
std::vector<int64_t> new_sums_;
};
// Similar to MaxBoundedSubsetSum() above but uses a different algorithm.
class MaxBoundedSubsetSumExact {
public:
// If we pack the given elements into a bin of size 'bin_size', returns the
// largest possible sum that can be reached.
//
// This implementation allows solving this in O(2^(num_elements/2)), which
// easily handles 30 or 40 elements. If bin_size is small, the complexity is
// more like O(num_elements * bin_size).
int64_t MaxSubsetSum(absl::Span<const int64_t> elements, int64_t bin_size);
// Returns an estimate of how many elementary operations
// MaxSubsetSum() is going to take.
double ComplexityEstimate(int num_elements, int64_t bin_size);
private:
// We use a class just to reuse the memory and not allocate it on each query.
SortedSubsetSums sums_a_;
SortedSubsetSums sums_b_;
};
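A short usage sketch of the two helpers above; the input values are only illustrative:

SortedSubsetSums sums;
// All subset sums of {3, 5, 9} that do not exceed 10: returns {0, 3, 5, 8, 9}.
const absl::Span<const int64_t> reachable =
    sums.Compute({3, 5, 9}, /*maximum_sum=*/10);

MaxBoundedSubsetSumExact exact;
// Largest subset sum of {3, 5, 9} fitting in a bin of size 10: returns 9.
const int64_t best = exact.MaxSubsetSum({3, 5, 9}, /*bin_size=*/10);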
// Use Dynamic programming to solve a single knapsack. This is used by the
// presolver to simplify variables appearing in a single linear constraint.
//

View File

@@ -36,6 +36,7 @@
#include "ortools/base/gmock.h"
#include "ortools/base/logging.h"
#include "ortools/base/mathutil.h"
#include "ortools/base/stl_util.h"
#include "ortools/sat/cp_model.pb.h"
#include "ortools/sat/cp_model_solver.h"
#include "ortools/sat/cp_model_utils.h"
@@ -972,6 +973,82 @@ TEST(WeightedPickTest, SimpleTest) {
}
}
TEST(SortedSubsetSums, RandomTest) {
const int maximum = 100;
const int num_elements = 20;
absl::BitGen random;
std::vector<int64_t> elements(num_elements);
for (int i = 0; i < num_elements; ++i) {
elements[i] = absl::Uniform(random, 0, maximum);
}
std::vector<int64_t> brute_force_result;
for (int mask = 0; mask < (1 << num_elements); ++mask) {
int64_t sum = 0;
for (int i = 0; i < num_elements; ++i) {
if ((mask >> i) & 1) sum += elements[i];
}
if (sum <= maximum) {
brute_force_result.push_back(sum);
}
}
gtl::STLSortAndRemoveDuplicates(&brute_force_result);
SortedSubsetSums tested;
tested.Compute(elements, maximum);
EXPECT_EQ(tested.SortedSums(), absl::MakeSpan(brute_force_result));
}
TEST(SortedSubsetSums, CornerCase) {
SortedSubsetSums helper;
EXPECT_THAT(helper.Compute({0, 5}, 4), ElementsAre(0));
}
TEST(MaxBoundedSubsetSumExactTest, RandomTest) {
const int bin_size = 100;
const int num_elements = 20;
absl::BitGen random;
std::vector<int64_t> elements(num_elements);
const int average_size = 2 * bin_size / num_elements;
int64_t sum_of_all = 0;
for (int i = 0; i < num_elements; ++i) {
elements[i] = absl::Uniform(random, average_size - 3, average_size + 3);
sum_of_all += elements[i];
}
// Let's compute the maximum by brute force.
int64_t brute_force_result = 0;
for (int mask = 0; mask < (1 << num_elements); ++mask) {
int64_t sum = 0;
for (int i = 0; i < num_elements; ++i) {
if ((mask >> i) & 1) sum += elements[i];
}
if (sum > bin_size) continue;
brute_force_result = std::max(brute_force_result, sum);
}
MaxBoundedSubsetSumExact helper;
EXPECT_EQ(brute_force_result, helper.MaxSubsetSum(elements, bin_size));
}
TEST(MaxBoundedSubsetSumExactTest, UsedToFail) {
const int64_t capacity = 109;
const std::vector<int64_t> elements = {
9, 2, 1, 9, 7, 10, 3, 4, 5, 5, 5, 8, 1, 3, 1, 7, 9, 5, 2, 7, 9, 6, 1,
5, 2, 2, 2, 3, 1, 2, 3, 5, 9, 8, 4, 1, 5, 3, 5, 4, 6, 7, 9, 1, 1, 3};
int64_t sum_of_elements = 0;
for (const int64_t e : elements) sum_of_elements += e;
MaxBoundedSubsetSumExact helper;
EXPECT_EQ(helper.MaxSubsetSum(elements, capacity), capacity);
}
TEST(MaxBoundedSubsetSumExactTest, CornerCase) {
MaxBoundedSubsetSumExact helper;
EXPECT_EQ(helper.MaxSubsetSum({0, 5, 6}, 4), 0);
}
} // namespace
} // namespace sat
} // namespace operations_research